Update Windows Python3 prebuilt

Fusion2: http://fusion2/e1653ce5-a00b-4501-8f0f-1cb270563905
GCS path: gs://ndk-kokoro-release-artifacts/prod/ndk/python3/windows_release/4/20220830-004242
Prebuilt updated using: ndk/scripts/update_kokoro_prebuilts.py

Test: Treehugger, Kokoro presubmit
Bug: 244197859
Change-Id: I7e3cb5b8add31fd59e4085dba550c6fb436f1ed8
diff --git a/DLLs/_asyncio.pyd b/DLLs/_asyncio.pyd
index 6c3d753..edff189 100644
--- a/DLLs/_asyncio.pyd
+++ b/DLLs/_asyncio.pyd
Binary files differ
diff --git a/DLLs/_bz2.pyd b/DLLs/_bz2.pyd
index 646e1ce..78901da 100644
--- a/DLLs/_bz2.pyd
+++ b/DLLs/_bz2.pyd
Binary files differ
diff --git a/DLLs/_ctypes.pyd b/DLLs/_ctypes.pyd
index 68a2e8a..9c9e213 100644
--- a/DLLs/_ctypes.pyd
+++ b/DLLs/_ctypes.pyd
Binary files differ
diff --git a/DLLs/_decimal.pyd b/DLLs/_decimal.pyd
index 514f287..084359c 100644
--- a/DLLs/_decimal.pyd
+++ b/DLLs/_decimal.pyd
Binary files differ
diff --git a/DLLs/_elementtree.pyd b/DLLs/_elementtree.pyd
index 2bb99aa..926fa3d 100644
--- a/DLLs/_elementtree.pyd
+++ b/DLLs/_elementtree.pyd
Binary files differ
diff --git a/DLLs/_lzma.pyd b/DLLs/_lzma.pyd
index 50d2914..8d90e61 100644
--- a/DLLs/_lzma.pyd
+++ b/DLLs/_lzma.pyd
Binary files differ
diff --git a/DLLs/_msi.pyd b/DLLs/_msi.pyd
index f7e981e..8aab1d4 100644
--- a/DLLs/_msi.pyd
+++ b/DLLs/_msi.pyd
Binary files differ
diff --git a/DLLs/_multiprocessing.pyd b/DLLs/_multiprocessing.pyd
index ad1ad4a..068b3ab 100644
--- a/DLLs/_multiprocessing.pyd
+++ b/DLLs/_multiprocessing.pyd
Binary files differ
diff --git a/DLLs/_overlapped.pyd b/DLLs/_overlapped.pyd
index cefc700..d95ed6a 100644
--- a/DLLs/_overlapped.pyd
+++ b/DLLs/_overlapped.pyd
Binary files differ
diff --git a/DLLs/_queue.pyd b/DLLs/_queue.pyd
index a9fe16b..7bbacdf 100644
--- a/DLLs/_queue.pyd
+++ b/DLLs/_queue.pyd
Binary files differ
diff --git a/DLLs/_socket.pyd b/DLLs/_socket.pyd
index 764531d..a7e8832 100644
--- a/DLLs/_socket.pyd
+++ b/DLLs/_socket.pyd
Binary files differ
diff --git a/DLLs/_uuid.pyd b/DLLs/_uuid.pyd
index 6ec9487..a781883 100644
--- a/DLLs/_uuid.pyd
+++ b/DLLs/_uuid.pyd
Binary files differ
diff --git a/DLLs/_zoneinfo.pyd b/DLLs/_zoneinfo.pyd
index fcd929b..e5c7957 100644
--- a/DLLs/_zoneinfo.pyd
+++ b/DLLs/_zoneinfo.pyd
Binary files differ
diff --git a/DLLs/libffi.dll b/DLLs/libffi.dll
index 1549171..6d6e9c6 100644
--- a/DLLs/libffi.dll
+++ b/DLLs/libffi.dll
Binary files differ
diff --git a/DLLs/pyexpat.pyd b/DLLs/pyexpat.pyd
index 4921cab..e57a242 100644
--- a/DLLs/pyexpat.pyd
+++ b/DLLs/pyexpat.pyd
Binary files differ
diff --git a/DLLs/select.pyd b/DLLs/select.pyd
index 2b46547..9e0d3a7 100644
--- a/DLLs/select.pyd
+++ b/DLLs/select.pyd
Binary files differ
diff --git a/DLLs/unicodedata.pyd b/DLLs/unicodedata.pyd
index d18d477..4741e63 100644
--- a/DLLs/unicodedata.pyd
+++ b/DLLs/unicodedata.pyd
Binary files differ
diff --git a/DLLs/winsound.pyd b/DLLs/winsound.pyd
index 02b6e8f..5fa3d3c 100644
--- a/DLLs/winsound.pyd
+++ b/DLLs/winsound.pyd
Binary files differ
diff --git a/Lib/LICENSE.txt b/Lib/LICENSE.txt
index f03a60c..ceba950 100644
--- a/Lib/LICENSE.txt
+++ b/Lib/LICENSE.txt
@@ -84,7 +84,7 @@
 distribute, and otherwise use Python alone or in any derivative version,
 provided, however, that PSF's License Agreement and PSF's notice of copyright,
 i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation;
 All Rights Reserved" are retained in Python alone or in any derivative version
 prepared by Licensee.
 
@@ -191,9 +191,9 @@
 Agreement, Licensee may substitute the following text (omitting the
 quotes): "Python 1.6.1 is made available subject to the terms and
 conditions in CNRI's License Agreement.  This Agreement together with
-Python 1.6.1 may be located on the Internet using the following
+Python 1.6.1 may be located on the internet using the following
 unique, persistent identifier (known as a handle): 1895.22/1013.  This
-Agreement may also be obtained from a proxy server on the Internet
+Agreement may also be obtained from a proxy server on the internet
 using the following URL: http://hdl.handle.net/1895.22/1013".
 
 3. In the event Licensee prepares a derivative work that is based on
@@ -385,9 +385,9 @@
 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 

-version 1.2.11, January 15th, 2017
+version 1.2.12, March 27th, 2022
 
-Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
+Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
 
 This software is provided 'as-is', without any express or implied
 warranty.  In no event will the authors be held liable for any damages
diff --git a/Lib/__future__.py b/Lib/__future__.py
index 0e7b555..97dc90c 100644
--- a/Lib/__future__.py
+++ b/Lib/__future__.py
@@ -42,7 +42,7 @@
 argument to the builtin function compile() to enable the feature in
 dynamically compiled code.  This flag is stored in the .compiler_flag
 attribute on _Future instances.  These values must match the appropriate
-#defines of CO_xxx flags in Include/compile.h.
+#defines of CO_xxx flags in Include/cpython/compile.h.
 
 No feature line is ever to be deleted from this file.
 """
@@ -143,5 +143,5 @@
                           CO_FUTURE_GENERATOR_STOP)
 
 annotations = _Feature((3, 7, 0, "beta", 1),
-                       (3, 10, 0, "alpha", 0),
+                       (3, 11, 0, "alpha", 0),
                        CO_FUTURE_ANNOTATIONS)
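
The __future__.py hunk above reflects the deferral of PEP 563: the mandatory
release for the `annotations` feature moves from 3.10 to 3.11 (it has since
been deferred indefinitely). A minimal, runnable sketch of what the future
import does in the meantime (names are illustrative):

    from __future__ import annotations

    def greet(name: UndefinedType) -> None:  # annotation is never evaluated
        print(f"hello, {name}")

    greet("world")                 # fine even though UndefinedType is undefined
    print(greet.__annotations__)   # {'name': 'UndefinedType', 'return': 'None'}
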
diff --git a/Lib/_bootlocale.py b/Lib/_bootlocale.py
deleted file mode 100644
index 3273a3b..0000000
--- a/Lib/_bootlocale.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""A minimal subset of the locale module used at interpreter startup
-(imported by the _io module), in order to reduce startup time.
-
-Don't import directly from third-party code; use the `locale` module instead!
-"""
-
-import sys
-import _locale
-
-if sys.platform.startswith("win"):
-    def getpreferredencoding(do_setlocale=True):
-        if sys.flags.utf8_mode:
-            return 'UTF-8'
-        return _locale._getdefaultlocale()[1]
-else:
-    try:
-        _locale.CODESET
-    except AttributeError:
-        if hasattr(sys, 'getandroidapilevel'):
-            # On Android langinfo.h and CODESET are missing, and UTF-8 is
-            # always used in mbstowcs() and wcstombs().
-            def getpreferredencoding(do_setlocale=True):
-                return 'UTF-8'
-        else:
-            def getpreferredencoding(do_setlocale=True):
-                if sys.flags.utf8_mode:
-                    return 'UTF-8'
-                # This path for legacy systems needs the more complex
-                # getdefaultlocale() function, import the full locale module.
-                import locale
-                return locale.getpreferredencoding(do_setlocale)
-    else:
-        def getpreferredencoding(do_setlocale=True):
-            assert not do_setlocale
-            if sys.flags.utf8_mode:
-                return 'UTF-8'
-            result = _locale.nl_langinfo(_locale.CODESET)
-            if not result and sys.platform == 'darwin':
-                # nl_langinfo can return an empty string
-                # when the setting has an invalid value.
-                # Default to UTF-8 in that case because
-                # UTF-8 is the default charset on OSX and
-                # returning nothing will crash the
-                # interpreter.
-                result = 'UTF-8'
-            return result
diff --git a/Lib/_collections_abc.py b/Lib/_collections_abc.py
index 36cd993..40417dc 100644
--- a/Lib/_collections_abc.py
+++ b/Lib/_collections_abc.py
@@ -10,6 +10,10 @@
 import sys
 
 GenericAlias = type(list[int])
+EllipsisType = type(...)
+def _f(): pass
+FunctionType = type(_f)
+del _f
 
 __all__ = ["Awaitable", "Coroutine",
            "AsyncIterable", "AsyncIterator", "AsyncGenerator",
@@ -409,6 +413,143 @@
         return NotImplemented
 
 
+class _CallableGenericAlias(GenericAlias):
+    """ Represent `Callable[argtypes, resulttype]`.
+
+    This sets ``__args__`` to a tuple containing the flattened ``argtypes``
+    followed by ``resulttype``.
+
+    Example: ``Callable[[int, str], float]`` sets ``__args__`` to
+    ``(int, str, float)``.
+    """
+
+    __slots__ = ()
+
+    def __new__(cls, origin, args):
+        if not (isinstance(args, tuple) and len(args) == 2):
+            raise TypeError(
+                "Callable must be used as Callable[[arg, ...], result].")
+        t_args, t_result = args
+        if isinstance(t_args, list):
+            args = (*t_args, t_result)
+        elif not _is_param_expr(t_args):
+            raise TypeError(f"Expected a list of types, an ellipsis, "
+                            f"ParamSpec, or Concatenate. Got {t_args}")
+        return super().__new__(cls, origin, args)
+
+    @property
+    def __parameters__(self):
+        params = []
+        for arg in self.__args__:
+            # Looks like a genericalias
+            if hasattr(arg, "__parameters__") and isinstance(arg.__parameters__, tuple):
+                params.extend(arg.__parameters__)
+            else:
+                if _is_typevarlike(arg):
+                    params.append(arg)
+        return tuple(dict.fromkeys(params))
+
+    def __repr__(self):
+        if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]):
+            return super().__repr__()
+        return (f'collections.abc.Callable'
+                f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], '
+                f'{_type_repr(self.__args__[-1])}]')
+
+    def __reduce__(self):
+        args = self.__args__
+        if not (len(args) == 2 and _is_param_expr(args[0])):
+            args = list(args[:-1]), args[-1]
+        return _CallableGenericAlias, (Callable, args)
+
+    def __getitem__(self, item):
+        # Called during TypeVar substitution, returns the custom subclass
+        # rather than the default types.GenericAlias object.  Most of the
+        # code is copied from typing's _GenericAlias and the builtin
+        # types.GenericAlias.
+
+        # A special case in PEP 612 where if X = Callable[P, int],
+        # then X[int, str] == X[[int, str]].
+        param_len = len(self.__parameters__)
+        if param_len == 0:
+            raise TypeError(f'{self} is not a generic class')
+        if not isinstance(item, tuple):
+            item = (item,)
+        if (param_len == 1 and _is_param_expr(self.__parameters__[0])
+                and item and not _is_param_expr(item[0])):
+            item = (list(item),)
+        item_len = len(item)
+        if item_len != param_len:
+            raise TypeError(f'Too {"many" if item_len > param_len else "few"}'
+                            f' arguments for {self};'
+                            f' actual {item_len}, expected {param_len}')
+        subst = dict(zip(self.__parameters__, item))
+        new_args = []
+        for arg in self.__args__:
+            if _is_typevarlike(arg):
+                if _is_param_expr(arg):
+                    arg = subst[arg]
+                    if not _is_param_expr(arg):
+                        raise TypeError(f"Expected a list of types, an ellipsis, "
+                                        f"ParamSpec, or Concatenate. Got {arg}")
+                else:
+                    arg = subst[arg]
+            # Looks like a GenericAlias
+            elif hasattr(arg, '__parameters__') and isinstance(arg.__parameters__, tuple):
+                subparams = arg.__parameters__
+                if subparams:
+                    subargs = tuple(subst[x] for x in subparams)
+                    arg = arg[subargs]
+            if isinstance(arg, tuple):
+                new_args.extend(arg)
+            else:
+                new_args.append(arg)
+
+        # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612
+        if not isinstance(new_args[0], list):
+            t_result = new_args[-1]
+            t_args = new_args[:-1]
+            new_args = (t_args, t_result)
+        return _CallableGenericAlias(Callable, tuple(new_args))
+
+
+def _is_typevarlike(arg):
+    obj = type(arg)
+    # looks like a TypeVar/ParamSpec
+    return (obj.__module__ == 'typing'
+            and obj.__name__ in {'ParamSpec', 'TypeVar'})
+
+def _is_param_expr(obj):
+    """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or
+    ``_ConcatenateGenericAlias`` from typing.py
+    """
+    if obj is Ellipsis:
+        return True
+    if isinstance(obj, list):
+        return True
+    obj = type(obj)
+    names = ('ParamSpec', '_ConcatenateGenericAlias')
+    return obj.__module__ == 'typing' and any(obj.__name__ == name for name in names)
+
+def _type_repr(obj):
+    """Return the repr() of an object, special-casing types (internal helper).
+
+    Copied from :mod:`typing` since collections.abc
+    shouldn't depend on that module.
+    """
+    if isinstance(obj, GenericAlias):
+        return repr(obj)
+    if isinstance(obj, type):
+        if obj.__module__ == 'builtins':
+            return obj.__qualname__
+        return f'{obj.__module__}.{obj.__qualname__}'
+    if obj is Ellipsis:
+        return '...'
+    if isinstance(obj, FunctionType):
+        return obj.__name__
+    return repr(obj)
+
+
 class Callable(metaclass=ABCMeta):
 
     __slots__ = ()
@@ -423,14 +564,13 @@
             return _check_methods(C, "__call__")
         return NotImplemented
 
-    __class_getitem__ = classmethod(GenericAlias)
+    __class_getitem__ = classmethod(_CallableGenericAlias)
 
 
 ### SETS ###
 
 
 class Set(Collection):
-
     """A set is a finite, iterable container.
 
     This class provides concrete generic implementations of all
@@ -558,6 +698,7 @@
             hx = hash(x)
             h ^= (hx ^ (hx << 16) ^ 89869747)  * 3644798167
             h &= MASK
+        h ^= (h >> 11) ^ (h >> 25)
         h = h * 69069 + 907133923
         h &= MASK
         if h > MAX:
@@ -655,19 +796,19 @@
 
 ### MAPPINGS ###
 
-
 class Mapping(Collection):
-
-    __slots__ = ()
-
     """A Mapping is a generic container for associating key/value
     pairs.
 
     This class provides concrete generic implementations of all
     methods except for __getitem__, __iter__, and __len__.
-
     """
 
+    __slots__ = ()
+
+    # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set.
+    __abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING
+
     @abstractmethod
     def __getitem__(self, key):
         raise KeyError
@@ -706,7 +847,6 @@
 
     __reversed__ = None
 
-
 Mapping.register(mappingproxy)
 
 
@@ -731,7 +871,7 @@
     __slots__ = ()
 
     @classmethod
-    def _from_iterable(self, it):
+    def _from_iterable(cls, it):
         return set(it)
 
     def __contains__(self, key):
@@ -749,7 +889,7 @@
     __slots__ = ()
 
     @classmethod
-    def _from_iterable(self, it):
+    def _from_iterable(cls, it):
         return set(it)
 
     def __contains__(self, item):
@@ -789,18 +929,16 @@
 
 
 class MutableMapping(Mapping):
-
-    __slots__ = ()
-
     """A MutableMapping is a generic container for associating
     key/value pairs.
 
     This class provides concrete generic implementations of all
     methods except for __getitem__, __setitem__, __delitem__,
     __iter__, and __len__.
-
     """
 
+    __slots__ = ()
+
     @abstractmethod
     def __setitem__(self, key, value):
         raise KeyError
@@ -877,9 +1015,7 @@
 
 ### SEQUENCES ###
 
-
 class Sequence(Reversible, Collection):
-
     """All the operations on a read-only sequence.
 
     Concrete subclasses must override __new__ or __init__,
@@ -888,6 +1024,9 @@
 
     __slots__ = ()
 
+    # Tell ABCMeta.__new__ that this class should have TPFLAGS_SEQUENCE set.
+    __abc_tpflags__ = 1 << 5 # Py_TPFLAGS_SEQUENCE
+
     @abstractmethod
     def __getitem__(self, index):
         raise IndexError
@@ -939,7 +1078,6 @@
         'S.count(value) -> integer -- return number of occurrences of value'
         return sum(1 for v in self if v is value or v == value)
 
-
 Sequence.register(tuple)
 Sequence.register(str)
 Sequence.register(range)
@@ -947,7 +1085,6 @@
 
 
 class ByteString(Sequence):
-
     """This unifies bytes and bytearray.
 
     XXX Should add all their methods.
@@ -960,16 +1097,14 @@
 
 
 class MutableSequence(Sequence):
-
-    __slots__ = ()
-
     """All the operations on a read-write sequence.
 
     Concrete subclasses must provide __new__ or __init__,
     __getitem__, __setitem__, __delitem__, __len__, and insert().
-
     """
 
+    __slots__ = ()
+
     @abstractmethod
     def __setitem__(self, index, value):
         raise IndexError
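
The `_CallableGenericAlias` machinery added above gives
`collections.abc.Callable` the flattened `__args__` described in its
docstring, plus TypeVar/ParamSpec substitution. A short illustration on a
3.10 interpreter such as this prebuilt:

    from collections.abc import Callable
    from typing import TypeVar

    alias = Callable[[int, str], float]
    print(alias.__args__)   # (int, str, float) -- argtypes flattened + result
    print(alias)            # collections.abc.Callable[[int, str], float]

    # TypeVar substitution round-trips through the custom subclass:
    T = TypeVar("T")
    print(Callable[[T], T][int])   # collections.abc.Callable[[int], int]
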
diff --git a/Lib/_compression.py b/Lib/_compression.py
index b00f31b..e8b70aa 100644
--- a/Lib/_compression.py
+++ b/Lib/_compression.py
@@ -1,7 +1,7 @@
 """Internal classes used by the gzip, lzma and bz2 modules"""
 
 import io
-
+import sys
 
 BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE  # Compressed data read chunk size
 
@@ -110,6 +110,16 @@
         self._pos += len(data)
         return data
 
+    def readall(self):
+        chunks = []
+        # sys.maxsize means the max length of output buffer is unlimited,
+        # so that the whole input buffer can be decompressed within one
+        # .decompress() call.
+        while data := self.read(sys.maxsize):
+            chunks.append(data)
+
+        return b"".join(chunks)
+
     # Rewind the file to the beginning of the data stream.
     def _rewind(self):
         self._fp.seek(0)
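
The new `DecompressReader.readall()` above drains the stream in maximal
chunks driven by an assignment expression. The same pattern on an arbitrary
readable object, as a self-contained sketch (`read_all` is an illustrative
name, not part of the module):

    import io
    import sys

    def read_all(fp):
        chunks = []
        # read() with sys.maxsize requests an effectively unlimited chunk;
        # the loop stops as soon as read() returns b"" (EOF).
        while data := fp.read(sys.maxsize):
            chunks.append(data)
        return b"".join(chunks)

    print(read_all(io.BytesIO(b"spam " * 4)))   # b'spam spam spam spam '
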
diff --git a/Lib/_markupbase.py b/Lib/_markupbase.py
index 2af5f1c..3ad7e27 100644
--- a/Lib/_markupbase.py
+++ b/Lib/_markupbase.py
@@ -29,10 +29,6 @@
             raise RuntimeError(
                 "_markupbase.ParserBase must be subclassed")
 
-    def error(self, message):
-        raise NotImplementedError(
-            "subclasses of ParserBase must override error()")
-
     def reset(self):
         self.lineno = 1
         self.offset = 0
@@ -131,12 +127,11 @@
                     # also in data attribute specifications of attlist declaration
                     # also link type declaration subsets in linktype declarations
                     # also link attribute specification lists in link declarations
-                    self.error("unsupported '[' char in %s declaration" % decltype)
+                    raise AssertionError("unsupported '[' char in %s declaration" % decltype)
                 else:
-                    self.error("unexpected '[' char in declaration")
+                    raise AssertionError("unexpected '[' char in declaration")
             else:
-                self.error(
-                    "unexpected %r char in declaration" % rawdata[j])
+                raise AssertionError("unexpected %r char in declaration" % rawdata[j])
             if j < 0:
                 return j
         return -1 # incomplete
@@ -156,7 +151,9 @@
             # look for MS Office ]> ending
             match= _msmarkedsectionclose.search(rawdata, i+3)
         else:
-            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
+            raise AssertionError(
+                'unknown status keyword %r in marked section' % rawdata[i+3:j]
+            )
         if not match:
             return -1
         if report:
@@ -168,7 +165,7 @@
     def parse_comment(self, i, report=1):
         rawdata = self.rawdata
         if rawdata[i:i+4] != '<!--':
-            self.error('unexpected call to parse_comment()')
+            raise AssertionError('unexpected call to parse_comment()')
         match = _commentclose.search(rawdata, i+4)
         if not match:
             return -1
@@ -192,7 +189,9 @@
                     return -1
                 if s != "<!":
                     self.updatepos(declstartpos, j + 1)
-                    self.error("unexpected char in internal subset (in %r)" % s)
+                    raise AssertionError(
+                        "unexpected char in internal subset (in %r)" % s
+                    )
                 if (j + 2) == n:
                     # end of buffer; incomplete
                     return -1
@@ -209,8 +208,9 @@
                     return -1
                 if name not in {"attlist", "element", "entity", "notation"}:
                     self.updatepos(declstartpos, j + 2)
-                    self.error(
-                        "unknown declaration %r in internal subset" % name)
+                    raise AssertionError(
+                        "unknown declaration %r in internal subset" % name
+                    )
                 # handle the individual names
                 meth = getattr(self, "_parse_doctype_" + name)
                 j = meth(j, declstartpos)
@@ -234,14 +234,14 @@
                     if rawdata[j] == ">":
                         return j
                     self.updatepos(declstartpos, j)
-                    self.error("unexpected char after internal subset")
+                    raise AssertionError("unexpected char after internal subset")
                 else:
                     return -1
             elif c.isspace():
                 j = j + 1
             else:
                 self.updatepos(declstartpos, j)
-                self.error("unexpected char %r in internal subset" % c)
+                raise AssertionError("unexpected char %r in internal subset" % c)
         # end of buffer reached
         return -1
 
@@ -387,8 +387,9 @@
             return name.lower(), m.end()
         else:
             self.updatepos(declstartpos, i)
-            self.error("expected name token at %r"
-                       % rawdata[declstartpos:declstartpos+20])
+            raise AssertionError(
+                "expected name token at %r" % rawdata[declstartpos:declstartpos+20]
+            )
 
     # To be overridden -- handlers for unknown objects
     def unknown_decl(self, data):
diff --git a/Lib/_osx_support.py b/Lib/_osx_support.py
index 37975fe..aa66c8b 100644
--- a/Lib/_osx_support.py
+++ b/Lib/_osx_support.py
@@ -96,7 +96,7 @@
     if _SYSTEM_VERSION is None:
         _SYSTEM_VERSION = ''
         try:
-            f = open('/System/Library/CoreServices/SystemVersion.plist')
+            f = open('/System/Library/CoreServices/SystemVersion.plist', encoding="utf-8")
         except OSError:
             # We're on a plain darwin box, fall back to the default
             # behaviour.
@@ -156,9 +156,9 @@
 
     if _cache_default_sysroot is not None:
         return _cache_default_sysroot
-   
+
     contents = _read_output('%s -c -E -v - </dev/null' % (cc,), True)
-    in_incdirs = False   
+    in_incdirs = False
     for line in contents.splitlines():
         if line.startswith("#include <...>"):
             in_incdirs = True
@@ -428,10 +428,9 @@
             break
 
     if sysroot and not os.path.isdir(sysroot):
-        from distutils import log
-        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
-                sysroot)
-        log.warn("Please check your Xcode installation")
+        sys.stderr.write(f"Compiling with an SDK that doesn't seem to exist: {sysroot}\n")
+        sys.stderr.write("Please check your Xcode installation\n")
+        sys.stderr.flush()
 
     return compiler_so
 
@@ -482,7 +481,7 @@
 
     This customization is performed when the first
     extension module build is requested
-    in distutils.sysconfig.customize_compiler).
+    in distutils.sysconfig.customize_compiler.
     """
 
     # Find a compiler to use for extension module builds
@@ -525,10 +524,10 @@
             try:
                 macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
             except ValueError:
-                macrelease = (10, 0)
+                macrelease = (10, 3)
         else:
             # assume no universal support
-            macrelease = (10, 0)
+            macrelease = (10, 3)
 
         if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
             # The universal build will build fat binaries, but not on
diff --git a/Lib/_pydecimal.py b/Lib/_pydecimal.py
index ab989e5..3d6cece 100644
--- a/Lib/_pydecimal.py
+++ b/Lib/_pydecimal.py
@@ -951,7 +951,7 @@
             if self.is_snan():
                 raise TypeError('Cannot hash a signaling NaN value.')
             elif self.is_nan():
-                return _PyHASH_NAN
+                return object.__hash__(self)
             else:
                 if self._sign:
                     return -_PyHASH_INF
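
With the change above, hashing a quiet Decimal NaN no longer returns a fixed
sentinel; it falls back to identity-based `object.__hash__`, matching the
NaN-hashing change made to float and complex in the same release. A sketch
against the pure-Python module this hunk patches:

    from _pydecimal import Decimal   # the module patched above

    a, b = Decimal("nan"), Decimal("nan")
    print(hash(a) == hash(b))   # generally False: hashes follow identity now
    print(len({a, b}))          # 2 -- distinct NaN objects no longer collide
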
diff --git a/Lib/_pyio.py b/Lib/_pyio.py
index 4804ed2..fb867fb 100644
--- a/Lib/_pyio.py
+++ b/Lib/_pyio.py
@@ -40,6 +40,36 @@
 _CHECK_ERRORS = _IOBASE_EMITS_UNRAISABLE
 
 
+def text_encoding(encoding, stacklevel=2):
+    """
+    A helper function to choose the text encoding.
+
+    When encoding is not None, just return it.
+    Otherwise, return the default text encoding (i.e. "locale").
+
+    This function emits an EncodingWarning if *encoding* is None and
+    sys.flags.warn_default_encoding is true.
+
+    This can be used in APIs with an encoding=None parameter
+    that pass it to TextIOWrapper or open.
+    However, please consider using encoding="utf-8" for new APIs.
+    """
+    if encoding is None:
+        encoding = "locale"
+        if sys.flags.warn_default_encoding:
+            import warnings
+            warnings.warn("'encoding' argument not specified.",
+                          EncodingWarning, stacklevel + 1)
+    return encoding
+
+
+# Wrapper for builtins.open
+#
+# Trick so that open() won't become a bound method when stored
+# as a class variable (as dbm.dumb does).
+#
+# See init_set_builtins_open() in Python/pylifecycle.c.
+@staticmethod
 def open(file, mode="r", buffering=-1, encoding=None, errors=None,
          newline=None, closefd=True, opener=None):
 
@@ -248,6 +278,7 @@
         result = buffer
         if binary:
             return result
+        encoding = text_encoding(encoding)
         text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
         result = text
         text.mode = mode
@@ -280,27 +311,20 @@
     open_code = _open_code_with_warning
 
 
-class DocDescriptor:
-    """Helper for builtins.open.__doc__
-    """
-    def __get__(self, obj, typ=None):
-        return (
-            "open(file, mode='r', buffering=-1, encoding=None, "
-                 "errors=None, newline=None, closefd=True)\n\n" +
-            open.__doc__)
-
-class OpenWrapper:
-    """Wrapper for builtins.open
-
-    Trick so that open won't become a bound method when stored
-    as a class variable (as dbm.dumb does).
-
-    See initstdio() in Python/pylifecycle.c.
-    """
-    __doc__ = DocDescriptor()
-
-    def __new__(cls, *args, **kwargs):
-        return open(*args, **kwargs)
+def __getattr__(name):
+    if name == "OpenWrapper":
+        # bpo-43680: Until Python 3.9, _pyio.open was not a static method and
+        # builtins.open was set to OpenWrapper to not become a bound method
+        # when set to a class variable. _io.open is a built-in function whereas
+        # _pyio.open is a Python function. In Python 3.10, _pyio.open() is now
+        # a static method, and builtins.open() is now io.open().
+        import warnings
+        warnings.warn('OpenWrapper is deprecated, use open instead',
+                      DeprecationWarning, stacklevel=2)
+        global OpenWrapper
+        OpenWrapper = open
+        return OpenWrapper
+    raise AttributeError(name)
 
 
 # In normal operation, both `UnsupportedOperation`s should be bound to the
@@ -314,8 +338,7 @@
 
 class IOBase(metaclass=abc.ABCMeta):
 
-    """The abstract base class for all I/O classes, acting on streams of
-    bytes. There is no public constructor.
+    """The abstract base class for all I/O classes.
 
     This class provides dummy implementations for many methods that
     derived classes can override selectively; the default implementations
@@ -1821,7 +1844,7 @@
     """Base class for text I/O.
 
     This class provides a character and line based interface to stream
-    I/O. There is no public constructor.
+    I/O.
     """
 
     def read(self, size=-1):
@@ -2004,19 +2027,22 @@
     def __init__(self, buffer, encoding=None, errors=None, newline=None,
                  line_buffering=False, write_through=False):
         self._check_newline(newline)
-        if encoding is None:
+        encoding = text_encoding(encoding)
+
+        if encoding == "locale":
             try:
-                encoding = os.device_encoding(buffer.fileno())
+                encoding = os.device_encoding(buffer.fileno()) or "locale"
             except (AttributeError, UnsupportedOperation):
                 pass
-            if encoding is None:
-                try:
-                    import locale
-                except ImportError:
-                    # Importing locale may fail if Python is being built
-                    encoding = "ascii"
-                else:
-                    encoding = locale.getpreferredencoding(False)
+
+        if encoding == "locale":
+            try:
+                import locale
+            except ImportError:
+                # Importing locale may fail if Python is being built
+                encoding = "utf-8"
+            else:
+                encoding = locale.getpreferredencoding(False)
 
         if not isinstance(encoding, str):
             raise ValueError("invalid encoding: %r" % encoding)
diff --git a/Lib/_sitebuiltins.py b/Lib/_sitebuiltins.py
index c29cf4b..c66269a 100644
--- a/Lib/_sitebuiltins.py
+++ b/Lib/_sitebuiltins.py
@@ -47,7 +47,7 @@
         data = None
         for filename in self.__filenames:
             try:
-                with open(filename, "r") as fp:
+                with open(filename, encoding='utf-8') as fp:
                     data = fp.read()
                 break
             except OSError:
diff --git a/Lib/_strptime.py b/Lib/_strptime.py
index 5df37f5..b97dfcc 100644
--- a/Lib/_strptime.py
+++ b/Lib/_strptime.py
@@ -201,7 +201,7 @@
             #XXX: Does 'Y' need to worry about having less or more than
             #     4 digits?
             'Y': r"(?P<Y>\d\d\d\d)",
-            'z': r"(?P<z>[+-]\d\d:?[0-5]\d(:?[0-5]\d(\.\d{1,6})?)?|Z)",
+            'z': r"(?P<z>[+-]\d\d:?[0-5]\d(:?[0-5]\d(\.\d{1,6})?)?|(?-i:Z))",
             'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
             'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
             'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
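
The `(?-i:Z)` above locally turns off the IGNORECASE flag that strptime
patterns are compiled with, so only an uppercase `Z` is accepted as the UTC
designator for %z. For example:

    from datetime import datetime

    fmt = "%Y-%m-%dT%H:%M:%S%z"
    print(datetime.strptime("2022-08-30T00:42:42Z", fmt))   # parses as UTC

    try:
        datetime.strptime("2022-08-30T00:42:42z", fmt)      # lowercase rejected
    except ValueError as exc:
        print(exc)
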
diff --git a/Lib/_weakrefset.py b/Lib/_weakrefset.py
index b267780..2a27684 100644
--- a/Lib/_weakrefset.py
+++ b/Lib/_weakrefset.py
@@ -51,10 +51,14 @@
             self.update(data)
 
     def _commit_removals(self):
-        l = self._pending_removals
+        pop = self._pending_removals.pop
         discard = self.data.discard
-        while l:
-            discard(l.pop())
+        while True:
+            try:
+                item = pop()
+            except IndexError:
+                return
+            discard(item)
 
     def __iter__(self):
         with _IterationGuard(self):
diff --git a/Lib/abc.py b/Lib/abc.py
index 431b640..3c552ce 100644
--- a/Lib/abc.py
+++ b/Lib/abc.py
@@ -28,7 +28,14 @@
 class abstractclassmethod(classmethod):
     """A decorator indicating abstract classmethods.
 
-    Deprecated, use 'classmethod' with 'abstractmethod' instead.
+    Deprecated, use 'classmethod' with 'abstractmethod' instead:
+
+        class C(ABC):
+            @classmethod
+            @abstractmethod
+            def my_abstract_classmethod(cls, ...):
+                ...
+
     """
 
     __isabstractmethod__ = True
@@ -41,7 +48,14 @@
 class abstractstaticmethod(staticmethod):
     """A decorator indicating abstract staticmethods.
 
-    Deprecated, use 'staticmethod' with 'abstractmethod' instead.
+    Deprecated, use 'staticmethod' with 'abstractmethod' instead:
+
+        class C(ABC):
+            @staticmethod
+            @abstractmethod
+            def my_abstract_staticmethod(...):
+                ...
+
     """
 
     __isabstractmethod__ = True
@@ -54,7 +68,14 @@
 class abstractproperty(property):
     """A decorator indicating abstract properties.
 
-    Deprecated, use 'property' with 'abstractmethod' instead.
+    Deprecated, use 'property' with 'abstractmethod' instead:
+
+        class C(ABC):
+            @property
+            @abstractmethod
+            def my_abstract_property(self):
+                ...
+
     """
 
     __isabstractmethod__ = True
@@ -122,6 +143,44 @@
             _reset_caches(cls)
 
 
+def update_abstractmethods(cls):
+    """Recalculate the set of abstract methods of an abstract class.
+
+    If a class has had one of its abstract methods implemented after the
+    class was created, the method will not be considered implemented until
+    this function is called. Alternatively, if a new abstract method has been
+    added to the class, it will only be considered an abstract method of the
+    class after this function is called.
+
+    This function should be called before any use is made of the class,
+    usually in class decorators that add methods to the subject class.
+
+    Returns cls, to allow usage as a class decorator.
+
+    If cls is not an instance of ABCMeta, does nothing.
+    """
+    if not hasattr(cls, '__abstractmethods__'):
+        # We check for __abstractmethods__ here because cls might be a C
+        # implementation or a Python implementation (especially during
+        # testing), and we want to handle both cases.
+        return cls
+
+    abstracts = set()
+    # Check the existing abstract methods of the parents, keep only the ones
+    # that are not implemented.
+    for scls in cls.__bases__:
+        for name in getattr(scls, '__abstractmethods__', ()):
+            value = getattr(cls, name, None)
+            if getattr(value, "__isabstractmethod__", False):
+                abstracts.add(name)
+    # Also add any other newly added abstract methods.
+    for name, value in cls.__dict__.items():
+        if getattr(value, "__isabstractmethod__", False):
+            abstracts.add(name)
+    cls.__abstractmethods__ = frozenset(abstracts)
+    return cls
+
+
 class ABC(metaclass=ABCMeta):
     """Helper class that provides a standard way to create an ABC using
     inheritance.
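
`update_abstractmethods()` above is new public API aimed at class decorators
that add implementations after class creation. A runnable sketch with
illustrative class names:

    from abc import ABC, abstractmethod, update_abstractmethods

    class Base(ABC):
        @abstractmethod
        def run(self): ...

    def add_run(cls):
        cls.run = lambda self: "ran"
        return update_abstractmethods(cls)   # recompute __abstractmethods__

    @add_run
    class Concrete(Base):
        pass

    # Without the recalculation, Concrete() would still raise TypeError,
    # because __abstractmethods__ was frozen at class-creation time.
    print(Concrete().run())   # ran
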
diff --git a/Lib/argparse.py b/Lib/argparse.py
index 2fb1da5..2c0dd85 100644
--- a/Lib/argparse.py
+++ b/Lib/argparse.py
@@ -392,6 +392,9 @@
         group_actions = set()
         inserts = {}
         for group in groups:
+            if not group._group_actions:
+                raise ValueError(f'empty group {group}')
+
             try:
                 start = actions.index(group._group_actions[0])
             except ValueError:
@@ -526,12 +529,13 @@
         parts = [action_header]
 
         # if there was help for the action, add lines of help text
-        if action.help:
+        if action.help and action.help.strip():
             help_text = self._expand_help(action)
-            help_lines = self._split_lines(help_text, help_width)
-            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
-            for line in help_lines[1:]:
-                parts.append('%*s%s\n' % (help_position, '', line))
+            if help_text:
+                help_lines = self._split_lines(help_text, help_width)
+                parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
+                for line in help_lines[1:]:
+                    parts.append('%*s%s\n' % (help_position, '', line))
 
         # or add a newline if the description doesn't end with one
         elif not action_header.endswith('\n'):
@@ -722,11 +726,13 @@
     if argument is None:
         return None
     elif argument.option_strings:
-        return  '/'.join(argument.option_strings)
+        return '/'.join(argument.option_strings)
     elif argument.metavar not in (None, SUPPRESS):
         return argument.metavar
     elif argument.dest not in (None, SUPPRESS):
         return argument.dest
+    elif argument.choices:
+        return '{' + ','.join(argument.choices) + '}'
     else:
         return None
 
@@ -872,8 +878,8 @@
                 option_string = '--no-' + option_string[2:]
                 _option_strings.append(option_string)
 
-        if help is not None and default is not None:
-            help += f" (default: {default})"
+        if help is not None and default is not None and default is not SUPPRESS:
+            help += " (default: %(default)s)"
 
         super().__init__(
             option_strings=_option_strings,
@@ -1250,9 +1256,9 @@
         # the special argument "-" means sys.std{in,out}
         if string == '-':
             if 'r' in self._mode:
-                return _sys.stdin
-            elif 'w' in self._mode:
-                return _sys.stdout
+                return _sys.stdin.buffer if 'b' in self._mode else _sys.stdin
+            elif any(c in self._mode for c in 'wax'):
+                return _sys.stdout.buffer if 'b' in self._mode else _sys.stdout
             else:
                 msg = _('argument "-" with mode %r') % self._mode
                 raise ValueError(msg)
@@ -1666,7 +1672,8 @@
     """Object for parsing command line strings into Python objects.
 
     Keyword Arguments:
-        - prog -- The name of the program (default: sys.argv[0])
+        - prog -- The name of the program (default:
+            ``os.path.basename(sys.argv[0])``)
         - usage -- A usage message (default: auto-generated from arguments)
         - description -- A description of what the program does
         - epilog -- Text following the argument descriptions
@@ -1719,7 +1726,7 @@
 
         add_group = self.add_argument_group
         self._positionals = add_group(_('positional arguments'))
-        self._optionals = add_group(_('optional arguments'))
+        self._optionals = add_group(_('options'))
         self._subparsers = None
 
         # register types
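
Two user-visible argparse changes in the hunks above: the auto-generated
optionals group is retitled "options", and BooleanOptionalAction now appends
the default lazily via '%(default)s' (and skips it for SUPPRESS defaults).
For instance:

    import argparse

    parser = argparse.ArgumentParser(prog="demo")
    parser.add_argument("--color", action=argparse.BooleanOptionalAction,
                        default=True, help="colorize output")
    parser.print_help()
    # Prints, roughly:
    #   options:
    #     -h, --help           show this help message and exit
    #     --color, --no-color  colorize output (default: True)
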
diff --git a/Lib/ast.py b/Lib/ast.py
index ecd4895..f4d2f6e 100644
--- a/Lib/ast.py
+++ b/Lib/ast.py
@@ -59,11 +59,14 @@
     sets, booleans, and None.
     """
     if isinstance(node_or_string, str):
-        node_or_string = parse(node_or_string, mode='eval')
+        node_or_string = parse(node_or_string.lstrip(" \t"), mode='eval')
     if isinstance(node_or_string, Expression):
         node_or_string = node_or_string.body
     def _raise_malformed_node(node):
-        raise ValueError(f'malformed node or string: {node!r}')
+        msg = "malformed node or string"
+        if lno := getattr(node, 'lineno', None):
+            msg += f' on line {lno}'
+        raise ValueError(msg + f': {node!r}')
     def _convert_num(node):
         if not isinstance(node, Constant) or type(node.value) not in (int, float, complex):
             _raise_malformed_node(node)
@@ -794,6 +797,9 @@
         else:
             super().visit(node)
 
+    # Note: as visit() resets the output text, do NOT rely on
+    # NodeVisitor.generic_visit to handle any nodes (as it calls back in to
+    # the subclass visit() method, which resets self._source to an empty list)
     def visit(self, node):
         """Outputs a source code string that, if converted back to an ast
         (using ast.parse) will generate an AST equivalent to *node*"""
@@ -1196,8 +1202,13 @@
 
     def _write_constant(self, value):
         if isinstance(value, (float, complex)):
-            # Substitute overflowing decimal literal for AST infinities.
-            self.write(repr(value).replace("inf", _INFSTR))
+            # Substitute overflowing decimal literal for AST infinities,
+            # and inf - inf for NaNs.
+            self.write(
+                repr(value)
+                .replace("inf", _INFSTR)
+                .replace("nan", f"({_INFSTR}-{_INFSTR})")
+            )
         elif self._avoid_backslashes and isinstance(value, str):
             self._write_str_avoiding_backslashes(value)
         else:
@@ -1270,10 +1281,13 @@
             self.traverse(node.orelse)
 
     def visit_Set(self, node):
-        if not node.elts:
-            raise ValueError("Set node should have at least one item")
-        with self.delimit("{", "}"):
-            self.interleave(lambda: self.write(", "), self.traverse, node.elts)
+        if node.elts:
+            with self.delimit("{", "}"):
+                self.interleave(lambda: self.write(", "), self.traverse, node.elts)
+        else:
+            # `{}` would be interpreted as a dictionary literal, and
+            # `set` might be shadowed. Thus:
+            self.write('{*()}')
 
     def visit_Dict(self, node):
         def write_key_value_pair(k, v):
@@ -1440,9 +1454,9 @@
 
     def visit_Subscript(self, node):
         def is_simple_tuple(slice_value):
-            # when unparsing a non-empty tuple, the parantheses can be safely
+            # when unparsing a non-empty tuple, the parentheses can be safely
             # omitted if there aren't any elements that explicitly requires
-            # parantheses (such as starred expressions).
+            # parentheses (such as starred expressions).
             return (
                 isinstance(slice_value, Tuple)
                 and slice_value.elts
@@ -1475,6 +1489,13 @@
             self.write(":")
             self.traverse(node.step)
 
+    def visit_Match(self, node):
+        self.fill("match ")
+        self.traverse(node.subject)
+        with self.block():
+            for case in node.cases:
+                self.traverse(case)
+
     def visit_arg(self, node):
         self.write(node.arg)
         if node.annotation:
@@ -1559,6 +1580,94 @@
             self.write(" as ")
             self.traverse(node.optional_vars)
 
+    def visit_match_case(self, node):
+        self.fill("case ")
+        self.traverse(node.pattern)
+        if node.guard:
+            self.write(" if ")
+            self.traverse(node.guard)
+        with self.block():
+            self.traverse(node.body)
+
+    def visit_MatchValue(self, node):
+        self.traverse(node.value)
+
+    def visit_MatchSingleton(self, node):
+        self._write_constant(node.value)
+
+    def visit_MatchSequence(self, node):
+        with self.delimit("[", "]"):
+            self.interleave(
+                lambda: self.write(", "), self.traverse, node.patterns
+            )
+
+    def visit_MatchStar(self, node):
+        name = node.name
+        if name is None:
+            name = "_"
+        self.write(f"*{name}")
+
+    def visit_MatchMapping(self, node):
+        def write_key_pattern_pair(pair):
+            k, p = pair
+            self.traverse(k)
+            self.write(": ")
+            self.traverse(p)
+
+        with self.delimit("{", "}"):
+            keys = node.keys
+            self.interleave(
+                lambda: self.write(", "),
+                write_key_pattern_pair,
+                zip(keys, node.patterns, strict=True),
+            )
+            rest = node.rest
+            if rest is not None:
+                if keys:
+                    self.write(", ")
+                self.write(f"**{rest}")
+
+    def visit_MatchClass(self, node):
+        self.set_precedence(_Precedence.ATOM, node.cls)
+        self.traverse(node.cls)
+        with self.delimit("(", ")"):
+            patterns = node.patterns
+            self.interleave(
+                lambda: self.write(", "), self.traverse, patterns
+            )
+            attrs = node.kwd_attrs
+            if attrs:
+                def write_attr_pattern(pair):
+                    attr, pattern = pair
+                    self.write(f"{attr}=")
+                    self.traverse(pattern)
+
+                if patterns:
+                    self.write(", ")
+                self.interleave(
+                    lambda: self.write(", "),
+                    write_attr_pattern,
+                    zip(attrs, node.kwd_patterns, strict=True),
+                )
+
+    def visit_MatchAs(self, node):
+        name = node.name
+        pattern = node.pattern
+        if name is None:
+            self.write("_")
+        elif pattern is None:
+            self.write(node.name)
+        else:
+            with self.require_parens(_Precedence.TEST, node):
+                self.set_precedence(_Precedence.BOR, node.pattern)
+                self.traverse(node.pattern)
+                self.write(f" as {node.name}")
+
+    def visit_MatchOr(self, node):
+        with self.require_parens(_Precedence.BOR, node):
+            self.set_precedence(_Precedence.BOR.next(), *node.patterns)
+            self.interleave(lambda: self.write(" | "), self.traverse, node.patterns)
+
 def unparse(ast_obj):
     unparser = _Unparser()
     return unparser.visit(ast_obj)
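
The unparser hunks above add `match` statement support, and empty `Set`
nodes now render as `{*()}` (a bare `{}` would re-parse as a dict, and the
name `set` may be shadowed). Both behaviors on a 3.10 interpreter:

    import ast

    src = "match pt:\n    case [x, y] if x > 0:\n        print(x, y)"
    print(ast.unparse(ast.parse(src)))     # round-trips through visit_Match

    print(ast.unparse(ast.Set(elts=[])))   # {*()}
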
diff --git a/Lib/asynchat.py b/Lib/asynchat.py
index f4ba361..de26ffa 100644
--- a/Lib/asynchat.py
+++ b/Lib/asynchat.py
@@ -48,6 +48,14 @@
 import asyncore
 from collections import deque
 
+from warnings import warn
+warn(
+    'The asynchat module is deprecated. '
+    'The recommended replacement is asyncio',
+    DeprecationWarning,
+    stacklevel=2)
+
+
 
 class async_chat(asyncore.dispatcher):
     """This is an abstract class.  You must derive from this class, and add
diff --git a/Lib/asyncio/__init__.py b/Lib/asyncio/__init__.py
index eb84bfb..200b14c 100644
--- a/Lib/asyncio/__init__.py
+++ b/Lib/asyncio/__init__.py
@@ -20,10 +20,6 @@
 from .threads import *
 from .transports import *
 
-# Exposed for _asynciomodule.c to implement now deprecated
-# Task.all_tasks() method.  This function will be removed in 3.9.
-from .tasks import _all_tasks_compat  # NoQA
-
 __all__ = (base_events.__all__ +
            coroutines.__all__ +
            events.__all__ +
diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py
index b2d446a..952da11 100644
--- a/Lib/asyncio/base_events.py
+++ b/Lib/asyncio/base_events.py
@@ -49,7 +49,7 @@
 from .log import logger
 
 
-__all__ = 'BaseEventLoop',
+__all__ = 'BaseEventLoop','Server',
 
 
 # Minimum number of _scheduled timer handles before cleanup of
@@ -202,6 +202,11 @@
         pass
 
 
+def _check_ssl_socket(sock):
+    if ssl is not None and isinstance(sock, ssl.SSLSocket):
+        raise TypeError("Socket cannot be of type SSLSocket")
+
+
 class _SendfileFallbackProtocol(protocols.Protocol):
     def __init__(self, transp):
         if not isinstance(transp, transports._FlowControlMixin):
@@ -350,7 +355,7 @@
         self._start_serving()
         # Skip one loop iteration so that all 'loop.add_reader'
         # go through.
-        await tasks.sleep(0, loop=self._loop)
+        await tasks.sleep(0)
 
     async def serve_forever(self):
         if self._serving_forever_fut is not None:
@@ -541,8 +546,7 @@
 
         results = await tasks.gather(
             *[ag.aclose() for ag in closing_agens],
-            return_exceptions=True,
-            loop=self)
+            return_exceptions=True)
 
         for result, agen in zip(results, closing_agens):
             if isinstance(result, Exception):
@@ -864,6 +868,7 @@
                             *, fallback=True):
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
+        _check_ssl_socket(sock)
         self._check_sendfile_params(sock, file, offset, count)
         try:
             return await self._sock_sendfile_native(sock, file,
@@ -973,7 +978,7 @@
             happy_eyeballs_delay=None, interleave=None):
         """Connect to a TCP server.
 
-        Create a streaming transport connection to a given Internet host and
+        Create a streaming transport connection to a given internet host and
         port: socket family AF_INET or socket.AF_INET6 depending on host (or
         family if specified), socket type SOCK_STREAM. protocol_factory must be
         a callable returning a protocol instance.
@@ -1005,6 +1010,9 @@
             raise ValueError(
                 'ssl_handshake_timeout is only meaningful with ssl')
 
+        if sock is not None:
+            _check_ssl_socket(sock)
+
         if happy_eyeballs_delay is not None and interleave is None:
             # If using happy eyeballs, default to interleave addresses by family
             interleave = 1
@@ -1438,6 +1446,9 @@
             raise ValueError(
                 'ssl_handshake_timeout is only meaningful with ssl')
 
+        if sock is not None:
+            _check_ssl_socket(sock)
+
         if host is not None or port is not None:
             if sock is not None:
                 raise ValueError(
@@ -1457,7 +1468,7 @@
             fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                   flags=flags)
                   for host in hosts]
-            infos = await tasks.gather(*fs, loop=self)
+            infos = await tasks.gather(*fs)
             infos = set(itertools.chain.from_iterable(infos))
 
             completed = False
@@ -1515,7 +1526,7 @@
             server._start_serving()
             # Skip one loop iteration so that all 'loop.add_reader'
             # go through.
-            await tasks.sleep(0, loop=self)
+            await tasks.sleep(0)
 
         if self._debug:
             logger.info("%r is serving", server)
@@ -1525,14 +1536,6 @@
             self, protocol_factory, sock,
             *, ssl=None,
             ssl_handshake_timeout=None):
-        """Handle an accepted connection.
-
-        This is used by servers that accept connections outside of
-        asyncio but that use asyncio to handle connections.
-
-        This method is a coroutine.  When completed, the coroutine
-        returns a (transport, protocol) pair.
-        """
         if sock.type != socket.SOCK_STREAM:
             raise ValueError(f'A Stream Socket was expected, got {sock!r}')
 
@@ -1540,6 +1543,9 @@
             raise ValueError(
                 'ssl_handshake_timeout is only meaningful with ssl')
 
+        if sock is not None:
+            _check_ssl_socket(sock)
+
         transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, '', server_side=True,
             ssl_handshake_timeout=ssl_handshake_timeout)
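
`_check_ssl_socket()` above makes the loop reject pre-wrapped
`ssl.SSLSocket` objects up front; TLS is expected to go through the `ssl=`
parameter instead. A sketch of the failure mode -- no server is needed,
because the check fires before any I/O:

    import asyncio
    import socket
    import ssl

    async def main():
        ctx = ssl.create_default_context()
        ssock = ctx.wrap_socket(socket.socket(),
                                server_hostname="example.com",
                                do_handshake_on_connect=False)
        try:
            await asyncio.get_running_loop().create_connection(
                asyncio.Protocol, sock=ssock)
        except TypeError as exc:
            print(exc)   # Socket cannot be of type SSLSocket

    asyncio.run(main())
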
diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py
index 0dce87b..5ab1acc 100644
--- a/Lib/asyncio/events.py
+++ b/Lib/asyncio/events.py
@@ -258,13 +258,13 @@
         """Notification that a TimerHandle has been cancelled."""
         raise NotImplementedError
 
-    def call_soon(self, callback, *args):
-        return self.call_later(0, callback, *args)
+    def call_soon(self, callback, *args, context=None):
+        return self.call_later(0, callback, *args, context=context)
 
-    def call_later(self, delay, callback, *args):
+    def call_later(self, delay, callback, *args, context=None):
         raise NotImplementedError
 
-    def call_at(self, when, callback, *args):
+    def call_at(self, when, callback, *args, context=None):
         raise NotImplementedError
 
     def time(self):
@@ -280,7 +280,7 @@
 
     # Methods for interacting with threads.
 
-    def call_soon_threadsafe(self, callback, *args):
+    def call_soon_threadsafe(self, callback, *args, context=None):
         raise NotImplementedError
 
     def run_in_executor(self, executor, func, *args):
@@ -418,6 +418,20 @@
         """
         raise NotImplementedError
 
+    async def connect_accepted_socket(
+            self, protocol_factory, sock,
+            *, ssl=None,
+            ssl_handshake_timeout=None):
+        """Handle an accepted connection.
+
+        This is used by servers that accept connections outside of
+        asyncio, but use asyncio to handle connections.
+
+        This method is a coroutine.  When completed, the coroutine
+        returns a (transport, protocol) pair.
+        """
+        raise NotImplementedError
+
     async def create_datagram_endpoint(self, protocol_factory,
                                        local_addr=None, remote_addr=None, *,
                                        family=0, proto=0, flags=0,
@@ -465,7 +479,7 @@
         # The reason to accept file-like object instead of just file descriptor
         # is: we need to own pipe and close it at transport finishing
         # Can got complicated errors if pass f.fileno(),
-        # close fd in pipe transport then close f and vise versa.
+        # close fd in pipe transport then close f and vice versa.
         raise NotImplementedError
 
     async def connect_write_pipe(self, protocol_factory, pipe):
@@ -478,7 +492,7 @@
         # The reason to accept file-like object instead of just file descriptor
         # is: we need to own pipe and close it at transport finishing
         # Can got complicated errors if pass f.fileno(),
-        # close fd in pipe transport then close f and vise versa.
+        # close fd in pipe transport then close f and vice versa.
         raise NotImplementedError
 
     async def subprocess_shell(self, protocol_factory, cmd, *,
@@ -745,9 +759,16 @@
     the result of `get_event_loop_policy().get_event_loop()` call.
     """
     # NOTE: this function is implemented in C (see _asynciomodule.c)
+    return _py__get_event_loop()
+
+
+def _get_event_loop(stacklevel=3):
     current_loop = _get_running_loop()
     if current_loop is not None:
         return current_loop
+    import warnings
+    warnings.warn('There is no current event loop',
+                  DeprecationWarning, stacklevel=stacklevel)
     return get_event_loop_policy().get_event_loop()
 
 
@@ -777,6 +798,7 @@
 _py__set_running_loop = _set_running_loop
 _py_get_running_loop = get_running_loop
 _py_get_event_loop = get_event_loop
+_py__get_event_loop = _get_event_loop
 
 
 try:
@@ -784,7 +806,7 @@
     # functions in asyncio.  Pure Python implementation is
     # about 4 times slower than C-accelerated.
     from _asyncio import (_get_running_loop, _set_running_loop,
-                          get_running_loop, get_event_loop)
+                          get_running_loop, get_event_loop, _get_event_loop)
 except ImportError:
     pass
 else:
@@ -793,3 +815,4 @@
     _c__set_running_loop = _set_running_loop
     _c_get_running_loop = get_running_loop
     _c_get_event_loop = get_event_loop
+    _c__get_event_loop = _get_event_loop
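
The new `_get_event_loop(stacklevel=3)` helper above is what implicit-loop
call sites route through (the futures.py hunks below switch `Future` and
`wrap_future` to it), so creating a future with no running loop now emits
"There is no current event loop" before falling back to the policy loop.
Escalated to an error here to make it observable:

    import asyncio
    import warnings

    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)
        try:
            asyncio.Future()   # no running event loop at this point
        except DeprecationWarning as exc:
            print(exc)         # There is no current event loop
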
diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py
index bed4da5..8e8cd87 100644
--- a/Lib/asyncio/futures.py
+++ b/Lib/asyncio/futures.py
@@ -8,6 +8,7 @@
 import contextvars
 import logging
 import sys
+from types import GenericAlias
 
 from . import base_futures
 from . import events
@@ -76,7 +77,7 @@
         the default event loop.
         """
         if loop is None:
-            self._loop = events.get_event_loop()
+            self._loop = events._get_event_loop()
         else:
             self._loop = loop
         self._callbacks = []
@@ -106,8 +107,7 @@
             context['source_traceback'] = self._source_traceback
         self._loop.call_exception_handler(context)
 
-    def __class_getitem__(cls, type):
-        return cls
+    __class_getitem__ = classmethod(GenericAlias)
 
     @property
     def _log_traceback(self):
@@ -115,7 +115,7 @@
 
     @_log_traceback.setter
     def _log_traceback(self, val):
-        if bool(val):
+        if val:
             raise ValueError('_log_traceback can only be set to False')
         self.__log_traceback = False
 
@@ -408,7 +408,7 @@
     assert isinstance(future, concurrent.futures.Future), \
         f'concurrent.futures.Future is expected, got {future!r}'
     if loop is None:
-        loop = events.get_event_loop()
+        loop = events._get_event_loop()
     new_future = loop.create_future()
     _chain_future(future, new_future)
     return new_future
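
`Future.__class_getitem__` is now the shared `types.GenericAlias` machinery
rather than a stub returning the class itself, so parameterizations are
introspectable:

    import asyncio
    import types

    alias = asyncio.Future[int]
    print(type(alias) is types.GenericAlias)   # True (was Future itself)
    print(alias.__args__)                      # (<class 'int'>,)
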
diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py
index f1ce732..4fef64e 100644
--- a/Lib/asyncio/locks.py
+++ b/Lib/asyncio/locks.py
@@ -3,10 +3,9 @@
 __all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore')
 
 import collections
-import warnings
 
-from . import events
 from . import exceptions
+from . import mixins
 
 
 class _ContextManagerMixin:
@@ -20,7 +19,7 @@
         self.release()
 
 
-class Lock(_ContextManagerMixin):
+class Lock(_ContextManagerMixin, mixins._LoopBoundMixin):
     """Primitive lock objects.
 
     A primitive lock is a synchronization primitive that is not owned
@@ -74,16 +73,10 @@
 
     """
 
-    def __init__(self, *, loop=None):
+    def __init__(self, *, loop=mixins._marker):
+        super().__init__(loop=loop)
         self._waiters = None
         self._locked = False
-        if loop is None:
-            self._loop = events.get_event_loop()
-        else:
-            self._loop = loop
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
 
     def __repr__(self):
         res = super().__repr__()
@@ -109,7 +102,7 @@
 
         if self._waiters is None:
             self._waiters = collections.deque()
-        fut = self._loop.create_future()
+        fut = self._get_loop().create_future()
         self._waiters.append(fut)
 
         # Finally block should be called before the CancelledError
@@ -161,7 +154,7 @@
             fut.set_result(True)
 
 
-class Event:
+class Event(mixins._LoopBoundMixin):
     """Asynchronous equivalent to threading.Event.
 
     Class implementing event objects. An event manages a flag that can be set
@@ -170,16 +163,10 @@
     false.
     """
 
-    def __init__(self, *, loop=None):
+    def __init__(self, *, loop=mixins._marker):
+        super().__init__(loop=loop)
         self._waiters = collections.deque()
         self._value = False
-        if loop is None:
-            self._loop = events.get_event_loop()
-        else:
-            self._loop = loop
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
 
     def __repr__(self):
         res = super().__repr__()
@@ -220,7 +207,7 @@
         if self._value:
             return True
 
-        fut = self._loop.create_future()
+        fut = self._get_loop().create_future()
         self._waiters.append(fut)
         try:
             await fut
@@ -229,7 +216,7 @@
             self._waiters.remove(fut)
 
 
-class Condition(_ContextManagerMixin):
+class Condition(_ContextManagerMixin, mixins._LoopBoundMixin):
     """Asynchronous equivalent to threading.Condition.
 
     This class implements condition variable objects. A condition variable
@@ -239,19 +226,10 @@
     A new Lock object is created and used as the underlying lock.
     """
 
-    def __init__(self, lock=None, *, loop=None):
-        if loop is None:
-            self._loop = events.get_event_loop()
-        else:
-            self._loop = loop
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
-
+    def __init__(self, lock=None, *, loop=mixins._marker):
+        super().__init__(loop=loop)
         if lock is None:
-            lock = Lock(loop=loop)
-        elif lock._loop is not self._loop:
-            raise ValueError("loop argument must agree with lock")
+            lock = Lock()
 
         self._lock = lock
         # Export the lock's locked(), acquire() and release() methods.
@@ -284,7 +262,7 @@
 
         self.release()
         try:
-            fut = self._loop.create_future()
+            fut = self._get_loop().create_future()
             self._waiters.append(fut)
             try:
                 await fut
@@ -351,7 +329,7 @@
         self.notify(len(self._waiters))
 
 
-class Semaphore(_ContextManagerMixin):
+class Semaphore(_ContextManagerMixin, mixins._LoopBoundMixin):
     """A Semaphore implementation.
 
     A semaphore manages an internal counter which is decremented by each
@@ -366,18 +344,12 @@
     ValueError is raised.
     """
 
-    def __init__(self, value=1, *, loop=None):
+    def __init__(self, value=1, *, loop=mixins._marker):
+        super().__init__(loop=loop)
         if value < 0:
             raise ValueError("Semaphore initial value must be >= 0")
         self._value = value
         self._waiters = collections.deque()
-        if loop is None:
-            self._loop = events.get_event_loop()
-        else:
-            self._loop = loop
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
 
     def __repr__(self):
         res = super().__repr__()
@@ -407,7 +379,7 @@
         True.
         """
         while self._value <= 0:
-            fut = self._loop.create_future()
+            fut = self._get_loop().create_future()
             self._waiters.append(fut)
             try:
                 await fut
@@ -436,12 +408,7 @@
     above the initial value.
     """
 
-    def __init__(self, value=1, *, loop=None):
-        if loop:
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
-
+    def __init__(self, value=1, *, loop=mixins._marker):
         self._bound_value = value
         super().__init__(value, loop=loop)
 
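
With the mixin in place (see Lib/asyncio/mixins.py below), the primitives
bind to the running loop on first use, and passing loop= at all is now a
hard TypeError rather than a deprecation. Sketch of the resulting behavior:

    import asyncio

    lock = asyncio.Lock()        # no loop needed at construction time

    async def main():
        async with lock:         # loop is picked up while in use
            print('held:', lock.locked())

    asyncio.run(main())

    try:
        asyncio.Lock(loop=None)  # any value, even None, is rejected
    except TypeError as exc:
        print(exc)
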
diff --git a/Lib/asyncio/mixins.py b/Lib/asyncio/mixins.py
new file mode 100644
index 0000000..650df05
--- /dev/null
+++ b/Lib/asyncio/mixins.py
@@ -0,0 +1,31 @@
+"""Event loop mixins."""
+
+import threading
+from . import events
+
+_global_lock = threading.Lock()
+
+# Used as a sentinel for loop parameter
+_marker = object()
+
+
+class _LoopBoundMixin:
+    _loop = None
+
+    def __init__(self, *, loop=_marker):
+        if loop is not _marker:
+            raise TypeError(
+                f'As of 3.10, the *loop* parameter was removed from '
+                f'{type(self).__name__}() since it is no longer necessary'
+            )
+
+    def _get_loop(self):
+        loop = events._get_running_loop()
+
+        if self._loop is None:
+            with _global_lock:
+                if self._loop is None:
+                    self._loop = loop
+        if loop is not self._loop:
+            raise RuntimeError(f'{self!r} is bound to a different event loop')
+        return loop
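
_get_loop() caches the first running loop it sees on the instance and then
rejects use from any other loop. A small demonstration with a Semaphore
(illustration; relies only on the code above):

    import asyncio

    sem = asyncio.Semaphore(0)

    async def acquire_once():
        # acquire() blocks (value is 0), so it calls _get_loop() and
        # binds the semaphore to the currently running loop.
        asyncio.get_running_loop().call_later(0.01, sem.release)
        await sem.acquire()

    asyncio.run(acquire_once())      # binds sem to the first loop

    try:
        asyncio.run(acquire_once())  # a fresh loop: _get_loop() raises
    except RuntimeError as exc:
        print(exc)                   # bound to a different event loop
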
diff --git a/Lib/asyncio/proactor_events.py b/Lib/asyncio/proactor_events.py
index b4cd414..411685b 100644
--- a/Lib/asyncio/proactor_events.py
+++ b/Lib/asyncio/proactor_events.py
@@ -158,7 +158,7 @@
             # end then it may fail with ERROR_NETNAME_DELETED if we
             # just close our end.  First calling shutdown() seems to
             # cure it, but maybe using DisconnectEx() would be better.
-            if hasattr(self._sock, 'shutdown'):
+            if hasattr(self._sock, 'shutdown') and self._sock.fileno() != -1:
                 self._sock.shutdown(socket.SHUT_RDWR)
             self._sock.close()
             self._sock = None
@@ -179,11 +179,12 @@
     """Transport for read pipes."""
 
     def __init__(self, loop, sock, protocol, waiter=None,
-                 extra=None, server=None):
-        self._pending_data = None
+                 extra=None, server=None, buffer_size=65536):
+        self._pending_data_length = -1
         self._paused = True
         super().__init__(loop, sock, protocol, waiter, extra, server)
 
+        self._data = bytearray(buffer_size)
         self._loop.call_soon(self._loop_reading)
         self._paused = False
 
@@ -217,12 +218,12 @@
         if self._read_fut is None:
             self._loop.call_soon(self._loop_reading, None)
 
-        data = self._pending_data
-        self._pending_data = None
-        if data is not None:
+        length = self._pending_data_length
+        self._pending_data_length = -1
+        if length > -1:
             # Call the protocol method after calling _loop_reading(),
             # since the protocol can decide to pause reading again.
-            self._loop.call_soon(self._data_received, data)
+            self._loop.call_soon(self._data_received, self._data[:length], length)
 
         if self._loop.get_debug():
             logger.debug("%r resumes reading", self)
@@ -243,15 +244,15 @@
         if not keep_open:
             self.close()
 
-    def _data_received(self, data):
+    def _data_received(self, data, length):
         if self._paused:
             # Don't call any protocol method while reading is paused.
             # The protocol will be called on resume_reading().
-            assert self._pending_data is None
-            self._pending_data = data
+            assert self._pending_data_length == -1
+            self._pending_data_length = length
             return
 
-        if not data:
+        if length == 0:
             self._eof_received()
             return
 
@@ -269,6 +270,7 @@
             self._protocol.data_received(data)
 
     def _loop_reading(self, fut=None):
+        length = -1
         data = None
         try:
             if fut is not None:
@@ -277,18 +279,18 @@
                 self._read_fut = None
                 if fut.done():
                     # deliver data later in "finally" clause
-                    data = fut.result()
+                    length = fut.result()
+                    if length == 0:
+                        # we got end-of-file so no need to reschedule a new read
+                        return
+
+                    data = self._data[:length]
                 else:
                     # the future will be replaced by next proactor.recv call
                     fut.cancel()
 
             if self._closing:
                 # since close() has been called we ignore any read data
-                data = None
-                return
-
-            if data == b'':
-                # we got end-of-file so no need to reschedule a new read
                 return
 
             # bpo-33694: buffer_updated() has currently no fast path because of
@@ -296,7 +298,7 @@
 
             if not self._paused:
                 # reschedule a new read
-                self._read_fut = self._loop._proactor.recv(self._sock, 32768)
+                self._read_fut = self._loop._proactor.recv_into(self._sock, self._data)
         except ConnectionAbortedError as exc:
             if not self._closing:
                 self._fatal_error(exc, 'Fatal read error on pipe transport')
@@ -314,8 +316,8 @@
             if not self._paused:
                 self._read_fut.add_done_callback(self._loop_reading)
         finally:
-            if data is not None:
-                self._data_received(data)
+            if length > -1:
+                self._data_received(data, length)
 
 
 class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
@@ -450,7 +452,8 @@
             self.close()
 
 
-class _ProactorDatagramTransport(_ProactorBasePipeTransport):
+class _ProactorDatagramTransport(_ProactorBasePipeTransport,
+                                 transports.DatagramTransport):
     max_size = 256 * 1024
     def __init__(self, loop, sock, protocol, address=None,
                  waiter=None, extra=None):
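
The read path above drops the per-read recv(32768) allocation in favor of
one reusable buffer filled by recv_into(). The same pattern on a bare
socket pair, as an illustration:

    import socket

    a, b = socket.socketpair()
    buf = bytearray(65536)       # reused across reads, like self._data

    a.sendall(b'hello proactor')
    n = b.recv_into(buf)         # returns the byte count; 0 means EOF
    print(bytes(buf[:n]))

    a.close()
    b.close()
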
diff --git a/Lib/asyncio/protocols.py b/Lib/asyncio/protocols.py
index 69fa43e..09987b1 100644
--- a/Lib/asyncio/protocols.py
+++ b/Lib/asyncio/protocols.py
@@ -109,10 +109,6 @@
 class BufferedProtocol(BaseProtocol):
     """Interface for stream protocol with manual buffer control.
 
-    Important: this has been added to asyncio in Python 3.7
-    *on a provisional basis*!  Consider it as an experimental API that
-    might be changed or removed in Python 3.8.
-
     Event methods, such as `create_server` and `create_connection`,
     accept factories that return protocols that implement this interface.
 
diff --git a/Lib/asyncio/queues.py b/Lib/asyncio/queues.py
index cd3f7c6..10dd689 100644
--- a/Lib/asyncio/queues.py
+++ b/Lib/asyncio/queues.py
@@ -2,10 +2,10 @@
 
 import collections
 import heapq
-import warnings
+from types import GenericAlias
 
-from . import events
 from . import locks
+from . import mixins
 
 
 class QueueEmpty(Exception):
@@ -18,7 +18,7 @@
     pass
 
 
-class Queue:
+class Queue(mixins._LoopBoundMixin):
     """A queue, useful for coordinating producer and consumer coroutines.
 
     If maxsize is less than or equal to zero, the queue size is infinite. If it
@@ -30,14 +30,8 @@
     interrupted between calling qsize() and doing an operation on the Queue.
     """
 
-    def __init__(self, maxsize=0, *, loop=None):
-        if loop is None:
-            self._loop = events.get_event_loop()
-        else:
-            self._loop = loop
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
+    def __init__(self, maxsize=0, *, loop=mixins._marker):
+        super().__init__(loop=loop)
         self._maxsize = maxsize
 
         # Futures.
@@ -45,7 +39,7 @@
         # Futures.
         self._putters = collections.deque()
         self._unfinished_tasks = 0
-        self._finished = locks.Event(loop=loop)
+        self._finished = locks.Event()
         self._finished.set()
         self._init(maxsize)
 
@@ -76,8 +70,7 @@
     def __str__(self):
         return f'<{type(self).__name__} {self._format()}>'
 
-    def __class_getitem__(cls, type):
-        return cls
+    __class_getitem__ = classmethod(GenericAlias)
 
     def _format(self):
         result = f'maxsize={self._maxsize!r}'
@@ -122,7 +115,7 @@
         slot is available before adding item.
         """
         while self.full():
-            putter = self._loop.create_future()
+            putter = self._get_loop().create_future()
             self._putters.append(putter)
             try:
                 await putter
@@ -160,7 +153,7 @@
         If queue is empty, wait until an item is available.
         """
         while self.empty():
-            getter = self._loop.create_future()
+            getter = self._get_loop().create_future()
             self._getters.append(getter)
             try:
                 await getter
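
Queue picks up the same loop-binding mixin plus GenericAlias support.
Illustration:

    import asyncio

    print(asyncio.Queue[int])     # Queue is now subscriptable

    async def main():
        q = asyncio.Queue()
        await q.put(1)
        print(await q.get())

    asyncio.run(main())

    try:
        asyncio.Queue(loop=None)  # rejected by the mixin
    except TypeError as exc:
        print(exc)
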
diff --git a/Lib/asyncio/runners.py b/Lib/asyncio/runners.py
index 268635d..9a5e9a4 100644
--- a/Lib/asyncio/runners.py
+++ b/Lib/asyncio/runners.py
@@ -60,8 +60,7 @@
     for task in to_cancel:
         task.cancel()
 
-    loop.run_until_complete(
-        tasks.gather(*to_cancel, loop=loop, return_exceptions=True))
+    loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))
 
     for task in to_cancel:
         if task.cancelled():
diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py
index 59cb6b1..71080b8 100644
--- a/Lib/asyncio/selector_events.py
+++ b/Lib/asyncio/selector_events.py
@@ -40,11 +40,6 @@
         return bool(key.events & event)
 
 
-def _check_ssl_socket(sock):
-    if ssl is not None and isinstance(sock, ssl.SSLSocket):
-        raise TypeError("Socket cannot be of type SSLSocket")
-
-
 class BaseSelectorEventLoop(base_events.BaseEventLoop):
     """Selector event loop.
 
@@ -357,7 +352,7 @@
         The maximum amount of data to be received at once is specified by
         nbytes.
         """
-        _check_ssl_socket(sock)
+        base_events._check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
         try:
@@ -398,7 +393,7 @@
         The received data is written into *buf* (a writable buffer).
         The return value is the number of bytes written.
         """
-        _check_ssl_socket(sock)
+        base_events._check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
         try:
@@ -439,7 +434,7 @@
         raised, and there is no way to determine how much data, if any, was
         successfully processed by the receiving end of the connection.
         """
-        _check_ssl_socket(sock)
+        base_events._check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
         try:
@@ -488,13 +483,15 @@
 
         This method is a coroutine.
         """
-        _check_ssl_socket(sock)
+        base_events._check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
 
         if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX:
             resolved = await self._ensure_resolved(
-                address, family=sock.family, proto=sock.proto, loop=self)
+                address, family=sock.family, type=sock.type, proto=sock.proto,
+                loop=self,
+            )
             _, _, _, _, address = resolved[0]
 
         fut = self.create_future()
@@ -553,7 +550,7 @@
         object usable to send and receive data on the connection, and address
         is the address bound to the socket on the other end of the connection.
         """
-        _check_ssl_socket(sock)
+        base_events._check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
         fut = self.create_future()
diff --git a/Lib/asyncio/sslproto.py b/Lib/asyncio/sslproto.py
index cad25b2..00fc16c 100644
--- a/Lib/asyncio/sslproto.py
+++ b/Lib/asyncio/sslproto.py
@@ -367,6 +367,12 @@
         """Return the current size of the write buffer."""
         return self._ssl_protocol._transport.get_write_buffer_size()
 
+    def get_write_buffer_limits(self):
+        """Get the high and low watermarks for write flow control.
+        Return a tuple (low, high) where low and high are
+        positive numbers of bytes."""
+        return self._ssl_protocol._transport.get_write_buffer_limits()
+
     @property
     def _protocol_paused(self):
         # Required for sendfile fallback pause_writing/resume_writing logic
diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py
index 3c80bb8..080d8a6 100644
--- a/Lib/asyncio/streams.py
+++ b/Lib/asyncio/streams.py
@@ -23,7 +23,7 @@
 
 
 async def open_connection(host=None, port=None, *,
-                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
+                          limit=_DEFAULT_LIMIT, **kwds):
     """A wrapper for create_connection() returning a (reader, writer) pair.
 
     The reader returned is a StreamReader instance; the writer is a
@@ -41,12 +41,7 @@
     StreamReaderProtocol classes, just copy the code -- there's
     really nothing special here except some convenience.)
     """
-    if loop is None:
-        loop = events.get_event_loop()
-    else:
-        warnings.warn("The loop argument is deprecated since Python 3.8, "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning, stacklevel=2)
+    loop = events.get_running_loop()
     reader = StreamReader(limit=limit, loop=loop)
     protocol = StreamReaderProtocol(reader, loop=loop)
     transport, _ = await loop.create_connection(
@@ -56,7 +51,7 @@
 
 
 async def start_server(client_connected_cb, host=None, port=None, *,
-                       loop=None, limit=_DEFAULT_LIMIT, **kwds):
+                       limit=_DEFAULT_LIMIT, **kwds):
     """Start a socket server, call back for each client connected.
 
     The first parameter, `client_connected_cb`, takes two parameters:
@@ -78,12 +73,7 @@
     The return value is the same as loop.create_server(), i.e. a
     Server object which can be used to stop the service.
     """
-    if loop is None:
-        loop = events.get_event_loop()
-    else:
-        warnings.warn("The loop argument is deprecated since Python 3.8, "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning, stacklevel=2)
+    loop = events.get_running_loop()
 
     def factory():
         reader = StreamReader(limit=limit, loop=loop)
@@ -98,14 +88,10 @@
     # UNIX Domain Sockets are supported on this platform
 
     async def open_unix_connection(path=None, *,
-                                   loop=None, limit=_DEFAULT_LIMIT, **kwds):
+                                   limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `open_connection` but works with UNIX Domain Sockets."""
-        if loop is None:
-            loop = events.get_event_loop()
-        else:
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
+        loop = events.get_running_loop()
+
         reader = StreamReader(limit=limit, loop=loop)
         protocol = StreamReaderProtocol(reader, loop=loop)
         transport, _ = await loop.create_unix_connection(
@@ -114,14 +100,9 @@
         return reader, writer
 
     async def start_unix_server(client_connected_cb, path=None, *,
-                                loop=None, limit=_DEFAULT_LIMIT, **kwds):
+                                limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `start_server` but works with UNIX Domain Sockets."""
-        if loop is None:
-            loop = events.get_event_loop()
-        else:
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
+        loop = events.get_running_loop()
 
         def factory():
             reader = StreamReader(limit=limit, loop=loop)
@@ -144,7 +125,7 @@
 
     def __init__(self, loop=None):
         if loop is None:
-            self._loop = events.get_event_loop()
+            self._loop = events._get_event_loop(stacklevel=4)
         else:
             self._loop = loop
         self._paused = False
@@ -302,9 +283,13 @@
     def __del__(self):
         # Prevent reports about unhandled exceptions.
         # Better than self._closed._log_traceback = False hack
-        closed = self._closed
-        if closed.done() and not closed.cancelled():
-            closed.exception()
+        try:
+            closed = self._closed
+        except AttributeError:
+            pass  # failed constructor
+        else:
+            if closed.done() and not closed.cancelled():
+                closed.exception()
 
 
 class StreamWriter:
@@ -400,7 +385,7 @@
 
         self._limit = limit
         if loop is None:
-            self._loop = events.get_event_loop()
+            self._loop = events._get_event_loop()
         else:
             self._loop = loop
         self._buffer = bytearray()
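
With loop= gone, the stream helpers require a running loop. Sketch
(example.com on port 80 is a placeholder peer and needs network access):

    import asyncio

    async def fetch_banner():
        # open_connection() now uses get_running_loop() internally
        reader, writer = await asyncio.open_connection('example.com', 80)
        writer.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
        await writer.drain()
        line = await reader.readline()
        writer.close()
        await writer.wait_closed()
        return line

    print(asyncio.run(fetch_banner()))
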
diff --git a/Lib/asyncio/subprocess.py b/Lib/asyncio/subprocess.py
index c9506b1..cd10231 100644
--- a/Lib/asyncio/subprocess.py
+++ b/Lib/asyncio/subprocess.py
@@ -1,7 +1,6 @@
 __all__ = 'create_subprocess_exec', 'create_subprocess_shell'
 
 import subprocess
-import warnings
 
 from . import events
 from . import protocols
@@ -193,24 +192,14 @@
             stderr = self._read_stream(2)
         else:
             stderr = self._noop()
-        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr,
-                                                   loop=self._loop)
+        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr)
         await self.wait()
         return (stdout, stderr)
 
 
 async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
-                                  loop=None, limit=streams._DEFAULT_LIMIT,
-                                  **kwds):
-    if loop is None:
-        loop = events.get_event_loop()
-    else:
-        warnings.warn("The loop argument is deprecated since Python 3.8 "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning,
-                      stacklevel=2
-        )
-
+                                  limit=streams._DEFAULT_LIMIT, **kwds):
+    loop = events.get_running_loop()
     protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                         loop=loop)
     transport, protocol = await loop.subprocess_shell(
@@ -221,16 +210,9 @@
 
 
 async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
-                                 stderr=None, loop=None,
-                                 limit=streams._DEFAULT_LIMIT, **kwds):
-    if loop is None:
-        loop = events.get_event_loop()
-    else:
-        warnings.warn("The loop argument is deprecated since Python 3.8 "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning,
-                      stacklevel=2
-        )
+                                 stderr=None, limit=streams._DEFAULT_LIMIT,
+                                 **kwds):
+    loop = events.get_running_loop()
     protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                         loop=loop)
     transport, protocol = await loop.subprocess_exec(
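
Same shape for the subprocess helpers: calling them with no running loop
now raises RuntimeError instead of implicitly creating one. Sketch
(assumes a POSIX echo binary on PATH):

    import asyncio

    async def main():
        proc = await asyncio.create_subprocess_exec(
            'echo', 'hello', stdout=asyncio.subprocess.PIPE)
        out, _ = await proc.communicate()
        print(out)

    asyncio.run(main())
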
diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py
index f486b67..c4bedb5 100644
--- a/Lib/asyncio/tasks.py
+++ b/Lib/asyncio/tasks.py
@@ -17,6 +17,7 @@
 import types
 import warnings
 import weakref
+from types import GenericAlias
 
 from . import base_tasks
 from . import coroutines
@@ -61,30 +62,6 @@
             if futures._get_loop(t) is loop and not t.done()}
 
 
-def _all_tasks_compat(loop=None):
-    # Different from "all_task()" by returning *all* Tasks, including
-    # the completed ones.  Used to implement deprecated "Tasks.all_task()"
-    # method.
-    if loop is None:
-        loop = events.get_event_loop()
-    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another
-    # thread while we do so. Therefore we cast it to list prior to filtering. The list
-    # cast itself requires iteration, so we repeat it several times ignoring
-    # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for
-    # details.
-    i = 0
-    while True:
-        try:
-            tasks = list(_all_tasks)
-        except RuntimeError:
-            i += 1
-            if i >= 1000:
-                raise
-        else:
-            break
-    return {t for t in tasks if futures._get_loop(t) is loop}
-
-
 def _set_task_name(task, name):
     if name is not None:
         try:
@@ -147,8 +124,7 @@
             self._loop.call_exception_handler(context)
         super().__del__()
 
-    def __class_getitem__(cls, type):
-        return cls
+    __class_getitem__ = classmethod(GenericAlias)
 
     def _repr_info(self):
         return base_tasks._task_repr_info(self)
@@ -370,7 +346,7 @@
 ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
 
 
-async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
+async def wait(fs, *, timeout=None, return_when=ALL_COMPLETED):
     """Wait for the Futures and coroutines given by fs to complete.
 
     The fs iterable must not be empty.
@@ -393,12 +369,7 @@
     if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
         raise ValueError(f'Invalid return_when value: {return_when}')
 
-    if loop is None:
-        loop = events.get_running_loop()
-    else:
-        warnings.warn("The loop argument is deprecated since Python 3.8, "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning, stacklevel=2)
+    loop = events.get_running_loop()
 
     fs = set(fs)
 
@@ -418,7 +389,7 @@
         waiter.set_result(None)
 
 
-async def wait_for(fut, timeout, *, loop=None):
+async def wait_for(fut, timeout):
     """Wait for the single Future or coroutine to complete, with timeout.
 
     Coroutine will be wrapped in Task.
@@ -431,12 +402,7 @@
 
     This function is a coroutine.
     """
-    if loop is None:
-        loop = events.get_running_loop()
-    else:
-        warnings.warn("The loop argument is deprecated since Python 3.8, "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning, stacklevel=2)
+    loop = events.get_running_loop()
 
     if timeout is None:
         return await fut
@@ -449,11 +415,9 @@
 
         await _cancel_and_wait(fut, loop=loop)
         try:
-            fut.result()
+            return fut.result()
         except exceptions.CancelledError as exc:
             raise exceptions.TimeoutError() from exc
-        else:
-            raise exceptions.TimeoutError()
 
     waiter = loop.create_future()
     timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
@@ -471,7 +435,10 @@
                 return fut.result()
             else:
                 fut.remove_done_callback(cb)
-                fut.cancel()
+                # We must ensure that the task is not running
+                # after wait_for() returns.
+                # See https://bugs.python.org/issue32751
+                await _cancel_and_wait(fut, loop=loop)
                 raise
 
         if fut.done():
@@ -486,11 +453,9 @@
             # exception, we should re-raise it
             # See https://bugs.python.org/issue40607
             try:
-                fut.result()
+                return fut.result()
             except exceptions.CancelledError as exc:
                 raise exceptions.TimeoutError() from exc
-            else:
-                raise exceptions.TimeoutError()
     finally:
         timeout_handle.cancel()
 
@@ -556,7 +521,7 @@
 
 
 # This is *not* a @coroutine!  It is just an iterator (yielding Futures).
-def as_completed(fs, *, loop=None, timeout=None):
+def as_completed(fs, *, timeout=None):
     """Return an iterator whose values are coroutines.
 
     When waiting for the yielded coroutines you'll get the results (or
@@ -578,14 +543,9 @@
         raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}")
 
     from .queues import Queue  # Import here to avoid circular import problem.
-    done = Queue(loop=loop)
+    done = Queue()
 
-    if loop is None:
-        loop = events.get_event_loop()
-    else:
-        warnings.warn("The loop argument is deprecated since Python 3.8, "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning, stacklevel=2)
+    loop = events._get_event_loop()
     todo = {ensure_future(f, loop=loop) for f in set(fs)}
     timeout_handle = None
 
@@ -630,19 +590,13 @@
     yield
 
 
-async def sleep(delay, result=None, *, loop=None):
+async def sleep(delay, result=None):
     """Coroutine that completes after a given time (in seconds)."""
     if delay <= 0:
         await __sleep0()
         return result
 
-    if loop is None:
-        loop = events.get_running_loop()
-    else:
-        warnings.warn("The loop argument is deprecated since Python 3.8, "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning, stacklevel=2)
-
+    loop = events.get_running_loop()
     future = loop.create_future()
     h = loop.call_later(delay,
                         futures._set_result_unless_cancelled,
@@ -658,23 +612,32 @@
 
     If the argument is a Future, it is returned directly.
     """
-    if coroutines.iscoroutine(coro_or_future):
-        if loop is None:
-            loop = events.get_event_loop()
-        task = loop.create_task(coro_or_future)
-        if task._source_traceback:
-            del task._source_traceback[-1]
-        return task
-    elif futures.isfuture(coro_or_future):
+    return _ensure_future(coro_or_future, loop=loop)
+
+
+def _ensure_future(coro_or_future, *, loop=None):
+    if futures.isfuture(coro_or_future):
         if loop is not None and loop is not futures._get_loop(coro_or_future):
             raise ValueError('The future belongs to a different loop than '
-                             'the one specified as the loop argument')
+                            'the one specified as the loop argument')
         return coro_or_future
-    elif inspect.isawaitable(coro_or_future):
-        return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
-    else:
-        raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
-                        'required')
+    called_wrap_awaitable = False
+    if not coroutines.iscoroutine(coro_or_future):
+        if inspect.isawaitable(coro_or_future):
+            coro_or_future = _wrap_awaitable(coro_or_future)
+            called_wrap_awaitable = True
+        else:
+            raise TypeError('An asyncio.Future, a coroutine or an awaitable '
+                            'is required')
+
+    if loop is None:
+        loop = events._get_event_loop(stacklevel=4)
+    try:
+        return loop.create_task(coro_or_future)
+    except RuntimeError:
+        if not called_wrap_awaitable:
+            coro_or_future.close()
+        raise
 
 
 @types.coroutine
@@ -697,7 +660,8 @@
     cancelled.
     """
 
-    def __init__(self, children, *, loop=None):
+    def __init__(self, children, *, loop):
+        assert loop is not None
         super().__init__(loop=loop)
         self._children = children
         self._cancel_requested = False
@@ -717,7 +681,7 @@
         return ret
 
 
-def gather(*coros_or_futures, loop=None, return_exceptions=False):
+def gather(*coros_or_futures, return_exceptions=False):
     """Return a future aggregating results from the given coroutines/futures.
 
     Coroutines will be wrapped in a future and scheduled in the event
@@ -748,12 +712,7 @@
     gather won't cancel any other awaitables.
     """
     if not coros_or_futures:
-        if loop is None:
-            loop = events.get_event_loop()
-        else:
-            warnings.warn("The loop argument is deprecated since Python 3.8, "
-                          "and scheduled for removal in Python 3.10.",
-                          DeprecationWarning, stacklevel=2)
+        loop = events._get_event_loop()
         outer = loop.create_future()
         outer.set_result([])
         return outer
@@ -762,7 +721,7 @@
         nonlocal nfinished
         nfinished += 1
 
-        if outer.done():
+        if outer is None or outer.done():
             if not fut.cancelled():
                 # Mark exception retrieved.
                 fut.exception()
@@ -817,9 +776,11 @@
     children = []
     nfuts = 0
     nfinished = 0
+    loop = None
+    outer = None  # bpo-46672
     for arg in coros_or_futures:
         if arg not in arg_to_fut:
-            fut = ensure_future(arg, loop=loop)
+            fut = _ensure_future(arg, loop=loop)
             if loop is None:
                 loop = futures._get_loop(fut)
             if fut is not arg:
@@ -843,7 +804,7 @@
     return outer
 
 
-def shield(arg, *, loop=None):
+def shield(arg):
     """Wait for a future, shielding it from cancellation.
 
     The statement
@@ -869,11 +830,7 @@
         except CancelledError:
             res = None
     """
-    if loop is not None:
-        warnings.warn("The loop argument is deprecated since Python 3.8, "
-                      "and scheduled for removal in Python 3.10.",
-                      DeprecationWarning, stacklevel=2)
-    inner = ensure_future(arg, loop=loop)
+    inner = _ensure_future(arg)
     if inner.done():
         # Shortcut.
         return inner
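
One behavioral fix worth calling out: with the `return fut.result()`
changes above, wait_for() now returns the result of a future that manages
to complete while being cancelled, instead of discarding it behind a
TimeoutError. Illustration:

    import asyncio

    async def stubborn():
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            return 'finished during cancellation'   # completes anyway

    async def main():
        # Previously raised TimeoutError even though a result existed.
        print(await asyncio.wait_for(stubborn(), timeout=0.01))

    asyncio.run(main())
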
diff --git a/Lib/asyncio/threads.py b/Lib/asyncio/threads.py
index 34b7513..db048a8 100644
--- a/Lib/asyncio/threads.py
+++ b/Lib/asyncio/threads.py
@@ -13,7 +13,7 @@
     """Asynchronously run function *func* in a separate thread.
 
     Any *args and **kwargs supplied for this function are directly passed
-    to *func*. Also, the current :class:`contextvars.Context` is propogated,
+    to *func*. Also, the current :class:`contextvars.Context` is propagated,
     allowing context variables from the main thread to be accessed in the
     separate thread.
 
diff --git a/Lib/asyncio/transports.py b/Lib/asyncio/transports.py
index 45e155c..73b1fa2 100644
--- a/Lib/asyncio/transports.py
+++ b/Lib/asyncio/transports.py
@@ -99,6 +99,12 @@
         """Return the current size of the write buffer."""
         raise NotImplementedError
 
+    def get_write_buffer_limits(self):
+        """Get the high and low watermarks for write flow control.
+        Return a tuple (low, high) where low and high are
+        positive numbers of bytes."""
+        raise NotImplementedError
+
     def write(self, data):
         """Write some data bytes to the transport.
 
diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py
index f34a5b4..c88b818 100644
--- a/Lib/asyncio/unix_events.py
+++ b/Lib/asyncio/unix_events.py
@@ -44,6 +44,16 @@
     pass
 
 
+def waitstatus_to_exitcode(status):
+    try:
+        return os.waitstatus_to_exitcode(status)
+    except ValueError:
+        # The child exited, but we don't understand its status.
+        # This shouldn't happen, but if it does, let's just
+        # return that status; perhaps that helps debug it.
+        return status
+
+
 class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
     """Unix event loop.
 
@@ -323,7 +333,7 @@
             server._start_serving()
             # Skip one loop iteration so that all 'loop.add_reader'
             # go through.
-            await tasks.sleep(0, loop=self)
+            await tasks.sleep(0)
 
         return server
 
@@ -941,7 +951,7 @@
                 " will report returncode 255",
                 pid)
         else:
-            returncode = _compute_returncode(status)
+            returncode = waitstatus_to_exitcode(status)
 
         os.close(pidfd)
         callback(pid, returncode, *args)
@@ -956,20 +966,6 @@
         return True
 
 
-def _compute_returncode(status):
-    if os.WIFSIGNALED(status):
-        # The child process died because of a signal.
-        return -os.WTERMSIG(status)
-    elif os.WIFEXITED(status):
-        # The child process exited (e.g sys.exit()).
-        return os.WEXITSTATUS(status)
-    else:
-        # The child exited, but we don't understand its status.
-        # This shouldn't happen, but if it does, let's just
-        # return that status; perhaps that helps debug it.
-        return status
-
-
 class BaseChildWatcher(AbstractChildWatcher):
 
     def __init__(self):
@@ -1080,7 +1076,7 @@
                 # The child process is still alive.
                 return
 
-            returncode = _compute_returncode(status)
+            returncode = waitstatus_to_exitcode(status)
             if self._loop.get_debug():
                 logger.debug('process %s exited with returncode %s',
                              expected_pid, returncode)
@@ -1173,7 +1169,7 @@
                     # A child process is still alive.
                     return
 
-                returncode = _compute_returncode(status)
+                returncode = waitstatus_to_exitcode(status)
 
             with self._lock:
                 try:
@@ -1230,13 +1226,15 @@
 
     def close(self):
         self._callbacks.clear()
-        if self._saved_sighandler is not None:
-            handler = signal.getsignal(signal.SIGCHLD)
-            if handler != self._sig_chld:
-                logger.warning("SIGCHLD handler was changed by outside code")
-            else:
-                signal.signal(signal.SIGCHLD, self._saved_sighandler)
-            self._saved_sighandler = None
+        if self._saved_sighandler is None:
+            return
+
+        handler = signal.getsignal(signal.SIGCHLD)
+        if handler != self._sig_chld:
+            logger.warning("SIGCHLD handler was changed by outside code")
+        else:
+            signal.signal(signal.SIGCHLD, self._saved_sighandler)
+        self._saved_sighandler = None
 
     def __enter__(self):
         return self
@@ -1263,15 +1261,17 @@
         # The reason to do it here is that attach_loop() is called from
         # unix policy only for the main thread.
         # Main thread is required for subscription on SIGCHLD signal
-        if self._saved_sighandler is None:
-            self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld)
-            if self._saved_sighandler is None:
-                logger.warning("Previous SIGCHLD handler was set by non-Python code, "
-                               "restore to default handler on watcher close.")
-                self._saved_sighandler = signal.SIG_DFL
+        if self._saved_sighandler is not None:
+            return
 
-            # Set SA_RESTART to limit EINTR occurrences.
-            signal.siginterrupt(signal.SIGCHLD, False)
+        self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld)
+        if self._saved_sighandler is None:
+            logger.warning("Previous SIGCHLD handler was set by non-Python code, "
+                           "restore to default handler on watcher close.")
+            self._saved_sighandler = signal.SIG_DFL
+
+        # Set SA_RESTART to limit EINTR occurrences.
+        signal.siginterrupt(signal.SIGCHLD, False)
 
     def _do_waitpid_all(self):
         for pid in list(self._callbacks):
@@ -1296,7 +1296,7 @@
                 # The child process is still alive.
                 return
 
-            returncode = _compute_returncode(status)
+            returncode = waitstatus_to_exitcode(status)
             debug_log = True
         try:
             loop, callback, args = self._callbacks.pop(pid)
@@ -1379,7 +1379,7 @@
     def remove_child_handler(self, pid):
         # asyncio never calls remove_child_handler() !!!
         # The method is no-op but is implemented because
-        # abstract base classe requires it
+        # abstract base classes require it.
         return True
 
     def attach_loop(self, loop):
@@ -1399,7 +1399,7 @@
                 "Unknown child process pid %d, will report returncode 255",
                 pid)
         else:
-            returncode = _compute_returncode(status)
+            returncode = waitstatus_to_exitcode(status)
             if loop.get_debug():
                 logger.debug('process %s exited with returncode %s',
                              expected_pid, returncode)
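
All of the watchers now share os.waitstatus_to_exitcode() (added in 3.9)
instead of the removed hand-rolled _compute_returncode(). POSIX-only
illustration of the helper itself:

    import os

    pid = os.fork()
    if pid == 0:
        os._exit(7)                  # child exits with status 7
    _, status = os.waitpid(pid, 0)
    print(os.waitstatus_to_exitcode(status))   # 7 (negative if signalled)
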
diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py
index 5e7cd79..da81ab4 100644
--- a/Lib/asyncio/windows_events.py
+++ b/Lib/asyncio/windows_events.py
@@ -1,5 +1,10 @@
 """Selector and proactor event loops for Windows."""
 
+import sys
+
+if sys.platform != 'win32':  # pragma: no cover
+    raise ImportError('win32 only')
+
 import _overlapped
 import _winapi
 import errno
diff --git a/Lib/asyncore.py b/Lib/asyncore.py
index ce16f11..b1eea4b 100644
--- a/Lib/asyncore.py
+++ b/Lib/asyncore.py
@@ -57,6 +57,13 @@
      ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
      errorcode
 
+warnings.warn(
+    'The asyncore module is deprecated. '
+    'The recommended replacement is asyncio',
+    DeprecationWarning,
+    stacklevel=2)
+
+
 _DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
                            EBADF})
 
@@ -113,7 +120,7 @@
         if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
             obj.handle_close()
     except OSError as e:
-        if e.args[0] not in _DISCONNECTED:
+        if e.errno not in _DISCONNECTED:
             obj.handle_error()
         else:
             obj.handle_close()
@@ -236,7 +243,7 @@
             try:
                 self.addr = sock.getpeername()
             except OSError as err:
-                if err.args[0] in (ENOTCONN, EINVAL):
+                if err.errno in (ENOTCONN, EINVAL):
                     # To handle the case where we got an unconnected
                     # socket.
                     self.connected = False
@@ -346,7 +353,7 @@
         except TypeError:
             return None
         except OSError as why:
-            if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
+            if why.errno in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                 return None
             else:
                 raise
@@ -358,9 +365,9 @@
             result = self.socket.send(data)
             return result
         except OSError as why:
-            if why.args[0] == EWOULDBLOCK:
+            if why.errno == EWOULDBLOCK:
                 return 0
-            elif why.args[0] in _DISCONNECTED:
+            elif why.errno in _DISCONNECTED:
                 self.handle_close()
                 return 0
             else:
@@ -378,7 +385,7 @@
                 return data
         except OSError as why:
             # winsock sometimes raises ENOTCONN
-            if why.args[0] in _DISCONNECTED:
+            if why.errno in _DISCONNECTED:
                 self.handle_close()
                 return b''
             else:
@@ -393,7 +400,7 @@
             try:
                 self.socket.close()
             except OSError as why:
-                if why.args[0] not in (ENOTCONN, EBADF):
+                if why.errno not in (ENOTCONN, EBADF):
                     raise
 
     # log and log_info may be overridden to provide more sophisticated
@@ -557,7 +564,7 @@
         try:
             x.close()
         except OSError as x:
-            if x.args[0] == EBADF:
+            if x.errno == EBADF:
                 pass
             elif not ignore_all:
                 raise
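
The checks switch from err.args[0] to err.errno, the attribute that is
reliable for OS-raised errors; args[0] only happens to match it in that
case. Illustration ('/nonexistent/path' is a placeholder):

    import errno

    try:
        open('/nonexistent/path')
    except OSError as err:
        print(err.errno == errno.ENOENT)   # True
        print(err.args[0] == err.errno)    # True for OS-raised errors

    print(OSError('just a message').errno) # None; args[0] would mislead
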
diff --git a/Lib/base64.py b/Lib/base64.py
index a28109f..e1256ad 100644
--- a/Lib/base64.py
+++ b/Lib/base64.py
@@ -16,7 +16,7 @@
     'encode', 'decode', 'encodebytes', 'decodebytes',
     # Generalized interface for other encodings
     'b64encode', 'b64decode', 'b32encode', 'b32decode',
-    'b16encode', 'b16decode',
+    'b32hexencode', 'b32hexdecode', 'b16encode', 'b16decode',
     # Base85 and Ascii85 encodings
     'b85encode', 'b85decode', 'a85encode', 'a85decode',
     # Standard Base64 encoding
@@ -135,19 +135,40 @@
 
 
 # Base32 encoding/decoding must be done in Python
-_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
-_b32tab2 = None
-_b32rev = None
+_B32_ENCODE_DOCSTRING = '''
+Encode the bytes-like object using {encoding} and return a bytes object.
+'''
+_B32_DECODE_DOCSTRING = '''
+Decode the {encoding} encoded bytes-like object or ASCII string s.
 
-def b32encode(s):
-    """Encode the bytes-like object s using Base32 and return a bytes object.
-    """
+Optional casefold is a flag specifying whether a lowercase alphabet is
+acceptable as input.  For security purposes, the default is False.
+{extra_args}
+The result is returned as a bytes object.  A binascii.Error is raised if
+the input is incorrectly padded or if there are non-alphabet
+characters present in the input.
+'''
+_B32_DECODE_MAP01_DOCSTRING = '''
+RFC 3548 allows for optional mapping of the digit 0 (zero) to the
+letter O (oh), and for optional mapping of the digit 1 (one) to
+either the letter I (eye) or letter L (el).  The optional argument
+map01 when not None, specifies which letter the digit 1 should be
+mapped to (when map01 is not None, the digit 0 is always mapped to
+the letter O).  For security purposes the default is None, so that
+0 and 1 are not allowed in the input.
+'''
+_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
+_b32hexalphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUV'
+_b32tab2 = {}
+_b32rev = {}
+
+def _b32encode(alphabet, s):
     global _b32tab2
     # Delay the initialization of the table to not waste memory
     # if the function is never called
-    if _b32tab2 is None:
-        b32tab = [bytes((i,)) for i in _b32alphabet]
-        _b32tab2 = [a + b for a in b32tab for b in b32tab]
+    if alphabet not in _b32tab2:
+        b32tab = [bytes((i,)) for i in alphabet]
+        _b32tab2[alphabet] = [a + b for a in b32tab for b in b32tab]
         b32tab = None
 
     if not isinstance(s, bytes_types):
@@ -158,7 +179,7 @@
         s = s + b'\0' * (5 - leftover)  # Don't use += !
     encoded = bytearray()
     from_bytes = int.from_bytes
-    b32tab2 = _b32tab2
+    b32tab2 = _b32tab2[alphabet]
     for i in range(0, len(s), 5):
         c = from_bytes(s[i: i + 5], 'big')
         encoded += (b32tab2[c >> 30] +           # bits 1 - 10
@@ -177,29 +198,12 @@
         encoded[-1:] = b'='
     return bytes(encoded)
 
-def b32decode(s, casefold=False, map01=None):
-    """Decode the Base32 encoded bytes-like object or ASCII string s.
-
-    Optional casefold is a flag specifying whether a lowercase alphabet is
-    acceptable as input.  For security purposes, the default is False.
-
-    RFC 3548 allows for optional mapping of the digit 0 (zero) to the
-    letter O (oh), and for optional mapping of the digit 1 (one) to
-    either the letter I (eye) or letter L (el).  The optional argument
-    map01 when not None, specifies which letter the digit 1 should be
-    mapped to (when map01 is not None, the digit 0 is always mapped to
-    the letter O).  For security purposes the default is None, so that
-    0 and 1 are not allowed in the input.
-
-    The result is returned as a bytes object.  A binascii.Error is raised if
-    the input is incorrectly padded or if there are non-alphabet
-    characters present in the input.
-    """
+def _b32decode(alphabet, s, casefold=False, map01=None):
     global _b32rev
     # Delay the initialization of the table to not waste memory
     # if the function is never called
-    if _b32rev is None:
-        _b32rev = {v: k for k, v in enumerate(_b32alphabet)}
+    if alphabet not in _b32rev:
+        _b32rev[alphabet] = {v: k for k, v in enumerate(alphabet)}
     s = _bytes_from_decode_data(s)
     if len(s) % 8:
         raise binascii.Error('Incorrect padding')
@@ -220,7 +224,7 @@
     padchars = l - len(s)
     # Now decode the full quanta
     decoded = bytearray()
-    b32rev = _b32rev
+    b32rev = _b32rev[alphabet]
     for i in range(0, len(s), 8):
         quanta = s[i: i + 8]
         acc = 0
@@ -241,6 +245,26 @@
     return bytes(decoded)
 
 
+def b32encode(s):
+    return _b32encode(_b32alphabet, s)
+b32encode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32')
+
+def b32decode(s, casefold=False, map01=None):
+    return _b32decode(_b32alphabet, s, casefold, map01)
+b32decode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32',
+                                        extra_args=_B32_DECODE_MAP01_DOCSTRING)
+
+def b32hexencode(s):
+    return _b32encode(_b32hexalphabet, s)
+b32hexencode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32hex')
+
+def b32hexdecode(s, casefold=False):
+    # base32hex does not have the 01 mapping
+    return _b32decode(_b32hexalphabet, s, casefold)
+b32hexdecode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32hex',
+                                                    extra_args='')
+
+
 # RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
 # lowercase.  The RFC also recommends against accepting input case
 # insensitively.
@@ -320,7 +344,7 @@
     global _a85chars, _a85chars2
     # Delay the initialization of tables to not waste memory
     # if the function is never called
-    if _a85chars is None:
+    if _a85chars2 is None:
         _a85chars = [bytes((i,)) for i in range(33, 118)]
         _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
 
@@ -428,7 +452,7 @@
     global _b85chars, _b85chars2
     # Delay the initialization of tables to not waste memory
     # if the function is never called
-    if _b85chars is None:
+    if _b85chars2 is None:
         _b85chars = [bytes((i,)) for i in _b85alphabet]
         _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
     return _85encode(b, _b85chars, _b85chars2, pad)
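
The refactor keys the lazily built tables by alphabet so the new RFC 4648
base32hex codec can share the base32 implementation. Usage:

    import base64

    enc = base64.b32hexencode(b'hello')
    print(enc)                        # b'D1IMOR3F'
    print(base64.b32hexdecode(enc))   # b'hello'
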
diff --git a/Lib/bdb.py b/Lib/bdb.py
index b18a061..75d6113 100644
--- a/Lib/bdb.py
+++ b/Lib/bdb.py
@@ -34,6 +34,8 @@
         self.fncache = {}
         self.frame_returning = None
 
+        self._load_breaks()
+
     def canonic(self, filename):
         """Return canonical form of filename.
 
@@ -117,7 +119,7 @@
         """Invoke user function and return trace function for call event.
 
         If the debugger stops on this function call, invoke
-        self.user_call(). Raise BbdQuit if self.quitting is set.
+        self.user_call(). Raise BdbQuit if self.quitting is set.
         Return self.trace_dispatch to continue tracing in this scope.
         """
         # XXX 'arg' is no longer used
@@ -365,6 +367,12 @@
     # Call self.get_*break*() to see the breakpoints or better
     # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
 
+    def _add_to_breaks(self, filename, lineno):
+        """Add breakpoint to breaks, if not already there."""
+        bp_linenos = self.breaks.setdefault(filename, [])
+        if lineno not in bp_linenos:
+            bp_linenos.append(lineno)
+
     def set_break(self, filename, lineno, temporary=False, cond=None,
                   funcname=None):
         """Set a new breakpoint for filename:lineno.
@@ -377,12 +385,21 @@
         line = linecache.getline(filename, lineno)
         if not line:
             return 'Line %s:%d does not exist' % (filename, lineno)
-        list = self.breaks.setdefault(filename, [])
-        if lineno not in list:
-            list.append(lineno)
+        self._add_to_breaks(filename, lineno)
         bp = Breakpoint(filename, lineno, temporary, cond, funcname)
         return None
 
+    def _load_breaks(self):
+        """Apply all breakpoints (set in other instances) to this one.
+
+        Populates this instance's breaks list from the Breakpoint class's
+        list, which can have breakpoints set by another Bdb instance. This
+        is necessary for interactive sessions to keep the breakpoints
+        active across multiple calls to run().
+        """
+        for (filename, lineno) in Breakpoint.bplist.keys():
+            self._add_to_breaks(filename, lineno)
+
     def _prune_breaks(self, filename, lineno):
         """Prune breakpoints for filename:lineno.
 
@@ -681,6 +698,12 @@
         else:
             self.bplist[file, line] = [self]
 
+    @staticmethod
+    def clearBreakpoints():
+        Breakpoint.next = 1
+        Breakpoint.bplist = {}
+        Breakpoint.bpbynumber = [None]
+
     def deleteMe(self):
         """Delete the breakpoint from the list associated to a file:line.
 
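
Breakpoints now survive across Bdb instances: each new instance pulls the
entries already registered in Breakpoint.bplist via _load_breaks(), and
the new clearBreakpoints() resets that shared state. Sketch ('spam.py' is
a placeholder filename):

    import bdb

    bdb.Breakpoint('spam.py', 3)
    print(list(bdb.Breakpoint.bplist))   # [('spam.py', 3)]

    bdb.Breakpoint.clearBreakpoints()    # reset the class-level registry
    print(list(bdb.Breakpoint.bplist))   # []
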
diff --git a/Lib/bisect.py b/Lib/bisect.py
index 8f3f6a3..d37da74 100644
--- a/Lib/bisect.py
+++ b/Lib/bisect.py
@@ -1,6 +1,7 @@
 """Bisection algorithms."""
 
-def insort_right(a, x, lo=0, hi=None):
+
+def insort_right(a, x, lo=0, hi=None, *, key=None):
     """Insert item x in list a, and keep it sorted assuming a is sorted.
 
     If x is already in a, insert it to the right of the rightmost x.
@@ -8,15 +9,18 @@
     Optional args lo (default 0) and hi (default len(a)) bound the
     slice of a to be searched.
     """
-
-    lo = bisect_right(a, x, lo, hi)
+    if key is None:
+        lo = bisect_right(a, x, lo, hi)
+    else:
+        lo = bisect_right(a, key(x), lo, hi, key=key)
     a.insert(lo, x)
 
-def bisect_right(a, x, lo=0, hi=None):
+
+def bisect_right(a, x, lo=0, hi=None, *, key=None):
     """Return the index where to insert item x in list a, assuming a is sorted.
 
     The return value i is such that all e in a[:i] have e <= x, and all e in
-    a[i:] have e > x.  So if x already appears in the list, a.insert(x) will
+    a[i:] have e > x.  So if x already appears in the list, a.insert(i, x) will
     insert just after the rightmost x already there.
 
     Optional args lo (default 0) and hi (default len(a)) bound the
@@ -27,14 +31,26 @@
         raise ValueError('lo must be non-negative')
     if hi is None:
         hi = len(a)
-    while lo < hi:
-        mid = (lo+hi)//2
-        # Use __lt__ to match the logic in list.sort() and in heapq
-        if x < a[mid]: hi = mid
-        else: lo = mid+1
+    # Note, the comparison uses "<" to match the
+    # __lt__() logic in list.sort() and in heapq.
+    if key is None:
+        while lo < hi:
+            mid = (lo + hi) // 2
+            if x < a[mid]:
+                hi = mid
+            else:
+                lo = mid + 1
+    else:
+        while lo < hi:
+            mid = (lo + hi) // 2
+            if x < key(a[mid]):
+                hi = mid
+            else:
+                lo = mid + 1
     return lo
 
-def insort_left(a, x, lo=0, hi=None):
+
+def insort_left(a, x, lo=0, hi=None, *, key=None):
     """Insert item x in list a, and keep it sorted assuming a is sorted.
 
     If x is already in a, insert it to the left of the leftmost x.
@@ -43,15 +59,17 @@
     slice of a to be searched.
     """
 
-    lo = bisect_left(a, x, lo, hi)
+    if key is None:
+        lo = bisect_left(a, x, lo, hi)
+    else:
+        lo = bisect_left(a, key(x), lo, hi, key=key)
     a.insert(lo, x)
 
-
-def bisect_left(a, x, lo=0, hi=None):
+def bisect_left(a, x, lo=0, hi=None, *, key=None):
     """Return the index where to insert item x in list a, assuming a is sorted.
 
     The return value i is such that all e in a[:i] have e < x, and all e in
-    a[i:] have e >= x.  So if x already appears in the list, a.insert(x) will
+    a[i:] have e >= x.  So if x already appears in the list, a.insert(i, x) will
     insert just before the leftmost x already there.
 
     Optional args lo (default 0) and hi (default len(a)) bound the
@@ -62,13 +80,25 @@
         raise ValueError('lo must be non-negative')
     if hi is None:
         hi = len(a)
-    while lo < hi:
-        mid = (lo+hi)//2
-        # Use __lt__ to match the logic in list.sort() and in heapq
-        if a[mid] < x: lo = mid+1
-        else: hi = mid
+    # Note, the comparison uses "<" to match the
+    # __lt__() logic in list.sort() and in heapq.
+    if key is None:
+        while lo < hi:
+            mid = (lo + hi) // 2
+            if a[mid] < x:
+                lo = mid + 1
+            else:
+                hi = mid
+    else:
+        while lo < hi:
+            mid = (lo + hi) // 2
+            if key(a[mid]) < x:
+                lo = mid + 1
+            else:
+                hi = mid
     return lo
 
+
 # Overwrite above definitions with a fast C implementation
 try:
     from _bisect import *
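
Illustrative usage (a sketch, not part of this change): with the new key=
parameter, bisect_left/bisect_right compare x against key(a[mid]), so x must
already be a key value; insort_left/insort_right take the full item and apply
key(x) internally.

    from bisect import bisect_right, insort_right

    rows = [('low', 1), ('mid', 5), ('high', 9)]   # sorted by second field

    # bisect compares x against key(a[mid]), so pass the key value directly.
    i = bisect_right(rows, 5, key=lambda r: r[1])  # -> 2

    # insort takes the whole item and applies key(x) itself.
    insort_right(rows, ('new', 5), key=lambda r: r[1])
    # rows == [('low', 1), ('mid', 5), ('new', 5), ('high', 9)]
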
diff --git a/Lib/bz2.py b/Lib/bz2.py
index ce07ebe..fabe4f7 100644
--- a/Lib/bz2.py
+++ b/Lib/bz2.py
@@ -13,7 +13,6 @@
 import io
 import os
 import _compression
-from threading import RLock
 
 from _bz2 import BZ2Compressor, BZ2Decompressor
 
@@ -53,9 +52,6 @@
         If mode is 'r', the input file may be the concatenation of
         multiple compressed streams.
         """
-        # This lock must be recursive, so that BufferedIOBase's
-        # writelines() does not deadlock.
-        self._lock = RLock()
         self._fp = None
         self._closefp = False
         self._mode = _MODE_CLOSED
@@ -104,24 +100,23 @@
         May be called more than once without error. Once the file is
         closed, any other operation on it will raise a ValueError.
         """
-        with self._lock:
-            if self._mode == _MODE_CLOSED:
-                return
+        if self._mode == _MODE_CLOSED:
+            return
+        try:
+            if self._mode == _MODE_READ:
+                self._buffer.close()
+            elif self._mode == _MODE_WRITE:
+                self._fp.write(self._compressor.flush())
+                self._compressor = None
+        finally:
             try:
-                if self._mode == _MODE_READ:
-                    self._buffer.close()
-                elif self._mode == _MODE_WRITE:
-                    self._fp.write(self._compressor.flush())
-                    self._compressor = None
+                if self._closefp:
+                    self._fp.close()
             finally:
-                try:
-                    if self._closefp:
-                        self._fp.close()
-                finally:
-                    self._fp = None
-                    self._closefp = False
-                    self._mode = _MODE_CLOSED
-                    self._buffer = None
+                self._fp = None
+                self._closefp = False
+                self._mode = _MODE_CLOSED
+                self._buffer = None
 
     @property
     def closed(self):
@@ -153,12 +148,11 @@
         Always returns at least one byte of data, unless at EOF.
         The exact number of bytes returned is unspecified.
         """
-        with self._lock:
-            self._check_can_read()
-            # Relies on the undocumented fact that BufferedReader.peek()
-            # always returns at least one byte (except at EOF), independent
-            # of the value of n
-            return self._buffer.peek(n)
+        self._check_can_read()
+        # Relies on the undocumented fact that BufferedReader.peek()
+        # always returns at least one byte (except at EOF), independent
+        # of the value of n
+        return self._buffer.peek(n)
 
     def read(self, size=-1):
         """Read up to size uncompressed bytes from the file.
@@ -166,9 +160,8 @@
         If size is negative or omitted, read until EOF is reached.
         Returns b'' if the file is already at EOF.
         """
-        with self._lock:
-            self._check_can_read()
-            return self._buffer.read(size)
+        self._check_can_read()
+        return self._buffer.read(size)
 
     def read1(self, size=-1):
         """Read up to size uncompressed bytes, while trying to avoid
@@ -177,20 +170,18 @@
 
         Returns b'' if the file is at EOF.
         """
-        with self._lock:
-            self._check_can_read()
-            if size < 0:
-                size = io.DEFAULT_BUFFER_SIZE
-            return self._buffer.read1(size)
+        self._check_can_read()
+        if size < 0:
+            size = io.DEFAULT_BUFFER_SIZE
+        return self._buffer.read1(size)
 
     def readinto(self, b):
         """Read bytes into b.
 
         Returns the number of bytes read (0 for EOF).
         """
-        with self._lock:
-            self._check_can_read()
-            return self._buffer.readinto(b)
+        self._check_can_read()
+        return self._buffer.readinto(b)
 
     def readline(self, size=-1):
         """Read a line of uncompressed bytes from the file.
@@ -203,9 +194,8 @@
             if not hasattr(size, "__index__"):
                 raise TypeError("Integer argument expected")
             size = size.__index__()
-        with self._lock:
-            self._check_can_read()
-            return self._buffer.readline(size)
+        self._check_can_read()
+        return self._buffer.readline(size)
 
     def readlines(self, size=-1):
         """Read a list of lines of uncompressed bytes from the file.
@@ -218,23 +208,29 @@
             if not hasattr(size, "__index__"):
                 raise TypeError("Integer argument expected")
             size = size.__index__()
-        with self._lock:
-            self._check_can_read()
-            return self._buffer.readlines(size)
+        self._check_can_read()
+        return self._buffer.readlines(size)
 
     def write(self, data):
         """Write a byte string to the file.
 
         Returns the number of uncompressed bytes written, which is
-        always len(data). Note that due to buffering, the file on disk
-        may not reflect the data written until close() is called.
+        always the length of data in bytes. Note that due to buffering,
+        the file on disk may not reflect the data written until close()
+        is called.
         """
-        with self._lock:
-            self._check_can_write()
-            compressed = self._compressor.compress(data)
-            self._fp.write(compressed)
-            self._pos += len(data)
-            return len(data)
+        self._check_can_write()
+        if isinstance(data, (bytes, bytearray)):
+            length = len(data)
+        else:
+            # accept any data that supports the buffer protocol
+            data = memoryview(data)
+            length = data.nbytes
+
+        compressed = self._compressor.compress(data)
+        self._fp.write(compressed)
+        self._pos += length
+        return length
 
     def writelines(self, seq):
         """Write a sequence of byte strings to the file.
@@ -244,8 +240,7 @@
 
         Line separators are not added between the written byte strings.
         """
-        with self._lock:
-            return _compression.BaseStream.writelines(self, seq)
+        return _compression.BaseStream.writelines(self, seq)
 
     def seek(self, offset, whence=io.SEEK_SET):
         """Change the file position.
@@ -262,17 +257,15 @@
         Note that seeking is emulated, so depending on the parameters,
         this operation may be extremely slow.
         """
-        with self._lock:
-            self._check_can_seek()
-            return self._buffer.seek(offset, whence)
+        self._check_can_seek()
+        return self._buffer.seek(offset, whence)
 
     def tell(self):
         """Return the current file position."""
-        with self._lock:
-            self._check_not_closed()
-            if self._mode == _MODE_READ:
-                return self._buffer.tell()
-            return self._pos
+        self._check_not_closed()
+        if self._mode == _MODE_READ:
+            return self._buffer.tell()
+        return self._pos
 
 
 def open(filename, mode="rb", compresslevel=9,
@@ -311,6 +304,7 @@
     binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel)
 
     if "t" in mode:
+        encoding = io.text_encoding(encoding)
         return io.TextIOWrapper(binary_file, encoding, errors, newline)
     else:
         return binary_file
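
Illustrative usage (a sketch, not part of this change; file names are
placeholders): write() now accepts any object supporting the buffer protocol,
and text mode routes the encoding through io.text_encoding(), so passing one
explicitly avoids the locale-dependent default.

    import bz2

    with bz2.open('notes.txt.bz2', 'wt', encoding='utf-8') as f:
        f.write('hello\n')

    with bz2.BZ2File('raw.bz2', 'wb') as f:
        f.write(memoryview(b'payload'))  # buffer protocol, not just bytes
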
diff --git a/Lib/cProfile.py b/Lib/cProfile.py
index 59b4699..22a7d0a 100644
--- a/Lib/cProfile.py
+++ b/Lib/cProfile.py
@@ -175,7 +175,12 @@
                 '__package__': None,
                 '__cached__': None,
             }
-        runctx(code, globs, None, options.outfile, options.sort)
+        try:
+            runctx(code, globs, None, options.outfile, options.sort)
+        except BrokenPipeError as exc:
+            # Prevent "Exception ignored" during interpreter shutdown.
+            sys.stdout = None
+            sys.exit(exc.errno)
     else:
         parser.print_usage()
     return parser
diff --git a/Lib/calendar.py b/Lib/calendar.py
index 7550d52..cbea9ec 100644
--- a/Lib/calendar.py
+++ b/Lib/calendar.py
@@ -15,7 +15,9 @@
            "monthcalendar", "prmonth", "month", "prcal", "calendar",
            "timegm", "month_name", "month_abbr", "day_name", "day_abbr",
            "Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar",
-           "LocaleHTMLCalendar", "weekheader"]
+           "LocaleHTMLCalendar", "weekheader",
+           "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY",
+           "SATURDAY", "SUNDAY"]
 
 # Exception raised for bad input (with string parameter for details)
 error = ValueError
@@ -571,19 +573,11 @@
 
     def formatweekday(self, day, width):
         with different_locale(self.locale):
-            if width >= 9:
-                names = day_name
-            else:
-                names = day_abbr
-            name = names[day]
-            return name[:width].center(width)
+            return super().formatweekday(day, width)
 
     def formatmonthname(self, theyear, themonth, width, withyear=True):
         with different_locale(self.locale):
-            s = month_name[themonth]
-            if withyear:
-                s = "%s %r" % (s, theyear)
-            return s.center(width)
+            return super().formatmonthname(theyear, themonth, width, withyear)
 
 
 class LocaleHTMLCalendar(HTMLCalendar):
@@ -601,16 +595,11 @@
 
     def formatweekday(self, day):
         with different_locale(self.locale):
-            s = day_abbr[day]
-            return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
+            return super().formatweekday(day)
 
     def formatmonthname(self, theyear, themonth, withyear=True):
         with different_locale(self.locale):
-            s = month_name[themonth]
-            if withyear:
-                s = '%s %s' % (s, theyear)
-            return '<tr><th colspan="7" class="month">%s</th></tr>' % s
-
+            return super().formatmonthname(theyear, themonth, withyear)
 
 # Support for old module level interface
 c = TextCalendar()
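
Illustrative usage (a sketch, not part of this change; locale availability is
platform-dependent): the locale calendars now reuse the base-class formatting
via super() and only switch the locale around it.

    import calendar

    cal = calendar.LocaleTextCalendar(firstweekday=0, locale='en_US.UTF-8')
    print(cal.formatmonth(2022, 8))  # month grid with localized names
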
diff --git a/Lib/cgi.py b/Lib/cgi.py
index 77ab703..6cb8cf2 100644
--- a/Lib/cgi.py
+++ b/Lib/cgi.py
@@ -41,6 +41,7 @@
 import html
 import locale
 import tempfile
+import warnings
 
 __all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart",
            "parse_header", "test", "print_exception", "print_environ",
@@ -77,9 +78,11 @@
 
     """
     global log, logfile, logfp
+    warnings.warn("cgi.log() is deprecated as of 3.10. Use logging instead",
+                  DeprecationWarning, stacklevel=2)
     if logfile and not logfp:
         try:
-            logfp = open(logfile, "a")
+            logfp = open(logfile, "a", encoding="locale")
         except OSError:
             pass
     if not logfp:
@@ -115,7 +118,8 @@
 # 0 ==> unlimited input
 maxlen = 0
 
-def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
+def parse(fp=None, environ=os.environ, keep_blank_values=0,
+          strict_parsing=0, separator='&'):
     """Parse a query in the environment or from a file (default stdin)
 
         Arguments, all optional:
@@ -134,6 +138,9 @@
         strict_parsing: flag indicating what to do with parsing errors.
             If false (the default), errors are silently ignored.
             If true, errors raise a ValueError exception.
+
+        separator: str. The symbol to use for separating the query arguments.
+            Defaults to &.
     """
     if fp is None:
         fp = sys.stdin
@@ -154,7 +161,7 @@
     if environ['REQUEST_METHOD'] == 'POST':
         ctype, pdict = parse_header(environ['CONTENT_TYPE'])
         if ctype == 'multipart/form-data':
-            return parse_multipart(fp, pdict)
+            return parse_multipart(fp, pdict, separator=separator)
         elif ctype == 'application/x-www-form-urlencoded':
             clength = int(environ['CONTENT_LENGTH'])
             if maxlen and clength > maxlen:
@@ -178,10 +185,10 @@
             qs = ""
         environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
     return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
-                                 encoding=encoding)
+                                 encoding=encoding, separator=separator)
 
 
-def parse_multipart(fp, pdict, encoding="utf-8", errors="replace"):
+def parse_multipart(fp, pdict, encoding="utf-8", errors="replace", separator='&'):
     """Parse multipart input.
 
     Arguments:
@@ -194,7 +201,7 @@
     value is a list of values for that field. For non-file fields, the value
     is a list of strings.
     """
-    # RFC 2026, Section 5.1 : The "multipart" boundary delimiters are always
+    # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always
     # represented as 7bit US-ASCII.
     boundary = pdict['boundary'].decode('ascii')
     ctype = "multipart/form-data; boundary={}".format(boundary)
@@ -205,7 +212,7 @@
     except KeyError:
         pass
     fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors,
-        environ={'REQUEST_METHOD': 'POST'})
+        environ={'REQUEST_METHOD': 'POST'}, separator=separator)
     return {k: fs.getlist(k) for k in fs}
 
 def _parseparam(s):
@@ -315,7 +322,7 @@
     def __init__(self, fp=None, headers=None, outerboundary=b'',
                  environ=os.environ, keep_blank_values=0, strict_parsing=0,
                  limit=None, encoding='utf-8', errors='replace',
-                 max_num_fields=None):
+                 max_num_fields=None, separator='&'):
         """Constructor.  Read multipart/* until last part.
 
         Arguments, all optional:
@@ -363,6 +370,7 @@
         self.keep_blank_values = keep_blank_values
         self.strict_parsing = strict_parsing
         self.max_num_fields = max_num_fields
+        self.separator = separator
         if 'REQUEST_METHOD' in environ:
             method = environ['REQUEST_METHOD'].upper()
         self.qs_on_post = None
@@ -589,7 +597,7 @@
         query = urllib.parse.parse_qsl(
             qs, self.keep_blank_values, self.strict_parsing,
             encoding=self.encoding, errors=self.errors,
-            max_num_fields=self.max_num_fields)
+            max_num_fields=self.max_num_fields, separator=self.separator)
         self.list = [MiniFieldStorage(key, value) for key, value in query]
         self.skip_lines()
 
@@ -605,7 +613,7 @@
             query = urllib.parse.parse_qsl(
                 self.qs_on_post, self.keep_blank_values, self.strict_parsing,
                 encoding=self.encoding, errors=self.errors,
-                max_num_fields=self.max_num_fields)
+                max_num_fields=self.max_num_fields, separator=self.separator)
             self.list.extend(MiniFieldStorage(key, value) for key, value in query)
 
         klass = self.FieldStorageClass or self.__class__
@@ -649,7 +657,7 @@
                 else self.limit - self.bytes_read
             part = klass(self.fp, headers, ib, environ, keep_blank_values,
                          strict_parsing, limit,
-                         self.encoding, self.errors, max_num_fields)
+                         self.encoding, self.errors, max_num_fields, self.separator)
 
             if max_num_fields is not None:
                 max_num_fields -= 1
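
Illustrative usage (a sketch, not part of this change): the separator plumbing
mirrors urllib.parse, where '&' is the only default separator and ';' must be
requested explicitly.

    from urllib.parse import parse_qs

    print(parse_qs('a=1&b=2'))                 # {'a': ['1'], 'b': ['2']}
    print(parse_qs('a=1;b=2', separator=';'))  # {'a': ['1'], 'b': ['2']}
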
diff --git a/Lib/cgitb.py b/Lib/cgitb.py
index 4f81271..17ddda3 100644
--- a/Lib/cgitb.py
+++ b/Lib/cgitb.py
@@ -181,8 +181,8 @@
 
 
 <!-- The above is a description of an error in a Python program, formatted
-     for a Web browser because the 'cgitb' module was enabled.  In case you
-     are not reading this in a Web browser, here is the original traceback:
+     for a web browser because the 'cgitb' module was enabled.  In case you
+     are not reading this in a web browser, here is the original traceback:
 
 %s
 -->
diff --git a/Lib/codecs.py b/Lib/codecs.py
index 7f23e97..e6ad6e3 100644
--- a/Lib/codecs.py
+++ b/Lib/codecs.py
@@ -83,7 +83,7 @@
 class CodecInfo(tuple):
     """Codec details when looking up the codec registry"""
 
-    # Private API to allow Python 3.4 to blacklist the known non-Unicode
+    # Private API to allow Python 3.4 to denylist the known non-Unicode
     # codecs in the standard library. A more general mechanism to
     # reliably distinguish test encodings from other codecs will hopefully
     # be defined for Python 3.5
@@ -386,7 +386,7 @@
 
     def reset(self):
 
-        """ Flushes and resets the codec buffers used for keeping state.
+        """ Resets the codec buffers used for keeping internal state.
 
             Calling this method should ensure that the data on the
             output is put into a clean state, that allows appending
@@ -620,7 +620,7 @@
 
     def reset(self):
 
-        """ Resets the codec buffers used for keeping state.
+        """ Resets the codec buffers used for keeping internal state.
 
             Note that no stream repositioning should take place.
             This method is primarily intended to be able to recover
diff --git a/Lib/codeop.py b/Lib/codeop.py
index 4c10470..568e9bb 100644
--- a/Lib/codeop.py
+++ b/Lib/codeop.py
@@ -10,30 +10,6 @@
   syntax error (OverflowError and ValueError can be produced by
   malformed literals).
 
-Approach:
-
-First, check if the source consists entirely of blank lines and
-comments; if so, replace it with 'pass', because the built-in
-parser doesn't always do the right thing for these.
-
-Compile three times: as is, with \n, and with \n\n appended.  If it
-compiles as is, it's complete.  If it compiles with one \n appended,
-we expect more.  If it doesn't compile either way, we compare the
-error we get when compiling with \n or \n\n appended.  If the errors
-are the same, the code is broken.  But if the errors are different, we
-expect more.  Not intuitive; not even guaranteed to hold in future
-releases; but this matches the compiler's behavior from Python 1.4
-through 2.2, at least.
-
-Caveat:
-
-It is possible (but not likely) that the parser stops parsing with a
-successful outcome before reaching the end of the source; in this
-case, trailing symbols may be ignored instead of causing an error.
-For example, a backslash followed by two newlines may be followed by
-arbitrary garbage.  This will be fixed once the API for the parser is
-better.
-
 The two interfaces are:
 
 compile_command(source, filename, symbol):
@@ -64,24 +40,25 @@
 
 __all__ = ["compile_command", "Compile", "CommandCompiler"]
 
-PyCF_DONT_IMPLY_DEDENT = 0x200          # Matches pythonrun.h
+# The following flags match the values from Include/cpython/compile.h
+# Caveat emptor: These flags are undocumented on purpose and depending
+# on their effect outside the standard library is **unsupported**.
+PyCF_DONT_IMPLY_DEDENT = 0x200
+PyCF_ALLOW_INCOMPLETE_INPUT = 0x4000
 
 def _maybe_compile(compiler, source, filename, symbol):
-    # Check for source consisting of only blank lines and comments
+    # Check for source consisting of only blank lines and comments.
     for line in source.split("\n"):
         line = line.strip()
         if line and line[0] != '#':
-            break               # Leave it alone
+            break               # Leave it alone.
     else:
         if symbol != "eval":
             source = "pass"     # Replace it with a 'pass' statement
 
-    err = err1 = err2 = None
-    code = code1 = code2 = None
-
     try:
-        code = compiler(source, filename, symbol)
-    except SyntaxError:
+        return compiler(source, filename, symbol)
+    except SyntaxError:  # Let other compile() errors propagate.
         pass
 
     # Catch syntax warnings after the first compile
@@ -90,25 +67,23 @@
         warnings.simplefilter("error")
 
         try:
-            code1 = compiler(source + "\n", filename, symbol)
+            compiler(source + "\n", filename, symbol)
         except SyntaxError as e:
-            err1 = e
+            if "incomplete input" in str(e):
+                return None
+            raise
 
-        try:
-            code2 = compiler(source + "\n\n", filename, symbol)
-        except SyntaxError as e:
-            err2 = e
-
-    try:
-        if code:
-            return code
-        if not code1 and repr(err1) == repr(err2):
-            raise err1
-    finally:
-        err1 = err2 = None
+def _is_syntax_error(err1, err2):
+    rep1 = repr(err1)
+    rep2 = repr(err2)
+    if "was never closed" in rep1 and "was never closed" in rep2:
+        return False
+    if rep1 == rep2:
+        return True
+    return False
 
 def _compile(source, filename, symbol):
-    return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
+    return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT)
 
 def compile_command(source, filename="<input>", symbol="single"):
     r"""Compile a command and determine whether it is incomplete.
@@ -137,7 +112,7 @@
     statement, it "remembers" and compiles all subsequent program texts
     with the statement in force."""
     def __init__(self):
-        self.flags = PyCF_DONT_IMPLY_DEDENT
+        self.flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT
 
     def __call__(self, source, filename, symbol):
         codeob = compile(source, filename, symbol, self.flags, True)
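
Illustrative usage (a sketch, not part of this change): with
PyCF_ALLOW_INCOMPLETE_INPUT the parser itself reports incomplete input, so
compile_command() no longer needs the old double-compile heuristic.

    from codeop import compile_command

    assert compile_command('x = 1') is not None   # complete statement
    assert compile_command('if x:') is None       # incomplete: expect more
    try:
        compile_command('x ===')                  # a real error still raises
    except SyntaxError:
        pass
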
diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py
index bc69a67..818588f 100644
--- a/Lib/collections/__init__.py
+++ b/Lib/collections/__init__.py
@@ -27,7 +27,6 @@
 ]
 
 import _collections_abc
-import heapq as _heapq
 import sys as _sys
 
 from itertools import chain as _chain
@@ -52,22 +51,6 @@
     pass
 
 
-def __getattr__(name):
-    # For backwards compatibility, continue to make the collections ABCs
-    # through Python 3.6 available through the collections module.
-    # Note, no new collections ABCs were added in Python 3.7
-    if name in _collections_abc.__all__:
-        obj = getattr(_collections_abc, name)
-        import warnings
-        warnings.warn("Using or importing the ABCs from 'collections' instead "
-                      "of from 'collections.abc' is deprecated since Python 3.3, "
-                      "and in 3.10 it will stop working",
-                      DeprecationWarning, stacklevel=2)
-        globals()[name] = obj
-        return obj
-    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
-
-
 ################################################################################
 ### OrderedDict
 ################################################################################
@@ -424,7 +407,7 @@
 
     namespace = {
         '_tuple_new': tuple_new,
-        '__builtins__': None,
+        '__builtins__': {},
         '__name__': f'namedtuple_{typename}',
     }
     code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
@@ -489,6 +472,7 @@
         '__repr__': __repr__,
         '_asdict': _asdict,
         '__getnewargs__': __getnewargs__,
+        '__match_args__': field_names,
     }
     for index, name in enumerate(field_names):
         doc = _sys.intern(f'Alias for field number {index}')
@@ -597,6 +581,10 @@
         # Needed so that self[missing_item] does not raise KeyError
         return 0
 
+    def total(self):
+        'Sum of the counts'
+        return sum(self.values())
+
     def most_common(self, n=None):
         '''List the n most common elements and their counts from the most
         common to the least.  If n is None, then list all element counts.
@@ -608,7 +596,10 @@
         # Emulate Bag.sortedByCount from Smalltalk
         if n is None:
             return sorted(self.items(), key=_itemgetter(1), reverse=True)
-        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
+
+        # Lazy import to speedup Python startup time
+        import heapq
+        return heapq.nlargest(n, self.items(), key=_itemgetter(1))
 
     def elements(self):
         '''Iterator over elements repeating each as many times as its count.
@@ -719,6 +710,42 @@
         if elem in self:
             super().__delitem__(elem)
 
+    def __eq__(self, other):
+        'True if all counts agree. Missing counts are treated as zero.'
+        if not isinstance(other, Counter):
+            return NotImplemented
+        return all(self[e] == other[e] for c in (self, other) for e in c)
+
+    def __ne__(self, other):
+        'True if any counts disagree. Missing counts are treated as zero.'
+        if not isinstance(other, Counter):
+            return NotImplemented
+        return not self == other
+
+    def __le__(self, other):
+        'True if all counts in self are a subset of those in other.'
+        if not isinstance(other, Counter):
+            return NotImplemented
+        return all(self[e] <= other[e] for c in (self, other) for e in c)
+
+    def __lt__(self, other):
+        'True if all counts in self are a proper subset of those in other.'
+        if not isinstance(other, Counter):
+            return NotImplemented
+        return self <= other and self != other
+
+    def __ge__(self, other):
+        'True if all counts in self are a superset of those in other.'
+        if not isinstance(other, Counter):
+            return NotImplemented
+        return all(self[e] >= other[e] for c in (self, other) for e in c)
+
+    def __gt__(self, other):
+        'True if all counts in self are a proper superset of those in other.'
+        if not isinstance(other, Counter):
+            return NotImplemented
+        return self >= other and self != other
+
     def __repr__(self):
         if not self:
             return f'{self.__class__.__name__}()'
@@ -739,12 +766,30 @@
     # To strip negative and zero counts, add-in an empty counter:
     #       c += Counter()
     #
-    # Rich comparison operators for multiset subset and superset tests
-    # are deliberately omitted due to semantic conflicts with the
-    # existing inherited dict equality method.  Subset and superset
-    # semantics ignore zero counts and require that p≤q ∧ p≥q → p=q;
-    # however, that would not be the case for p=Counter(a=1, b=0)
-    # and q=Counter(a=1) where the dictionaries are not equal.
+    # Results are ordered according to when an element is first
+    # encountered in the left operand and then by the order
+    # encountered in the right operand.
+    #
+    # When the multiplicities are all zero or one, multiset operations
+    # are guaranteed to be equivalent to the corresponding operations
+    # for regular sets.
+    #     Given counter multisets such as:
+    #         cp = Counter(a=1, b=0, c=1)
+    #         cq = Counter(c=1, d=0, e=1)
+    #     The corresponding regular sets would be:
+    #         sp = {'a', 'c'}
+    #         sq = {'c', 'e'}
+    #     All of the following relations would hold:
+    #         set(cp + cq) == sp | sq
+    #         set(cp - cq) == sp - sq
+    #         set(cp | cq) == sp | sq
+    #         set(cp & cq) == sp & sq
+    #         (cp == cq) == (sp == sq)
+    #         (cp != cq) == (sp != sq)
+    #         (cp <= cq) == (sp <= sq)
+    #         (cp < cq) == (sp < sq)
+    #         (cp >= cq) == (sp >= sq)
+    #         (cp > cq) == (sp > sq)
 
     def __add__(self, other):
         '''Add counts from two counters.
@@ -973,12 +1018,15 @@
 
     __copy__ = copy
 
-    def new_child(self, m=None):                # like Django's Context.push()
+    def new_child(self, m=None, **kwargs):      # like Django's Context.push()
         '''New ChainMap with a new map followed by all previous maps.
         If no map is provided, an empty dict is used.
+        Keyword arguments update the map or new empty dict.
         '''
         if m is None:
-            m = {}
+            m = kwargs
+        elif kwargs:
+            m.update(kwargs)
         return self.__class__(m, *self.maps)
 
     @property
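
Illustrative usage (a sketch, not part of this change): Counter gains total()
and multiset comparisons, and ChainMap.new_child() accepts keyword arguments.

    from collections import ChainMap, Counter

    c, d = Counter(a=3, b=1), Counter(a=1, b=1)
    print(c.total())   # 4, the sum of all counts
    print(d <= c)      # True: every count in d is <= the count in c
    print(c > d)       # True: proper multiset superset

    base = ChainMap({'color': 'red'})
    child = base.new_child(color='blue', size=10)
    print(child['color'], child['size'])  # blue 10
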
diff --git a/Lib/collections/abc.py b/Lib/collections/abc.py
index 891600d..86ca8b8 100644
--- a/Lib/collections/abc.py
+++ b/Lib/collections/abc.py
@@ -1,2 +1,3 @@
 from _collections_abc import *
 from _collections_abc import __all__
+from _collections_abc import _CallableGenericAlias
diff --git a/Lib/colorsys.py b/Lib/colorsys.py
index b93e384..0f52512 100644
--- a/Lib/colorsys.py
+++ b/Lib/colorsys.py
@@ -75,17 +75,18 @@
 def rgb_to_hls(r, g, b):
     maxc = max(r, g, b)
     minc = min(r, g, b)
-    # XXX Can optimize (maxc+minc) and (maxc-minc)
-    l = (minc+maxc)/2.0
+    sumc = (maxc+minc)
+    rangec = (maxc-minc)
+    l = sumc/2.0
     if minc == maxc:
         return 0.0, l, 0.0
     if l <= 0.5:
-        s = (maxc-minc) / (maxc+minc)
+        s = rangec / sumc
     else:
-        s = (maxc-minc) / (2.0-maxc-minc)
-    rc = (maxc-r) / (maxc-minc)
-    gc = (maxc-g) / (maxc-minc)
-    bc = (maxc-b) / (maxc-minc)
+        s = rangec / (2.0-sumc)
+    rc = (maxc-r) / rangec
+    gc = (maxc-g) / rangec
+    bc = (maxc-b) / rangec
     if r == maxc:
         h = bc-gc
     elif g == maxc:
diff --git a/Lib/compileall.py b/Lib/compileall.py
index fe7f450..3755e76 100644
--- a/Lib/compileall.py
+++ b/Lib/compileall.py
@@ -84,12 +84,14 @@
     if workers < 0:
         raise ValueError('workers must be greater or equal to 0')
     if workers != 1:
+        # Check if this is a system where ProcessPoolExecutor can function.
+        from concurrent.futures.process import _check_system_limits
         try:
-            # Only import when needed, as low resource platforms may
-            # fail to import it
-            from concurrent.futures import ProcessPoolExecutor
-        except ImportError:
+            _check_system_limits()
+        except NotImplementedError:
             workers = 1
+        else:
+            from concurrent.futures import ProcessPoolExecutor
     if maxlevels is None:
         maxlevels = sys.getrecursionlimit()
     files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels)
@@ -219,8 +221,8 @@
             if not force:
                 try:
                     mtime = int(os.stat(fullname).st_mtime)
-                    expect = struct.pack('<4sll', importlib.util.MAGIC_NUMBER,
-                                         0, mtime)
+                    expect = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER,
+                                         0, mtime & 0xFFFF_FFFF)
                     for cfile in opt_cfiles.values():
                         with open(cfile, 'rb') as chandle:
                             actual = chandle.read(12)
@@ -252,9 +254,8 @@
                 else:
                     print('*** ', end='')
                 # escape non-printable characters in msg
-                msg = err.msg.encode(sys.stdout.encoding,
-                                     errors='backslashreplace')
-                msg = msg.decode(sys.stdout.encoding)
+                encoding = sys.stdout.encoding or sys.getdefaultencoding()
+                msg = err.msg.encode(encoding, errors='backslashreplace').decode(encoding)
                 print(msg)
             except (SyntaxError, UnicodeError, OSError) as e:
                 success = False
@@ -366,9 +367,9 @@
                               'environment variable is set, and '
                               '"timestamp" otherwise.'))
     parser.add_argument('-o', action='append', type=int, dest='opt_levels',
-                        help=('Optimization levels to run compilation with.'
-                              'Default is -1 which uses optimization level of'
-                              'Python interpreter itself (specified by -O).'))
+                        help=('Optimization levels to run compilation with. '
+                              'Default is -1 which uses the optimization level '
+                              'of the Python interpreter itself (see -O).'))
     parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest',
                         help='Ignore symlinks pointing outside of the DIR')
     parser.add_argument('--hardlink-dupes', action='store_true',
@@ -405,7 +406,8 @@
     # if flist is provided then load it
     if args.flist:
         try:
-            with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
+            with (sys.stdin if args.flist=='-' else
+                    open(args.flist, encoding="utf-8")) as f:
                 for line in f:
                     compile_dests.append(line.strip())
         except OSError:
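
Illustrative usage (a sketch, not part of this change; 'myproject' is a
placeholder): with workers != 1 the module now probes ProcessPoolExecutor
viability via _check_system_limits() and silently falls back to serial
compilation where that fails.

    import compileall

    compileall.compile_dir('myproject', workers=0, quiet=1)  # 0 = cpu count
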
diff --git a/Lib/concurrent/futures/_base.py b/Lib/concurrent/futures/_base.py
index 00eb548..5c00f2e 100644
--- a/Lib/concurrent/futures/_base.py
+++ b/Lib/concurrent/futures/_base.py
@@ -284,13 +284,14 @@
         A named 2-tuple of sets. The first set, named 'done', contains the
         futures that completed (is finished or cancelled) before the wait
         completed. The second set, named 'not_done', contains uncompleted
-        futures.
+        futures. Duplicate futures given to *fs* are removed and will be
+        returned only once.
     """
+    fs = set(fs)
     with _AcquireFutures(fs):
-        done = set(f for f in fs
-                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
-        not_done = set(fs) - done
-
+        done = {f for f in fs
+                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]}
+        not_done = fs - done
         if (return_when == FIRST_COMPLETED) and done:
             return DoneAndNotDoneFutures(done, not_done)
         elif (return_when == FIRST_EXCEPTION) and done:
@@ -309,7 +310,7 @@
             f._waiters.remove(waiter)
 
     done.update(waiter.finished_futures)
-    return DoneAndNotDoneFutures(done, set(fs) - done)
+    return DoneAndNotDoneFutures(done, fs - done)
 
 class Future(object):
     """Represents the result of an asynchronous computation."""
@@ -386,7 +387,11 @@
 
     def __get_result(self):
         if self._exception:
-            raise self._exception
+            try:
+                raise self._exception
+            finally:
+                # Break a reference cycle with the exception in self._exception
+                self = None
         else:
             return self._result
 
@@ -426,20 +431,24 @@
                 timeout.
             Exception: If the call raised then that exception will be raised.
         """
-        with self._condition:
-            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
-                raise CancelledError()
-            elif self._state == FINISHED:
-                return self.__get_result()
+        try:
+            with self._condition:
+                if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                    raise CancelledError()
+                elif self._state == FINISHED:
+                    return self.__get_result()
 
-            self._condition.wait(timeout)
+                self._condition.wait(timeout)
 
-            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
-                raise CancelledError()
-            elif self._state == FINISHED:
-                return self.__get_result()
-            else:
-                raise TimeoutError()
+                if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                    raise CancelledError()
+                elif self._state == FINISHED:
+                    return self.__get_result()
+                else:
+                    raise TimeoutError()
+        finally:
+            # Break a reference cycle with the exception in self._exception
+            self = None
 
     def exception(self, timeout=None):
         """Return the exception raised by the call that the future represents.
diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py
index 90bc98b..6ee2ce6 100644
--- a/Lib/concurrent/futures/process.py
+++ b/Lib/concurrent/futures/process.py
@@ -373,7 +373,7 @@
         assert not self.thread_wakeup._closed
         wakeup_reader = self.thread_wakeup._reader
         readers = [result_reader, wakeup_reader]
-        worker_sentinels = [p.sentinel for p in self.processes.values()]
+        worker_sentinels = [p.sentinel for p in list(self.processes.values())]
         ready = mp.connection.wait(readers + worker_sentinels)
 
         cause = None
@@ -533,6 +533,14 @@
             raise NotImplementedError(_system_limited)
     _system_limits_checked = True
     try:
+        import multiprocessing.synchronize
+    except ImportError:
+        _system_limited = (
+            "This Python build lacks multiprocessing.synchronize, usually due "
+            "to named semaphores being unavailable on this platform."
+        )
+        raise NotImplementedError(_system_limited)
+    try:
         nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
     except (AttributeError, ValueError):
         # sysconf not available or setting not available
diff --git a/Lib/concurrent/futures/thread.py b/Lib/concurrent/futures/thread.py
index b7a2cac..51c942f 100644
--- a/Lib/concurrent/futures/thread.py
+++ b/Lib/concurrent/futures/thread.py
@@ -36,6 +36,12 @@
 # See bpo-39812 for context.
 threading._register_atexit(_python_exit)
 
+# At fork, reinitialize the `_global_shutdown_lock` lock in the child process
+if hasattr(os, 'register_at_fork'):
+    os.register_at_fork(before=_global_shutdown_lock.acquire,
+                        after_in_child=_global_shutdown_lock._at_fork_reinit,
+                        after_in_parent=_global_shutdown_lock.release)
+
 
 class _WorkItem(object):
     def __init__(self, future, fn, args, kwargs):
diff --git a/Lib/configparser.py b/Lib/configparser.py
index 924cc56..3470624 100644
--- a/Lib/configparser.py
+++ b/Lib/configparser.py
@@ -316,7 +316,7 @@
     def filename(self):
         """Deprecated, use `source'."""
         warnings.warn(
-            "The 'filename' attribute will be removed in future versions.  "
+            "The 'filename' attribute will be removed in Python 3.12. "
             "Use 'source' instead.",
             DeprecationWarning, stacklevel=2
         )
@@ -326,7 +326,7 @@
     def filename(self, value):
         """Deprecated, user `source'."""
         warnings.warn(
-            "The 'filename' attribute will be removed in future versions.  "
+            "The 'filename' attribute will be removed in Python 3.12. "
             "Use 'source' instead.",
             DeprecationWarning, stacklevel=2
         )
@@ -563,7 +563,7 @@
     # Regular expressions for parsing section headers and options
     _SECT_TMPL = r"""
         \[                                 # [
-        (?P<header>[^]]+)                  # very permissive!
+        (?P<header>.+)                     # very permissive!
         \]                                 # ]
         """
     _OPT_TMPL = r"""
@@ -690,6 +690,7 @@
         """
         if isinstance(filenames, (str, bytes, os.PathLike)):
             filenames = [filenames]
+        encoding = io.text_encoding(encoding)
         read_ok = []
         for filename in filenames:
             try:
@@ -756,7 +757,7 @@
     def readfp(self, fp, filename=None):
         """Deprecated, use read_file instead."""
         warnings.warn(
-            "This method will be removed in future versions.  "
+            "This method will be removed in Python 3.12. "
             "Use 'parser.read_file()' instead.",
             DeprecationWarning, stacklevel=2
         )
@@ -907,6 +908,9 @@
 
         If `space_around_delimiters' is True (the default), delimiters
         between keys and values are surrounded by spaces.
+
+        Please note that comments in the original configuration file are not
+        preserved when writing the configuration back.
         """
         if space_around_delimiters:
             d = " {} ".format(self._delimiters[0])
@@ -1005,7 +1009,7 @@
         Configuration files may include comments, prefixed by specific
         characters (`#' and `;' by default). Comments may appear on their own
         in an otherwise empty line or may be entered in lines holding values or
-        section names.
+        section names. Please note that comments get stripped off when reading configuration files.
         """
         elements_added = set()
         cursect = None                        # None, or a dictionary
@@ -1228,7 +1232,7 @@
         super().__init__(*args, **kwargs)
         warnings.warn(
             "The SafeConfigParser class has been renamed to ConfigParser "
-            "in Python 3.2. This alias will be removed in future versions."
+            "in Python 3.2. This alias will be removed in Python 3.12."
             " Use ConfigParser directly instead.",
             DeprecationWarning, stacklevel=2
         )
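
Illustrative usage (a sketch, not part of this change; 'settings.ini' is a
placeholder): read() now funnels its encoding through io.text_encoding(), so
passing one explicitly avoids the locale-dependent default and the
EncodingWarning under -X warn_default_encoding.

    import configparser

    parser = configparser.ConfigParser()
    parser.read('settings.ini', encoding='utf-8')
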
diff --git a/Lib/contextlib.py b/Lib/contextlib.py
index ff92d9f..c63a849 100644
--- a/Lib/contextlib.py
+++ b/Lib/contextlib.py
@@ -9,7 +9,7 @@
 __all__ = ["asynccontextmanager", "contextmanager", "closing", "nullcontext",
            "AbstractContextManager", "AbstractAsyncContextManager",
            "AsyncExitStack", "ContextDecorator", "ExitStack",
-           "redirect_stdout", "redirect_stderr", "suppress"]
+           "redirect_stdout", "redirect_stderr", "suppress", "aclosing"]
 
 
 class AbstractContextManager(abc.ABC):
@@ -80,6 +80,22 @@
         return inner
 
 
+class AsyncContextDecorator(object):
+    "A base class or mixin that enables async context managers to work as decorators."
+
+    def _recreate_cm(self):
+        """Return a recreated instance of self.
+        """
+        return self
+
+    def __call__(self, func):
+        @wraps(func)
+        async def inner(*args, **kwds):
+            async with self._recreate_cm():
+                return await func(*args, **kwds)
+        return inner
+
+
 class _GeneratorContextManagerBase:
     """Shared functionality for @contextmanager and @asynccontextmanager."""
 
@@ -97,18 +113,20 @@
         # for the class instead.
         # See http://bugs.python.org/issue19404 for more details.
 
-
-class _GeneratorContextManager(_GeneratorContextManagerBase,
-                               AbstractContextManager,
-                               ContextDecorator):
-    """Helper for @contextmanager decorator."""
-
     def _recreate_cm(self):
-        # _GCM instances are one-shot context managers, so the
+        # _GCMB instances are one-shot context managers, so the
         # CM must be recreated each time a decorated function is
         # called
         return self.__class__(self.func, self.args, self.kwds)
 
+
+class _GeneratorContextManager(
+    _GeneratorContextManagerBase,
+    AbstractContextManager,
+    ContextDecorator,
+):
+    """Helper for @contextmanager decorator."""
+
     def __enter__(self):
         # do not keep args and kwds alive unnecessarily
         # they are only needed for recreation, which is not possible anymore
@@ -118,8 +136,8 @@
         except StopIteration:
             raise RuntimeError("generator didn't yield") from None
 
-    def __exit__(self, type, value, traceback):
-        if type is None:
+    def __exit__(self, typ, value, traceback):
+        if typ is None:
             try:
                 next(self.gen)
             except StopIteration:
@@ -130,9 +148,9 @@
             if value is None:
                 # Need to force instantiation so we can reliably
                 # tell if we get the same exception back
-                value = type()
+                value = typ()
             try:
-                self.gen.throw(type, value, traceback)
+                self.gen.throw(typ, value, traceback)
             except StopIteration as exc:
                 # Suppress StopIteration *unless* it's the same exception that
                 # was passed to throw().  This prevents a StopIteration
@@ -142,74 +160,93 @@
                 # Don't re-raise the passed in exception. (issue27122)
                 if exc is value:
                     return False
-                # Likewise, avoid suppressing if a StopIteration exception
+                # Avoid suppressing if a StopIteration exception
                 # was passed to throw() and later wrapped into a RuntimeError
-                # (see PEP 479).
-                if type is StopIteration and exc.__cause__ is value:
+                # (see PEP 479 for sync generators; async generators also
+                # have this behavior). But do this only if the exception wrapped
+                # by the RuntimeError is actually Stop(Async)Iteration (see
+                # issue29692).
+                if (
+                    isinstance(value, StopIteration)
+                    and exc.__cause__ is value
+                ):
                     return False
                 raise
-            except:
+            except BaseException as exc:
                 # only re-raise if it's *not* the exception that was
                 # passed to throw(), because __exit__() must not raise
                 # an exception unless __exit__() itself failed.  But throw()
                 # has to raise the exception to signal propagation, so this
                 # fixes the impedance mismatch between the throw() protocol
                 # and the __exit__() protocol.
-                #
-                # This cannot use 'except BaseException as exc' (as in the
-                # async implementation) to maintain compatibility with
-                # Python 2, where old-style class exceptions are not caught
-                # by 'except BaseException'.
-                if sys.exc_info()[1] is value:
-                    return False
-                raise
+                if exc is not value:
+                    raise
+                return False
             raise RuntimeError("generator didn't stop after throw()")
 
-
-class _AsyncGeneratorContextManager(_GeneratorContextManagerBase,
-                                    AbstractAsyncContextManager):
-    """Helper for @asynccontextmanager."""
+class _AsyncGeneratorContextManager(
+    _GeneratorContextManagerBase,
+    AbstractAsyncContextManager,
+    AsyncContextDecorator,
+):
+    """Helper for @asynccontextmanager decorator."""
 
     async def __aenter__(self):
+        # do not keep args and kwds alive unnecessarily
+        # they are only needed for recreation, which is not possible anymore
+        del self.args, self.kwds, self.func
         try:
-            return await self.gen.__anext__()
+            return await anext(self.gen)
         except StopAsyncIteration:
             raise RuntimeError("generator didn't yield") from None
 
     async def __aexit__(self, typ, value, traceback):
         if typ is None:
             try:
-                await self.gen.__anext__()
+                await anext(self.gen)
             except StopAsyncIteration:
-                return
+                return False
             else:
                 raise RuntimeError("generator didn't stop")
         else:
             if value is None:
+                # Need to force instantiation so we can reliably
+                # tell if we get the same exception back
                 value = typ()
-            # See _GeneratorContextManager.__exit__ for comments on subtleties
-            # in this implementation
             try:
                 await self.gen.athrow(typ, value, traceback)
-                raise RuntimeError("generator didn't stop after athrow()")
             except StopAsyncIteration as exc:
+                # Suppress StopIteration *unless* it's the same exception that
+                # was passed to throw().  This prevents a StopIteration
+                # raised inside the "with" statement from being suppressed.
                 return exc is not value
             except RuntimeError as exc:
+                # Don't re-raise the passed in exception. (issue27122)
                 if exc is value:
                     return False
-                # Avoid suppressing if a StopIteration exception
-                # was passed to throw() and later wrapped into a RuntimeError
+                # Avoid suppressing if a Stop(Async)Iteration exception
+                # was passed to athrow() and later wrapped into a RuntimeError
                 # (see PEP 479 for sync generators; async generators also
                 # have this behavior). But do this only if the exception wrapped
+                # by the RuntimeError is actually Stop(Async)Iteration (see
                 # issue29692).
-                if isinstance(value, (StopIteration, StopAsyncIteration)):
-                    if exc.__cause__ is value:
-                        return False
+                if (
+                    isinstance(value, (StopIteration, StopAsyncIteration))
+                    and exc.__cause__ is value
+                ):
+                    return False
                 raise
             except BaseException as exc:
+                # only re-raise if it's *not* the exception that was
+                # passed to throw(), because __exit__() must not raise
+                # an exception unless __exit__() itself failed.  But throw()
+                # has to raise the exception to signal propagation, so this
+                # fixes the impedance mismatch between the throw() protocol
+                # and the __exit__() protocol.
                 if exc is not value:
                     raise
+                return False
+            raise RuntimeError("generator didn't stop after athrow()")
 
 
 def contextmanager(func):
@@ -303,6 +340,32 @@
         self.thing.close()
 
 
+class aclosing(AbstractAsyncContextManager):
+    """Async context manager for safely finalizing an asynchronously cleaned-up
+    resource such as an async generator, calling its ``aclose()`` method.
+
+    Code like this:
+
+        async with aclosing(<module>.fetch(<arguments>)) as agen:
+            <block>
+
+    is equivalent to this:
+
+        agen = <module>.fetch(<arguments>)
+        try:
+            <block>
+        finally:
+            await agen.aclose()
+
+    """
+    def __init__(self, thing):
+        self.thing = thing
+    async def __aenter__(self):
+        return self.thing
+    async def __aexit__(self, *exc_info):
+        await self.thing.aclose()
+
+
 class _RedirectStream(AbstractContextManager):
 
     _stream = None
@@ -477,10 +540,10 @@
             # Context may not be correct, so find the end of the chain
             while 1:
                 exc_context = new_exc.__context__
-                if exc_context is old_exc:
+                if exc_context is None or exc_context is old_exc:
                     # Context is already set correctly (see issue 20317)
                     return
-                if exc_context is None or exc_context is frame_exc:
+                if exc_context is frame_exc:
                     break
                 new_exc = exc_context
             # Change the end of the chain to point to the exception
@@ -611,10 +674,10 @@
             # Context may not be correct, so find the end of the chain
             while 1:
                 exc_context = new_exc.__context__
-                if exc_context is old_exc:
+                if exc_context is None or exc_context is old_exc:
                     # Context is already set correctly (see issue 20317)
                     return
-                if exc_context is None or exc_context is frame_exc:
+                if exc_context is frame_exc:
                     break
                 new_exc = exc_context
             # Change the end of the chain to point to the exception
@@ -655,7 +718,7 @@
         return received_exc and suppressed_exc
 
 
-class nullcontext(AbstractContextManager):
+class nullcontext(AbstractContextManager, AbstractAsyncContextManager):
     """Context manager that does no additional processing.
 
     Used as a stand-in for a normal context manager, when a particular
@@ -674,3 +737,9 @@
 
     def __exit__(self, *excinfo):
         pass
+
+    async def __aenter__(self):
+        return self.enter_result
+
+    async def __aexit__(self, *excinfo):
+        pass
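
Illustrative usage (a sketch, not part of this change): aclosing() finalizes
async generators deterministically, and nullcontext() now also supports
'async with'.

    import asyncio
    from contextlib import aclosing, nullcontext

    async def ticker():
        for i in range(3):
            yield i

    async def main():
        async with aclosing(ticker()) as gen:  # aclose() runs on exit
            async for i in gen:
                if i == 1:
                    break
        async with nullcontext('value') as v:  # async-capable stand-in
            assert v == 'value'

    asyncio.run(main())
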
diff --git a/Lib/copy.py b/Lib/copy.py
index 41873f2..69bac98 100644
--- a/Lib/copy.py
+++ b/Lib/copy.py
@@ -39,8 +39,8 @@
     set of components copied
 
 This version does not copy types like module, class, function, method,
-nor stack trace, stack frame, nor file, socket, window, nor array, nor
-any similar types.
+nor stack trace, stack frame, nor file, socket, window, nor any
+similar types.
 
 Classes can use the same interfaces to control copying that they use
 to control pickling: they can define methods called __getinitargs__(),
@@ -192,6 +192,7 @@
 d[str] = _deepcopy_atomic
 d[types.CodeType] = _deepcopy_atomic
 d[type] = _deepcopy_atomic
+d[range] = _deepcopy_atomic
 d[types.BuiltinFunctionType] = _deepcopy_atomic
 d[types.FunctionType] = _deepcopy_atomic
 d[weakref.ref] = _deepcopy_atomic
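
Illustrative usage (a sketch, not part of this change): since range objects
are immutable, deepcopy() now returns them as-is.

    import copy

    r = range(10)
    assert copy.deepcopy(r) is r
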
diff --git a/Lib/copyreg.py b/Lib/copyreg.py
index 7ab8c12..356db6f 100644
--- a/Lib/copyreg.py
+++ b/Lib/copyreg.py
@@ -36,6 +36,12 @@
 
     pickle(complex, pickle_complex, complex)
 
+def pickle_union(obj):
+    import functools, operator
+    return functools.reduce, (operator.or_, obj.__args__)
+
+pickle(type(int | str), pickle_union)
+
 # Support for pickling new-style objects
 
 def _reconstructor(cls, base, state):
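
Illustrative usage (a sketch, not part of this change): the new reduction
makes PEP 604 union types picklable.

    import pickle

    t = int | str
    assert pickle.loads(pickle.dumps(t)) == (int | str)
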
diff --git a/Lib/csv.py b/Lib/csv.py
index dc85077..bb3ee26 100644
--- a/Lib/csv.py
+++ b/Lib/csv.py
@@ -409,14 +409,10 @@
                 continue # skip rows that have irregular number of columns
 
             for col in list(columnTypes.keys()):
-
-                for thisType in [int, float, complex]:
-                    try:
-                        thisType(row[col])
-                        break
-                    except (ValueError, OverflowError):
-                        pass
-                else:
+                thisType = complex
+                try:
+                    thisType(row[col])
+                except (ValueError, OverflowError):
                     # fallback to length of string
                     thisType = len(row[col])
 
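
Illustrative usage (a sketch, not part of this change): Sniffer.has_header()
now probes only complex(), which accepts int and float strings as well,
before falling back to comparing string lengths.

    import csv

    sample = 'name,age\nalice,30\nbob,25\n'
    print(csv.Sniffer().has_header(sample))  # True
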
diff --git a/Lib/ctypes/_aix.py b/Lib/ctypes/_aix.py
index 190cac6..fc3e95c 100644
--- a/Lib/ctypes/_aix.py
+++ b/Lib/ctypes/_aix.py
@@ -163,7 +163,7 @@
             return member
     else:
         # 32-bit legacy names - both shr.o and shr4.o exist.
-        # shr.o is the preffered name so we look for shr.o first
+        # shr.o is the preferred name so we look for shr.o first
         #  i.e., shr4.o is returned only when shr.o does not exist
         for name in ['shr.o', 'shr4.o']:
             member = get_one_match(re.escape(name), members)
@@ -282,7 +282,7 @@
         if path.exists(archive):
             members = get_shared(get_ld_headers(archive))
             member = get_member(re.escape(name), members)
-            if member != None:
+            if member is not None:
                 return (base, member)
             else:
                 return (None, None)
@@ -307,7 +307,7 @@
 
     libpaths = get_libpaths()
     (base, member) = find_shared(libpaths, name)
-    if base != None:
+    if base is not None:
         return f"{base}({member})"
 
     # To get here, a member in an archive has not been found
diff --git a/Lib/dataclasses.py b/Lib/dataclasses.py
index 530d3e9..105a95b 100644
--- a/Lib/dataclasses.py
+++ b/Lib/dataclasses.py
@@ -6,8 +6,9 @@
 import keyword
 import builtins
 import functools
+import abc
 import _thread
-from types import GenericAlias
+from types import FunctionType, GenericAlias
 
 
 __all__ = ['dataclass',
@@ -15,6 +16,7 @@
            'Field',
            'FrozenInstanceError',
            'InitVar',
+           'KW_ONLY',
            'MISSING',
 
            # Helper functions.
@@ -151,6 +153,20 @@
 #
 # See _hash_action (below) for a coded version of this table.
 
+# __match_args__
+#
+#    +--- match_args= parameter
+#    |
+#    v    |       |       |
+#         |  no   |  yes  |  <--- class has __match_args__ in __dict__?
+# +=======+=======+=======+
+# | False |       |       |
+# +-------+-------+-------+
+# | True  | add   |       |  <- the default
+# +=======+=======+=======+
+# __match_args__ is always added unless the class already defines it. It is a
+# tuple of __init__ parameter names; non-init fields must be matched by keyword.
+
 
 # Raised when an attempt is made to modify a frozen class.
 class FrozenInstanceError(AttributeError): pass
@@ -169,6 +185,12 @@
     pass
 MISSING = _MISSING_TYPE()
 
+# A sentinel object to indicate that the fields following it are keyword-only
+# by default.  Use a class to give it a better repr.
+class _KW_ONLY_TYPE:
+    pass
+KW_ONLY = _KW_ONLY_TYPE()
+
 # Since most per-field metadata will be unused, create an empty
 # read-only proxy that can be shared among all fields.
 _EMPTY_METADATA = types.MappingProxyType({})
@@ -207,7 +229,7 @@
         self.type = type
 
     def __repr__(self):
-        if isinstance(self.type, type):
+        if isinstance(self.type, type) and not isinstance(self.type, GenericAlias):
             type_name = self.type.__name__
         else:
             # typing objects, e.g. List[int]
@@ -217,7 +239,6 @@
     def __class_getitem__(cls, type):
         return InitVar(type)
 
-
 # Instances of Field are only ever created from within this module,
 # and only from the field() function, although Field instances are
 # exposed externally as (conceptually) read-only objects.
@@ -238,11 +259,12 @@
                  'init',
                  'compare',
                  'metadata',
+                 'kw_only',
                  '_field_type',  # Private: not to be used by user code.
                  )
 
     def __init__(self, default, default_factory, init, repr, hash, compare,
-                 metadata):
+                 metadata, kw_only):
         self.name = None
         self.type = None
         self.default = default
@@ -254,6 +276,7 @@
         self.metadata = (_EMPTY_METADATA
                          if metadata is None else
                          types.MappingProxyType(metadata))
+        self.kw_only = kw_only
         self._field_type = None
 
     def __repr__(self):
@@ -267,6 +290,7 @@
                 f'hash={self.hash!r},'
                 f'compare={self.compare!r},'
                 f'metadata={self.metadata!r},'
+                f'kw_only={self.kw_only!r},'
                 f'_field_type={self._field_type}'
                 ')')
 
@@ -320,17 +344,19 @@
 # so that a type checker can be told (via overloads) that this is a
 # function whose type depends on its parameters.
 def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
-          hash=None, compare=True, metadata=None):
+          hash=None, compare=True, metadata=None, kw_only=MISSING):
     """Return an object to identify dataclass fields.
 
     default is the default value of the field.  default_factory is a
     0-argument function called to initialize a field's value.  If init
-    is True, the field will be a parameter to the class's __init__()
-    function.  If repr is True, the field will be included in the
-    object's repr().  If hash is True, the field will be included in
-    the object's hash().  If compare is True, the field will be used
-    in comparison functions.  metadata, if specified, must be a
-    mapping which is stored but not otherwise examined by dataclass.
+    is true, the field will be a parameter to the class's __init__()
+    function.  If repr is true, the field will be included in the
+    object's repr().  If hash is true, the field will be included in the
+    object's hash().  If compare is true, the field will be used in
+    comparison functions.  metadata, if specified, must be a mapping
+    which is stored but not otherwise examined by dataclass.  If kw_only
+    is true, the field will become a keyword-only parameter to
+    __init__().
 
     It is an error to specify both default and default_factory.
     """
@@ -338,7 +364,16 @@
     if default is not MISSING and default_factory is not MISSING:
         raise ValueError('cannot specify both default and default_factory')
     return Field(default, default_factory, init, repr, hash, compare,
-                 metadata)
+                 metadata, kw_only)
+
+
+def _fields_in_init_order(fields):
+    # Returns the fields as __init__ will output them.  It returns 2 tuples:
+    # the first for normal args, and the second for keyword args.
+
+    return (tuple(f for f in fields if f.init and not f.kw_only),
+            tuple(f for f in fields if f.init and f.kw_only)
+            )
 
 
 def _tuple_str(obj_name, fields):
@@ -395,7 +430,6 @@
 
     local_vars = ', '.join(locals.keys())
     txt = f"def __create_fn__({local_vars}):\n{txt}\n return {name}"
-
     ns = {}
     exec(txt, globals, ns)
     return ns['__create_fn__'](**locals)
@@ -413,7 +447,7 @@
     return f'{self_name}.{name}={value}'
 
 
-def _field_init(f, frozen, globals, self_name):
+def _field_init(f, frozen, globals, self_name, slots):
     # Return the text of the line in the body of __init__ that will
     # initialize this field.
 
@@ -453,9 +487,15 @@
                 globals[default_name] = f.default
                 value = f.name
         else:
-            # This field does not need initialization.  Signify that
-            # to the caller by returning None.
-            return None
+            # If the class has slots, then initialize this field.
+            if slots and f.default is not MISSING:
+                globals[default_name] = f.default
+                value = default_name
+            else:
+                # This field does not need initialization: reading from it will
+                # just use the class attribute that contains the default.
+                # Signify that to the caller by returning None.
+                return None
 
     # Only test this now, so that we can create variables for the
     # default.  However, return None to signify that we're not going
@@ -486,7 +526,8 @@
     return f'{f.name}:_type_{f.name}{default}'
 
 
-def _init_fn(fields, frozen, has_post_init, self_name, globals):
+def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init,
+             self_name, globals, slots):
     # fields contains both real fields and InitVar pseudo-fields.
 
     # Make sure we don't have fields without defaults following fields
@@ -494,9 +535,10 @@
     # function source code, but catching it here gives a better error
     # message, and future-proofs us in case we build up the function
     # using ast.
+
     seen_default = False
-    for f in fields:
-        # Only consider fields in the __init__ call.
+    for f in std_fields:
+        # Only consider the non-kw-only fields in the __init__ call.
         if f.init:
             if not (f.default is MISSING and f.default_factory is MISSING):
                 seen_default = True
@@ -512,7 +554,7 @@
 
     body_lines = []
     for f in fields:
-        line = _field_init(f, frozen, locals, self_name)
+        line = _field_init(f, frozen, locals, self_name, slots)
         # line is None means that this field doesn't require
         # initialization (it's a pseudo-field).  Just skip it.
         if line:
@@ -528,8 +570,15 @@
     if not body_lines:
         body_lines = ['pass']
 
+    _init_params = [_init_param(f) for f in std_fields]
+    if kw_only_fields:
+        # Add the keyword-only args.  Because the * can only be added if
+        # there's at least one keyword-only arg, there needs to be a test here
+        # (instead of just concatenating the lists together).
+        _init_params += ['*']
+        _init_params += [_init_param(f) for f in kw_only_fields]
     return _create_fn('__init__',
-                      [self_name] + [_init_param(f) for f in fields if f.init],
+                      [self_name] + _init_params,
                       body_lines,
                       locals=locals,
                       globals=globals,
@@ -608,6 +657,9 @@
     return (a_type is dataclasses.InitVar
             or type(a_type) is dataclasses.InitVar)
 
+def _is_kw_only(a_type, dataclasses):
+    return a_type is dataclasses.KW_ONLY
+
 
 def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
     # Given a type annotation string, does it refer to a_type in
@@ -668,10 +720,11 @@
     return False
 
 
-def _get_field(cls, a_name, a_type):
-    # Return a Field object for this field name and type.  ClassVars
-    # and InitVars are also returned, but marked as such (see
-    # f._field_type).
+def _get_field(cls, a_name, a_type, default_kw_only):
+    # Return a Field object for this field name and type.  ClassVars and
+    # InitVars are also returned, but marked as such (see f._field_type).
+    # default_kw_only is the value of kw_only to use if there isn't a field()
+    # that defines it.
 
     # If the default value isn't derived from Field, then it's only a
     # normal default value.  Convert it to a Field().
@@ -696,7 +749,7 @@
     # In addition to checking for actual types here, also check for
     # string annotations.  get_type_hints() won't always work for us
     # (see https://github.com/python/typing/issues/508 for example),
-    # plus it's expensive and would require an eval for every stirng
+    # plus it's expensive and would require an eval for every string
     # annotation.  So, make a best effort to see if this is a ClassVar
     # or InitVar using regex's and checking that the thing referenced
     # is actually of the correct type.
@@ -742,6 +795,19 @@
         # init=<not-the-default-init-value>)?  It makes no sense for
         # ClassVar and InitVar to specify init=<anything>.
 
+    # kw_only validation and assignment.
+    if f._field_type in (_FIELD, _FIELD_INITVAR):
+        # For real and InitVar fields, if kw_only wasn't specified use the
+        # default value.
+        if f.kw_only is MISSING:
+            f.kw_only = default_kw_only
+    else:
+        # Make sure kw_only isn't set for ClassVars
+        assert f._field_type is _FIELD_CLASSVAR
+        if f.kw_only is not MISSING:
+            raise TypeError(f'field {f.name} is a ClassVar but specifies '
+                            'kw_only')
+
     # For real fields, disallow mutable defaults for known types.
     if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
         raise ValueError(f'mutable default {type(f.default)} for field '
@@ -749,12 +815,19 @@
 
     return f
 
+def _set_qualname(cls, value):
+    # Ensure that the functions returned from _create_fn use the proper
+    # __qualname__ (the class they belong to).
+    if isinstance(value, FunctionType):
+        value.__qualname__ = f"{cls.__qualname__}.{value.__name__}"
+    return value
 
 def _set_new_attribute(cls, name, value):
     # Never overwrites an existing attribute.  Returns True if the
     # attribute already exists.
     if name in cls.__dict__:
         return True
+    _set_qualname(cls, value)
     setattr(cls, name, value)
     return False
 
@@ -769,7 +842,7 @@
 
 def _hash_add(cls, fields, globals):
     flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
-    return _hash_fn(flds, globals)
+    return _set_qualname(cls, _hash_fn(flds, globals))
 
 def _hash_exception(cls, fields, globals):
     # Raise an exception.
@@ -806,7 +879,8 @@
 # version of this table.
 
 
-def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
+def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen,
+                   match_args, kw_only, slots):
     # Now that dicts retain insertion order, there's no reason to use
     # an ordered dict.  I am leveraging that ordering here, because
     # derived class fields overwrite base class fields, but the order
@@ -836,7 +910,7 @@
         # Only process classes that have been processed by our
         # decorator.  That is, they have a _FIELDS attribute.
         base_fields = getattr(b, _FIELDS, None)
-        if base_fields:
+        if base_fields is not None:
             has_dataclass_bases = True
             for f in base_fields.values():
                 fields[f.name] = f
@@ -860,8 +934,27 @@
     # Now find fields in our class.  While doing so, validate some
     # things, and set the default values (as class attributes) where
     # we can.
-    cls_fields = [_get_field(cls, name, type)
-                  for name, type in cls_annotations.items()]
+    cls_fields = []
+    # Get a reference to this module for the _is_kw_only() test.
+    KW_ONLY_seen = False
+    dataclasses = sys.modules[__name__]
+    for name, type in cls_annotations.items():
+        # See if this is a marker to change the value of kw_only.
+        if (_is_kw_only(type, dataclasses)
+            or (isinstance(type, str)
+                and _is_type(type, cls, dataclasses, dataclasses.KW_ONLY,
+                             _is_kw_only))):
+            # Switch the default to kw_only=True, and ignore this
+            # annotation: it's not a real field.
+            if KW_ONLY_seen:
+                raise TypeError(f'{name!r} is KW_ONLY, but KW_ONLY '
+                                'has already been specified')
+            KW_ONLY_seen = True
+            kw_only = True
+        else:
+            # Otherwise it's a field of some type.
+            cls_fields.append(_get_field(cls, name, type, kw_only))
+
     for f in cls_fields:
         fields[f.name] = f
 
@@ -916,15 +1009,22 @@
     if order and not eq:
         raise ValueError('eq must be true if order is true')
 
+    # Include InitVars and regular fields (so, not ClassVars).  This is
+    # initialized here, outside of the "if init:" test, because std_init_fields
+    # is used with match_args, below.
+    all_init_fields = [f for f in fields.values()
+                       if f._field_type in (_FIELD, _FIELD_INITVAR)]
+    (std_init_fields,
+     kw_only_init_fields) = _fields_in_init_order(all_init_fields)
+
     if init:
         # Does this class have a post-init function?
         has_post_init = hasattr(cls, _POST_INIT_NAME)
 
-        # Include InitVars and regular fields (so, not ClassVars).
-        flds = [f for f in fields.values()
-                if f._field_type in (_FIELD, _FIELD_INITVAR)]
         _set_new_attribute(cls, '__init__',
-                           _init_fn(flds,
+                           _init_fn(all_init_fields,
+                                    std_init_fields,
+                                    kw_only_init_fields,
                                     frozen,
                                     has_post_init,
                                     # The name to use for the "self"
@@ -933,6 +1033,7 @@
                                     '__dataclass_self__' if 'self' in fields
                                             else 'self',
                                     globals,
+                                    slots,
                           ))
 
     # Get the fields as a list, and include only real fields.  This is
@@ -944,7 +1045,7 @@
         _set_new_attribute(cls, '__repr__', _repr_fn(flds, globals))
 
     if eq:
-        # Create _eq__ method.  There's no need for a __ne__ method,
+        # Create __eq__ method.  There's no need for a __ne__ method,
         # since python will call __eq__ and negate it.
         flds = [f for f in field_list if f.compare]
         self_tuple = _tuple_str('self', flds)
@@ -992,11 +1093,70 @@
         cls.__doc__ = (cls.__name__ +
                        str(inspect.signature(cls)).replace(' -> None', ''))
 
+    if match_args:
+        # This could probably be computed just once.
+        _set_new_attribute(cls, '__match_args__',
+                           tuple(f.name for f in std_init_fields))
+
+    if slots:
+        cls = _add_slots(cls, frozen)
+
+    abc.update_abstractmethods(cls)
+
+    return cls
+
+
+# _dataclass_getstate and _dataclass_setstate are needed for pickling frozen
+# classes with slots.  These could be slightly more performant if we generated
+# the code instead of iterating over fields.  But that can be a project for
+# another day, if performance becomes an issue.
+def _dataclass_getstate(self):
+    return [getattr(self, f.name) for f in fields(self)]
+
+
+def _dataclass_setstate(self, state):
+    for field, value in zip(fields(self), state):
+        # use setattr because dataclass may be frozen
+        object.__setattr__(self, field.name, value)
+
+
+def _add_slots(cls, is_frozen):
+    # Need to create a new class, since we can't set __slots__
+    #  after a class has been created.
+
+    # Make sure __slots__ isn't already set.
+    if '__slots__' in cls.__dict__:
+        raise TypeError(f'{cls.__name__} already specifies __slots__')
+
+    # Create a new dict for our new class.
+    cls_dict = dict(cls.__dict__)
+    field_names = tuple(f.name for f in fields(cls))
+    cls_dict['__slots__'] = field_names
+    for field_name in field_names:
+        # Remove our attributes, if present. Their defaults are
+        #  applied by the generated __init__ instead.
+        cls_dict.pop(field_name, None)
+
+    # Remove __dict__ itself.
+    cls_dict.pop('__dict__', None)
+
+    # And finally create the class.
+    qualname = getattr(cls, '__qualname__', None)
+    cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
+    if qualname is not None:
+        cls.__qualname__ = qualname
+
+    if is_frozen:
+        # Need this for pickling frozen classes with slots.
+        cls.__getstate__ = _dataclass_getstate
+        cls.__setstate__ = _dataclass_setstate
+
     return cls
 
 
 def dataclass(cls=None, /, *, init=True, repr=True, eq=True, order=False,
-              unsafe_hash=False, frozen=False):
+              unsafe_hash=False, frozen=False, match_args=True,
+              kw_only=False, slots=False):
     """Returns the same class as was passed in, with dunder methods
     added based on the fields defined in the class.
 
@@ -1006,11 +1166,15 @@
     repr is true, a __repr__() method is added. If order is true, rich
     comparison dunder methods are added. If unsafe_hash is true, a
     __hash__() method function is added. If frozen is true, fields may
-    not be assigned to after instance creation.
+    not be assigned to after instance creation. If match_args is true,
+    the __match_args__ tuple is added. If kw_only is true, then by
+    default all fields are keyword-only. If slots is true, a
+    __slots__ attribute is added.
     """
 
     def wrap(cls):
-        return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
+        return _process_class(cls, init, repr, eq, order, unsafe_hash,
+                              frozen, match_args, kw_only, slots)
 
     # See if we're being called as @dataclass or @dataclass().
     if cls is None:
@@ -1047,7 +1211,7 @@
 def is_dataclass(obj):
     """Returns True if obj is a dataclass or an instance of a
     dataclass."""
-    cls = obj if isinstance(obj, type) else type(obj)
+    cls = obj if isinstance(obj, type) and not isinstance(obj, GenericAlias) else type(obj)
     return hasattr(cls, _FIELDS)
 
 
@@ -1169,7 +1333,7 @@
 
 def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
                    repr=True, eq=True, order=False, unsafe_hash=False,
-                   frozen=False):
+                   frozen=False, match_args=True, kw_only=False, slots=False):
     """Return a new dynamically created dataclass.
 
     The dataclass name will be 'cls_name'.  'fields' is an iterable
@@ -1195,14 +1359,12 @@
 
     if namespace is None:
         namespace = {}
-    else:
-        # Copy namespace since we're going to mutate it.
-        namespace = namespace.copy()
 
     # While we're looking through the field names, validate that they
     # are identifiers, are not keywords, and not duplicates.
     seen = set()
-    anns = {}
+    annotations = {}
+    defaults = {}
     for item in fields:
         if isinstance(item, str):
             name = item
@@ -1211,7 +1373,7 @@
             name, tp, = item
         elif len(item) == 3:
             name, tp, spec = item
-            namespace[name] = spec
+            defaults[name] = spec
         else:
             raise TypeError(f'Invalid field: {item!r}')
 
@@ -1223,14 +1385,22 @@
             raise TypeError(f'Field name duplicated: {name!r}')
 
         seen.add(name)
-        anns[name] = tp
+        annotations[name] = tp
 
-    namespace['__annotations__'] = anns
+    # Update 'ns' with the user-supplied namespace plus our calculated values.
+    def exec_body_callback(ns):
+        ns.update(namespace)
+        ns.update(defaults)
+        ns['__annotations__'] = annotations
+
     # We use `types.new_class()` instead of simply `type()` to allow dynamic creation
-    # of generic dataclassses.
-    cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
+    # of generic dataclasses.
+    cls = types.new_class(cls_name, bases, {}, exec_body_callback)
+
+    # Apply the normal decorator.
     return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
-                     unsafe_hash=unsafe_hash, frozen=frozen)
+                     unsafe_hash=unsafe_hash, frozen=frozen,
+                     match_args=match_args, kw_only=kw_only, slots=slots)
 
 
 def replace(obj, /, **changes):
@@ -1271,7 +1441,7 @@
             continue
 
         if f.name not in changes:
-            if f._field_type is _FIELD_INITVAR:
+            if f._field_type is _FIELD_INITVAR and f.default is MISSING:
                 raise ValueError(f"InitVar {f.name!r} "
                                  'must be specified with replace()')
             changes[f.name] = getattr(obj, f.name)
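
Editor's note: taken together, the new decorator options cover the Python
3.10 dataclass features: kw_only/KW_ONLY for keyword-only fields, slots=True
for a generated __slots__, and match_args for structural pattern matching.
A small end-to-end sketch (assuming Python 3.10+):

    from dataclasses import dataclass, KW_ONLY

    @dataclass(slots=True)
    class Point:
        x: float
        y: float
        _: KW_ONLY             # fields after this pseudo-field are keyword-only
        label: str = "origin"

    p = Point(1.0, 2.0, label="start")
    print(Point.__slots__)        # ('x', 'y', 'label')
    print(Point.__match_args__)   # ('x', 'y')

Note how the kw_only field is excluded from __match_args__, mirroring the
std_init_fields/kw_only_init_fields split above.
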
diff --git a/Lib/datetime.py b/Lib/datetime.py
index e508d99..6bf37cc 100644
--- a/Lib/datetime.py
+++ b/Lib/datetime.py
@@ -11,6 +11,7 @@
 import time as _time
 import math as _math
 import sys
+from operator import index as _index
 
 def _cmp(x, y):
     return 0 if x == y else 1 if x > y else -1
@@ -380,42 +381,10 @@
                          "-timedelta(hours=24) and timedelta(hours=24)" %
                          (name, offset))
 
-def _check_int_field(value):
-    if isinstance(value, int):
-        return value
-    if isinstance(value, float):
-        raise TypeError('integer argument expected, got float')
-    try:
-        value = value.__index__()
-    except AttributeError:
-        pass
-    else:
-        if not isinstance(value, int):
-            raise TypeError('__index__ returned non-int (type %s)' %
-                            type(value).__name__)
-        return value
-    orig = value
-    try:
-        value = value.__int__()
-    except AttributeError:
-        pass
-    else:
-        if not isinstance(value, int):
-            raise TypeError('__int__ returned non-int (type %s)' %
-                            type(value).__name__)
-        import warnings
-        warnings.warn("an integer is required (got type %s)"  %
-                      type(orig).__name__,
-                      DeprecationWarning,
-                      stacklevel=2)
-        return value
-    raise TypeError('an integer is required (got type %s)' %
-                    type(value).__name__)
-
 def _check_date_fields(year, month, day):
-    year = _check_int_field(year)
-    month = _check_int_field(month)
-    day = _check_int_field(day)
+    year = _index(year)
+    month = _index(month)
+    day = _index(day)
     if not MINYEAR <= year <= MAXYEAR:
         raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
     if not 1 <= month <= 12:
@@ -426,10 +395,10 @@
     return year, month, day
 
 def _check_time_fields(hour, minute, second, microsecond, fold):
-    hour = _check_int_field(hour)
-    minute = _check_int_field(minute)
-    second = _check_int_field(second)
-    microsecond = _check_int_field(microsecond)
+    hour = _index(hour)
+    minute = _index(minute)
+    second = _index(second)
+    microsecond = _index(microsecond)
     if not 0 <= hour <= 23:
         raise ValueError('hour must be in 0..23', hour)
     if not 0 <= minute <= 59:
@@ -2358,7 +2327,7 @@
 #    This is again a requirement for a sane tzinfo class.
 #
 # 4. (x+k).s = x.s
-#    This follows from #2, and that datimetimetz+timedelta preserves tzinfo.
+#    This follows from #2, and that datetime+timedelta preserves tzinfo.
 #
 # 5. (x+k).n = x.n + k
 #    Again follows from how arithmetic is defined.
@@ -2541,10 +2510,10 @@
     # Clean up unused names
     del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
          _DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
-         _check_date_fields, _check_int_field, _check_time_fields,
+         _check_date_fields, _check_time_fields,
          _check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
          _date_class, _days_before_month, _days_before_year, _days_in_month,
-         _format_time, _format_offset, _is_leap, _isoweek1monday, _math,
+         _format_time, _format_offset, _index, _is_leap, _isoweek1monday, _math,
          _ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
          _divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
          _parse_hh_mm_ss_ff, _IsoCalendarDate)
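
Editor's note: operator.index() is the canonical lossless integer conversion;
it honors __index__, rejects floats outright, and drops the deprecated
__int__ fallback that _check_int_field still permitted with a warning.
A short sketch of the resulting 3.10 behavior:

    from datetime import date
    from operator import index

    class YearLike:
        def __index__(self):
            return 2021

    print(index(YearLike()))         # 2021: __index__ is honored
    print(date(YearLike(), 12, 31))  # 2021-12-31

    try:
        date(2021.0, 1, 1)           # floats no longer convert via __int__
    except TypeError as exc:
        print(exc)
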
diff --git a/Lib/difflib.py b/Lib/difflib.py
index 0dda80d..afd8a0c 100644
--- a/Lib/difflib.py
+++ b/Lib/difflib.py
@@ -62,7 +62,7 @@
     notion, pairing up elements that appear uniquely in each sequence.
     That, and the method here, appear to yield more intuitive difference
     reports than does diff.  This method appears to be the least vulnerable
-    to synching up on blocks of "junk lines", though (like blank lines in
+    to syncing up on blocks of "junk lines", though (like blank lines in
     ordinary text files, or maybe "<P>" lines in HTML files).  That may be
     because this is the only method of the 3 that has a *concept* of
     "junk" <wink>.
@@ -115,38 +115,6 @@
     case.  SequenceMatcher is quadratic time for the worst case and has
     expected-case behavior dependent in a complicated way on how many
     elements the sequences have in common; best case time is linear.
-
-    Methods:
-
-    __init__(isjunk=None, a='', b='')
-        Construct a SequenceMatcher.
-
-    set_seqs(a, b)
-        Set the two sequences to be compared.
-
-    set_seq1(a)
-        Set the first sequence to be compared.
-
-    set_seq2(b)
-        Set the second sequence to be compared.
-
-    find_longest_match(alo=0, ahi=None, blo=0, bhi=None)
-        Find longest matching block in a[alo:ahi] and b[blo:bhi].
-
-    get_matching_blocks()
-        Return list of triples describing matching subsequences.
-
-    get_opcodes()
-        Return list of 5-tuples describing how to turn a into b.
-
-    ratio()
-        Return a measure of the sequences' similarity (float in [0,1]).
-
-    quick_ratio()
-        Return an upper bound on .ratio() relatively quickly.
-
-    real_quick_ratio()
-        Return an upper bound on ratio() very quickly.
     """
 
     def __init__(self, isjunk=None, a='', b='', autojunk=True):
@@ -837,14 +805,6 @@
     +   4. Complicated is better than complex.
     ?           ++++ ^                      ^
     +   5. Flat is better than nested.
-
-    Methods:
-
-    __init__(linejunk=None, charjunk=None)
-        Construct a text differencer, with optional filters.
-
-    compare(a, b)
-        Compare two sequences of lines; generate the resulting delta.
     """
 
     def __init__(self, linejunk=None, charjunk=None):
diff --git a/Lib/dis.py b/Lib/dis.py
index e289e17..fe5d24e 100644
--- a/Lib/dis.py
+++ b/Lib/dis.py
@@ -338,8 +338,11 @@
                 argval, argrepr = _get_const_info(arg, constants)
             elif op in hasname:
                 argval, argrepr = _get_name_info(arg, names)
+            elif op in hasjabs:
+                argval = arg*2
+                argrepr = "to " + repr(argval)
             elif op in hasjrel:
-                argval = offset + 2 + arg
+                argval = offset + 2 + arg*2
                 argrepr = "to " + repr(argval)
             elif op in haslocal:
                 argval, argrepr = _get_name_info(arg, varnames)
@@ -384,7 +387,7 @@
                        constants=None, cells=None, linestarts=None,
                        *, file=None, line_offset=0):
     # Omit the line number column entirely if we have no line number info
-    show_lineno = linestarts is not None
+    show_lineno = bool(linestarts)
     if show_lineno:
         maxlineno = max(linestarts.values()) + line_offset
         if maxlineno >= 1000:
@@ -425,6 +428,7 @@
             extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
         else:
             arg = None
+            extended_arg = 0
         yield (i, op, arg)
 
 def findlabels(code):
@@ -437,9 +441,9 @@
     for offset, op, arg in _unpack_opargs(code):
         if arg is not None:
             if op in hasjrel:
-                label = offset + 2 + arg
+                label = offset + 2 + arg*2
             elif op in hasjabs:
-                label = arg
+                label = arg*2
             else:
                 continue
             if label not in labels:
@@ -449,32 +453,15 @@
 def findlinestarts(code):
     """Find the offsets in a byte code which are start of lines in the source.
 
-    Generate pairs (offset, lineno) as described in Python/compile.c.
-
+    Generate pairs (offset, lineno).
     """
-    byte_increments = code.co_lnotab[0::2]
-    line_increments = code.co_lnotab[1::2]
-    bytecode_len = len(code.co_code)
+    lastline = None
+    for start, end, line in code.co_lines():
+        if line is not None and line != lastline:
+            lastline = line
+            yield start, line
+    return
 
-    lastlineno = None
-    lineno = code.co_firstlineno
-    addr = 0
-    for byte_incr, line_incr in zip(byte_increments, line_increments):
-        if byte_incr:
-            if lineno != lastlineno:
-                yield (addr, lineno)
-                lastlineno = lineno
-            addr += byte_incr
-            if addr >= bytecode_len:
-                # The rest of the lnotab byte offsets are past the end of
-                # the bytecode, so the lines were optimized away.
-                return
-        if line_incr >= 0x80:
-            # line_increments is an array of 8-bit signed integers
-            line_incr -= 0x100
-        lineno += line_incr
-    if lineno != lastlineno:
-        yield (addr, lineno)
 
 class Bytecode:
     """The bytecode operations of a piece of code
diff --git a/Lib/distutils/README b/Lib/distutils/README
index 23f4885..73bd251 100644
--- a/Lib/distutils/README
+++ b/Lib/distutils/README
@@ -2,10 +2,10 @@
 
 There's a full documentation available at:
 
-    http://docs.python.org/distutils/
+    https://docs.python.org/distutils/
 
 The Distutils-SIG web page is also a good starting point:
 
-    http://www.python.org/sigs/distutils-sig/
+    https://www.python.org/sigs/distutils-sig/
 
 $Id$
diff --git a/Lib/distutils/__init__.py b/Lib/distutils/__init__.py
index d823d04..fdad6f6 100644
--- a/Lib/distutils/__init__.py
+++ b/Lib/distutils/__init__.py
@@ -9,5 +9,12 @@
 """
 
 import sys
+import warnings
 
 __version__ = sys.version[:sys.version.index(' ')]
+
+_DEPRECATION_MESSAGE = ("The distutils package is deprecated and slated for "
+                        "removal in Python 3.12. Use setuptools or check "
+                        "PEP 632 for potential alternatives")
+warnings.warn(_DEPRECATION_MESSAGE,
+              DeprecationWarning, 2)
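
Editor's note: because the warning is raised at import time with stacklevel
2, it points at whichever module imports distutils. Code that cannot yet
migrate to setuptools or sysconfig can acknowledge the PEP 632 deprecation
explicitly; one possible sketch:

    import warnings

    with warnings.catch_warnings():
        # Silence only this category, and only around the import.
        warnings.simplefilter("ignore", DeprecationWarning)
        import distutils
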
diff --git a/Lib/distutils/ccompiler.py b/Lib/distutils/ccompiler.py
index b5ef143..4c47f2e 100644
--- a/Lib/distutils/ccompiler.py
+++ b/Lib/distutils/ccompiler.py
@@ -392,7 +392,7 @@
         return output_dir, macros, include_dirs
 
     def _prep_compile(self, sources, output_dir, depends=None):
-        """Decide which souce files must be recompiled.
+        """Decide which source files must be recompiled.
 
         Determine the list of object files corresponding to 'sources',
         and figure out which ones really need to be recompiled.
diff --git a/Lib/distutils/command/__init__.py b/Lib/distutils/command/__init__.py
index 481eea9..fd0bfae 100644
--- a/Lib/distutils/command/__init__.py
+++ b/Lib/distutils/command/__init__.py
@@ -19,7 +19,6 @@
            'bdist',
            'bdist_dumb',
            'bdist_rpm',
-           'bdist_wininst',
            'check',
            'upload',
            # These two are reserved for future use:
diff --git a/Lib/distutils/command/bdist.py b/Lib/distutils/command/bdist.py
index 014871d..d580a80 100644
--- a/Lib/distutils/command/bdist.py
+++ b/Lib/distutils/command/bdist.py
@@ -62,7 +62,7 @@
 
     # Establish the preferred order (for the --help-formats option).
     format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar',
-                       'wininst', 'zip', 'msi']
+                       'zip', 'msi']
 
     # And the real information.
     format_command = {'rpm':   ('bdist_rpm',  "RPM distribution"),
@@ -71,8 +71,6 @@
                       'xztar': ('bdist_dumb', "xz'ed tar file"),
                       'ztar':  ('bdist_dumb', "compressed tar file"),
                       'tar':   ('bdist_dumb', "tar file"),
-                      'wininst': ('bdist_wininst',
-                                  "Windows executable installer"),
                       'zip':   ('bdist_dumb', "ZIP file"),
                       'msi':   ('bdist_msi',  "Microsoft Installer")
                       }
diff --git a/Lib/distutils/command/bdist_msi.py b/Lib/distutils/command/bdist_msi.py
index 0863a18..2ed017b 100644
--- a/Lib/distutils/command/bdist_msi.py
+++ b/Lib/distutils/command/bdist_msi.py
@@ -1,7 +1,5 @@
 # Copyright (C) 2005, 2006 Martin von Löwis
 # Licensed to PSF under a Contributor Agreement.
-# The bdist_wininst command proper
-# based on bdist_wininst
 """
 Implements the bdist_msi command.
 """
diff --git a/Lib/distutils/command/bdist_wininst.py b/Lib/distutils/command/bdist_wininst.py
deleted file mode 100644
index 6e9b49f..0000000
--- a/Lib/distutils/command/bdist_wininst.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""distutils.command.bdist_wininst
-
-Suppress the 'bdist_wininst' command, while still allowing
-setuptools to import it without breaking."""
-
-from distutils.core import Command
-from distutils.errors import DistutilsPlatformError
-
-
-class bdist_wininst(Command):
-    description = "create an executable installer for MS Windows"
-
-    # Marker for tests that we have the unsupported bdist_wininst
-    _unsupported = True
-
-    def initialize_options(self):
-        pass
-
-    def finalize_options(self):
-        pass
-
-    def run(self):
-        raise DistutilsPlatformError(
-            "bdist_wininst is not supported in this Python distribution"
-        )
diff --git a/Lib/distutils/command/check.py b/Lib/distutils/command/check.py
index ada2500..73a30f3 100644
--- a/Lib/distutils/command/check.py
+++ b/Lib/distutils/command/check.py
@@ -83,7 +83,7 @@
             name, version, URL
 
         Recommended fields:
-            (author and author_email) or (maintainer and maintainer_email))
+            (author and author_email) or (maintainer and maintainer_email)
 
         Warns if any are missing.
         """
diff --git a/Lib/distutils/command/install.py b/Lib/distutils/command/install.py
index aaa300e..01d5331 100644
--- a/Lib/distutils/command/install.py
+++ b/Lib/distutils/command/install.py
@@ -3,7 +3,9 @@
 Implements the Distutils 'install' command."""
 
 import sys
+import sysconfig
 import os
+import re
 
 from distutils import log
 from distutils.core import Command
@@ -17,35 +19,55 @@
 
 from site import USER_BASE
 from site import USER_SITE
-HAS_USER_SITE = True
 
-WINDOWS_SCHEME = {
-    'purelib': '$base/Lib/site-packages',
-    'platlib': '$base/Lib/site-packages',
-    'headers': '$base/Include/$dist_name',
-    'scripts': '$base/Scripts',
-    'data'   : '$base',
-}
+HAS_USER_SITE = (USER_SITE is not None)
 
-INSTALL_SCHEMES = {
-    'unix_prefix': {
-        'purelib': '$base/lib/python$py_version_short/site-packages',
-        'platlib': '$platbase/$platlibdir/python$py_version_short/site-packages',
-        'headers': '$base/include/python$py_version_short$abiflags/$dist_name',
-        'scripts': '$base/bin',
-        'data'   : '$base',
-        },
-    'unix_home': {
-        'purelib': '$base/lib/python',
-        'platlib': '$base/$platlibdir/python',
-        'headers': '$base/include/python/$dist_name',
-        'scripts': '$base/bin',
-        'data'   : '$base',
-        },
-    'nt': WINDOWS_SCHEME,
-    }
+# The keys to an installation scheme; if any new types of files are to be
+# installed, be sure to add an entry to every scheme in
+# sysconfig._INSTALL_SCHEMES, and to SCHEME_KEYS here.
+SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
 
-# user site schemes
+# The following code provides backward-compatible INSTALL_SCHEMES
+# while making the sysconfig module the single point of truth.
+# This makes it easier for OS distributions that need to alter
+# package installation locations in a single place.
+# Note that this module is deprecated (PEP 632); all consumers
+# of this information should switch to using sysconfig directly.
+INSTALL_SCHEMES = {"unix_prefix": {}, "unix_home": {}, "nt": {}}
+
+# Copy from sysconfig._INSTALL_SCHEMES
+for key in SCHEME_KEYS:
+    for distutils_scheme_name, sys_scheme_name in (
+            ("unix_prefix", "posix_prefix"), ("unix_home", "posix_home"),
+            ("nt", "nt")):
+        sys_key = key
+        sys_scheme = sysconfig._INSTALL_SCHEMES[sys_scheme_name]
+        if key == "headers" and key not in sys_scheme:
+            # On POSIX-y platforms, Python will:
+            # - Build from .h files in 'headers' (only there when
+            #   building CPython)
+            # - Install .h files to 'include'
+            # When 'headers' is missing, fall back to 'include'
+            sys_key = 'include'
+        INSTALL_SCHEMES[distutils_scheme_name][key] = sys_scheme[sys_key]
+
+# Transformation to different template format
+for main_key in INSTALL_SCHEMES:
+    for key, value in INSTALL_SCHEMES[main_key].items():
+        # Change all occurrences of {variable} to $variable
+        value = re.sub(r"\{(.+?)\}", r"$\g<1>", value)
+        value = value.replace("$installed_base", "$base")
+        value = value.replace("$py_version_nodot_plat", "$py_version_nodot")
+        if key == "headers":
+            value += "/$dist_name"
+        if sys.version_info >= (3, 9) and key == "platlib":
+            # platlibdir is available since 3.9: bpo-1294959
+            value = value.replace("/lib/", "/$platlibdir/")
+        INSTALL_SCHEMES[main_key][key] = value
+
+# The following part of INSTALL_SCHEMES has a different definition
+# than the one in sysconfig, but because both depend on the site module,
+# the outcomes should be the same.
 if HAS_USER_SITE:
     INSTALL_SCHEMES['nt_user'] = {
         'purelib': '$usersite',
@@ -64,11 +86,6 @@
         'data'   : '$userbase',
         }
 
-# The keys to an installation scheme; if any new types of files are to be
-# installed, be sure to add an entry to every installation scheme above,
-# and to SCHEME_KEYS here.
-SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
-
 
 class install(Command):
 
@@ -169,8 +186,9 @@
         self.install_lib = None         # set to either purelib or platlib
         self.install_scripts = None
         self.install_data = None
-        self.install_userbase = USER_BASE
-        self.install_usersite = USER_SITE
+        if HAS_USER_SITE:
+            self.install_userbase = USER_BASE
+            self.install_usersite = USER_SITE
 
         self.compile = None
         self.optimize = None
@@ -305,6 +323,9 @@
             self.config_vars['userbase'] = self.install_userbase
             self.config_vars['usersite'] = self.install_usersite
 
+        if sysconfig.is_python_build(True):
+            self.config_vars['srcdir'] = sysconfig.get_config_var('srcdir')
+
         self.expand_basedirs()
 
         self.dump_dirs("post-expand_basedirs()")
@@ -343,8 +364,9 @@
         # Convert directories from Unix /-separated syntax to the local
         # convention.
         self.convert_paths('lib', 'purelib', 'platlib',
-                           'scripts', 'data', 'headers',
-                           'userbase', 'usersite')
+                           'scripts', 'data', 'headers')
+        if HAS_USER_SITE:
+            self.convert_paths('userbase', 'usersite')
 
         # Deprecated
         # Well, we're not actually fully completely finalized yet: we still
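
Editor's note: the rewrite makes sysconfig the single source of truth for
install locations; distutils merely rewrites sysconfig's {var} templates into
its legacy $var form. New code can query sysconfig directly, for example:

    import sysconfig

    # 'posix_prefix' feeds distutils' 'unix_prefix' scheme; 'nt' is the
    # Windows equivalent.
    paths = sysconfig.get_paths("posix_prefix")
    for key in ("purelib", "platlib", "scripts", "data"):
        print(key, "->", paths[key])
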
diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py
index 95e9fda..e0ecb65 100644
--- a/Lib/distutils/command/upload.py
+++ b/Lib/distutils/command/upload.py
@@ -9,7 +9,8 @@
 import io
 import hashlib
 from base64 import standard_b64encode
-from urllib.request import urlopen, Request, HTTPError
+from urllib.error import HTTPError
+from urllib.request import urlopen, Request
 from urllib.parse import urlparse
 from distutils.errors import DistutilsError, DistutilsOptionError
 from distutils.core import PyPIRCCommand
diff --git a/Lib/distutils/extension.py b/Lib/distutils/extension.py
index c507da3..e85032e 100644
--- a/Lib/distutils/extension.py
+++ b/Lib/distutils/extension.py
@@ -4,6 +4,7 @@
 modules in setup scripts."""
 
 import os
+import re
 import warnings
 
 # This class is really only used by the "build_ext" command, so it might
@@ -161,7 +162,7 @@
             line = file.readline()
             if line is None:                # eof
                 break
-            if _variable_rx.match(line):    # VAR=VALUE, handled in first pass
+            if re.match(_variable_rx, line):    # VAR=VALUE, handled in first pass
                 continue
 
             if line[0] == line[-1] == "*":
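
Editor's note: the call site changes from _variable_rx.match(line) to
re.match(_variable_rx, line) because _variable_rx is now an uncompiled
pattern string shared with sysconfig, and re.match accepts either a string
or a compiled pattern. A sketch with an illustrative stand-in pattern (the
real one lives in sysconfig):

    import re

    # Hypothetical VAR=VALUE pattern, for demonstration only.
    _variable_rx = r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)"

    m = re.match(_variable_rx, "CFLAGS = -O2 -Wall")
    print(m.group(1), "->", m.group(2))   # CFLAGS -> -O2 -Wall
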
diff --git a/Lib/distutils/msvc9compiler.py b/Lib/distutils/msvc9compiler.py
index 6934e96..a7976fb 100644
--- a/Lib/distutils/msvc9compiler.py
+++ b/Lib/distutils/msvc9compiler.py
@@ -673,7 +673,7 @@
         # If a manifest should be embedded, return a tuple of
         # (manifest_filename, resource_id).  Returns None if no manifest
         # should be embedded.  See http://bugs.python.org/issue7833 for why
-        # we want to avoid any manifest for extension modules if we can)
+        # we want to avoid any manifest for extension modules if we can.
         for arg in ld_args:
             if arg.startswith("/MANIFESTFILE:"):
                 temp_manifest = arg.split(":", 1)[1]
diff --git a/Lib/distutils/spawn.py b/Lib/distutils/spawn.py
index f50edd2..31df3f7 100644
--- a/Lib/distutils/spawn.py
+++ b/Lib/distutils/spawn.py
@@ -54,18 +54,22 @@
         global _cfg_target, _cfg_target_split
         if _cfg_target is None:
             from distutils import sysconfig
-            _cfg_target = str(sysconfig.get_config_var(
-                                  'MACOSX_DEPLOYMENT_TARGET') or '')
+            _cfg_target = sysconfig.get_config_var(
+                                  'MACOSX_DEPLOYMENT_TARGET') or ''
             if _cfg_target:
                 _cfg_target_split = [int(x) for x in _cfg_target.split('.')]
         if _cfg_target:
-            # ensure that the deployment target of build process is not less
-            # than that used when the interpreter was built. This ensures
-            # extension modules are built with correct compatibility values
+            # Ensure that the deployment target of the build process is not
+            # less than 10.3 if the interpreter was built for 10.3 or later.
+            # This ensures extension modules are built with correct
+            # compatibility values, specifically LDSHARED which can use
+            # '-undefined dynamic_lookup' which only works on >= 10.3.
             cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
-            if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
+            cur_target_split = [int(x) for x in cur_target.split('.')]
+            if _cfg_target_split[:2] >= [10, 3] and cur_target_split[:2] < [10, 3]:
                 my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
-                          'now "%s" but "%s" during configure'
+                          'now "%s" but "%s" during configure; '
+                          'must use 10.3 or later'
                                 % (cur_target, _cfg_target))
                 raise DistutilsPlatformError(my_msg)
             env = dict(os.environ,
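
Editor's note: comparing split integer components matters because
lexicographic string comparison mis-orders versions such as 10.10 versus
10.3, and the new check deliberately looks at only the first two components.
Illustration:

    print("10.10" < "10.3")   # True -- wrong when read as a version number

    def parts(v):
        return [int(x) for x in v.split(".")]

    print(parts("10.10") < parts("10.3"))   # False -- correct numeric order
    print(parts("10.4.1")[:2] >= [10, 3])   # True -- the deployment-target test
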
diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
index 37feae5..3414a76 100644
--- a/Lib/distutils/sysconfig.py
+++ b/Lib/distutils/sysconfig.py
@@ -13,56 +13,174 @@
 import os
 import re
 import sys
+import warnings
+
+from functools import partial
 
 from .errors import DistutilsPlatformError
 
-# These are needed in a couple of spots, so just compute them once.
-PREFIX = os.path.normpath(sys.prefix)
-EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
-BASE_PREFIX = os.path.normpath(sys.base_prefix)
-BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
+from sysconfig import (
+    _PREFIX as PREFIX,
+    _BASE_PREFIX as BASE_PREFIX,
+    _EXEC_PREFIX as EXEC_PREFIX,
+    _BASE_EXEC_PREFIX as BASE_EXEC_PREFIX,
+    _PROJECT_BASE as project_base,
+    _PYTHON_BUILD as python_build,
+    _init_posix as sysconfig_init_posix,
+    parse_config_h as sysconfig_parse_config_h,
 
-# Path to the base directory of the project. On Windows the binary may
-# live in project/PCbuild/win32 or project/PCbuild/amd64.
-# set for cross builds
-if "_PYTHON_PROJECT_BASE" in os.environ:
-    project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
-else:
-    if sys.executable:
-        project_base = os.path.dirname(os.path.abspath(sys.executable))
-    else:
-        # sys.executable can be empty if argv[0] has been changed and Python is
-        # unable to retrieve the real program name
-        project_base = os.getcwd()
+    _init_non_posix,
+    _is_python_source_dir,
+    _sys_home,
+
+    _variable_rx,
+    _findvar1_rx,
+    _findvar2_rx,
+
+    expand_makefile_vars,
+    is_python_build,
+    get_config_h_filename,
+    get_config_var,
+    get_config_vars,
+    get_makefile_filename,
+    get_python_version,
+)
+
+# This is better than
+# from sysconfig import _CONFIG_VARS as _config_vars
+# because it makes sure that the global dictionary is initialized,
+# which might not yet have happened at the time of import.
+_config_vars = get_config_vars()
+
+if os.name == "nt":
+    from sysconfig import _fix_pcbuild
+
+warnings.warn(
+    'The distutils.sysconfig module is deprecated, use sysconfig instead',
+    DeprecationWarning,
+    stacklevel=2
+)
 
 
-# python_build: (Boolean) if true, we're either building Python or
-# building an extension with an un-installed Python, so we use
-# different (hard-wired) directories.
-def _is_python_source_dir(d):
-    for fn in ("Setup", "Setup.local"):
-        if os.path.isfile(os.path.join(d, "Modules", fn)):
-            return True
-    return False
+# The following functions are the same as in sysconfig but with a different API
+def parse_config_h(fp, g=None):
+    return sysconfig_parse_config_h(fp, vars=g)
 
-_sys_home = getattr(sys, '_home', None)
 
-if os.name == 'nt':
-    def _fix_pcbuild(d):
-        if d and os.path.normcase(d).startswith(
-                os.path.normcase(os.path.join(PREFIX, "PCbuild"))):
-            return PREFIX
-        return d
-    project_base = _fix_pcbuild(project_base)
-    _sys_home = _fix_pcbuild(_sys_home)
+_python_build = partial(is_python_build, check_home=True)
+_init_posix = partial(sysconfig_init_posix, _config_vars)
+_init_nt = partial(_init_non_posix, _config_vars)
 
-def _python_build():
-    if _sys_home:
-        return _is_python_source_dir(_sys_home)
-    return _is_python_source_dir(project_base)
 
-python_build = _python_build()
+# A similar function is also implemented in sysconfig as _parse_makefile,
+# but without the parsing capabilities of distutils.text_file.TextFile.
+def parse_makefile(fn, g=None):
+    """Parse a Makefile-style file.
+    A dictionary containing name/value pairs is returned.  If an
+    optional dictionary is passed in as the second argument, it is
+    used instead of a new dictionary.
+    """
+    from distutils.text_file import TextFile
+    fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
 
+    if g is None:
+        g = {}
+    done = {}
+    notdone = {}
+
+    while True:
+        line = fp.readline()
+        if line is None: # eof
+            break
+        m = re.match(_variable_rx, line)
+        if m:
+            n, v = m.group(1, 2)
+            v = v.strip()
+            # `$$' is a literal `$' in make
+            tmpv = v.replace('$$', '')
+
+            if "$" in tmpv:
+                notdone[n] = v
+            else:
+                try:
+                    v = int(v)
+                except ValueError:
+                    # insert literal `$'
+                    done[n] = v.replace('$$', '$')
+                else:
+                    done[n] = v
+
+    # Variables with a 'PY_' prefix in the makefile. These need to
+    # be made available without that prefix through sysconfig.
+    # Special care is needed to ensure that variable expansion works, even
+    # if the expansion uses the name without a prefix.
+    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
+
+    # do variable interpolation here
+    while notdone:
+        for name in list(notdone):
+            value = notdone[name]
+            m = re.search(_findvar1_rx, value) or re.search(_findvar2_rx, value)
+            if m:
+                n = m.group(1)
+                found = True
+                if n in done:
+                    item = str(done[n])
+                elif n in notdone:
+                    # get it on a subsequent round
+                    found = False
+                elif n in os.environ:
+                    # do it like make: fall back to environment
+                    item = os.environ[n]
+
+                elif n in renamed_variables:
+                    if name.startswith('PY_') and name[3:] in renamed_variables:
+                        item = ""
+
+                    elif 'PY_' + n in notdone:
+                        found = False
+
+                    else:
+                        item = str(done['PY_' + n])
+                else:
+                    done[n] = item = ""
+                if found:
+                    after = value[m.end():]
+                    value = value[:m.start()] + item + after
+                    if "$" in after:
+                        notdone[name] = value
+                    else:
+                        try: value = int(value)
+                        except ValueError:
+                            done[name] = value.strip()
+                        else:
+                            done[name] = value
+                        del notdone[name]
+
+                        if name.startswith('PY_') \
+                            and name[3:] in renamed_variables:
+
+                            name = name[3:]
+                            if name not in done:
+                                done[name] = value
+            else:
+                # bogus variable reference; just drop it since we can't deal
+                del notdone[name]
+
+    fp.close()
+
+    # strip spurious spaces
+    for k, v in done.items():
+        if isinstance(v, str):
+            done[k] = v.strip()
+
+    # save the results in the global dictionary
+    g.update(done)
+    return g
+
+
+# The following functions are deprecated together with this module and
+# have no direct replacement.
 
 # Calculate the build qualifier flags if they are defined.  Adding the flags
 # to the include and lib directories only makes sense for an installation, not
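
Editor's note: after this hunk, most of distutils.sysconfig is a thin
re-export layer over sysconfig; only parse_makefile keeps a
distutils-specific implementation on top of TextFile. New code should call
sysconfig directly, for example:

    import sysconfig

    # Forward-compatible equivalents of the re-exported names.
    print(sysconfig.get_python_version())     # e.g. '3.10'
    print(sysconfig.get_config_h_filename())
    cc, cflags = sysconfig.get_config_vars("CC", "CFLAGS")
    print(cc, cflags)
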
@@ -76,12 +194,76 @@
     # this attribute, which is fine.
     pass
 
-def get_python_version():
-    """Return a string containing the major and minor Python version,
-    leaving off the patchlevel.  Sample return values could be '1.5'
-    or '2.2'.
+
+def customize_compiler(compiler):
+    """Do any platform-specific customization of a CCompiler instance.
+
+    Mainly needed on Unix, so we can plug in the information that
+    varies across Unices and is stored in Python's Makefile.
     """
-    return '%d.%d' % sys.version_info[:2]
+    if compiler.compiler_type == "unix":
+        if sys.platform == "darwin":
+            # Perform first-time customization of compiler-related
+            # config vars on OS X now that we know we need a compiler.
+            # This is primarily to support Pythons from binary
+            # installers.  The kind and paths to build tools on
+            # the user system may vary significantly from the system
+            # that Python itself was built on.  Also the user OS
+            # version and build tools may not support the same set
+            # of CPU architectures for universal builds.
+            if not _config_vars.get('CUSTOMIZED_OSX_COMPILER'):
+                import _osx_support
+                _osx_support.customize_compiler(_config_vars)
+                _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
+
+        (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
+            get_config_vars('CC', 'CXX', 'CFLAGS',
+                            'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
+
+        if 'CC' in os.environ:
+            newcc = os.environ['CC']
+            if (sys.platform == 'darwin'
+                    and 'LDSHARED' not in os.environ
+                    and ldshared.startswith(cc)):
+                # On OS X, if CC is overridden, use that as the default
+                #       command for LDSHARED as well
+                ldshared = newcc + ldshared[len(cc):]
+            cc = newcc
+        if 'CXX' in os.environ:
+            cxx = os.environ['CXX']
+        if 'LDSHARED' in os.environ:
+            ldshared = os.environ['LDSHARED']
+        if 'CPP' in os.environ:
+            cpp = os.environ['CPP']
+        else:
+            cpp = cc + " -E"           # not always
+        if 'LDFLAGS' in os.environ:
+            ldshared = ldshared + ' ' + os.environ['LDFLAGS']
+        if 'CFLAGS' in os.environ:
+            cflags = cflags + ' ' + os.environ['CFLAGS']
+            ldshared = ldshared + ' ' + os.environ['CFLAGS']
+        if 'CPPFLAGS' in os.environ:
+            cpp = cpp + ' ' + os.environ['CPPFLAGS']
+            cflags = cflags + ' ' + os.environ['CPPFLAGS']
+            ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
+        if 'AR' in os.environ:
+            ar = os.environ['AR']
+        if 'ARFLAGS' in os.environ:
+            archiver = ar + ' ' + os.environ['ARFLAGS']
+        else:
+            archiver = ar + ' ' + ar_flags
+
+        cc_cmd = cc + ' ' + cflags
+        compiler.set_executables(
+            preprocessor=cpp,
+            compiler=cc_cmd,
+            compiler_so=cc_cmd + ' ' + ccshared,
+            compiler_cxx=cxx,
+            linker_so=ldshared,
+            linker_exe=cc,
+            archiver=archiver)
+
+        compiler.shared_lib_extension = shlib_suffix
 
 
 def get_python_inc(plat_specific=0, prefix=None):
@@ -167,389 +349,3 @@
         raise DistutilsPlatformError(
             "I don't know where Python installs its library "
             "on platform '%s'" % os.name)
-
-
-
-def customize_compiler(compiler):
-    """Do any platform-specific customization of a CCompiler instance.
-
-    Mainly needed on Unix, so we can plug in the information that
-    varies across Unices and is stored in Python's Makefile.
-    """
-    if compiler.compiler_type == "unix":
-        if sys.platform == "darwin":
-            # Perform first-time customization of compiler-related
-            # config vars on OS X now that we know we need a compiler.
-            # This is primarily to support Pythons from binary
-            # installers.  The kind and paths to build tools on
-            # the user system may vary significantly from the system
-            # that Python itself was built on.  Also the user OS
-            # version and build tools may not support the same set
-            # of CPU architectures for universal builds.
-            global _config_vars
-            # Use get_config_var() to ensure _config_vars is initialized.
-            if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
-                import _osx_support
-                _osx_support.customize_compiler(_config_vars)
-                _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
-
-        (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
-            get_config_vars('CC', 'CXX', 'CFLAGS',
-                            'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
-
-        if 'CC' in os.environ:
-            newcc = os.environ['CC']
-            if (sys.platform == 'darwin'
-                    and 'LDSHARED' not in os.environ
-                    and ldshared.startswith(cc)):
-                # On OS X, if CC is overridden, use that as the default
-                #       command for LDSHARED as well
-                ldshared = newcc + ldshared[len(cc):]
-            cc = newcc
-        if 'CXX' in os.environ:
-            cxx = os.environ['CXX']
-        if 'LDSHARED' in os.environ:
-            ldshared = os.environ['LDSHARED']
-        if 'CPP' in os.environ:
-            cpp = os.environ['CPP']
-        else:
-            cpp = cc + " -E"           # not always
-        if 'LDFLAGS' in os.environ:
-            ldshared = ldshared + ' ' + os.environ['LDFLAGS']
-        if 'CFLAGS' in os.environ:
-            cflags = cflags + ' ' + os.environ['CFLAGS']
-            ldshared = ldshared + ' ' + os.environ['CFLAGS']
-        if 'CPPFLAGS' in os.environ:
-            cpp = cpp + ' ' + os.environ['CPPFLAGS']
-            cflags = cflags + ' ' + os.environ['CPPFLAGS']
-            ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
-        if 'AR' in os.environ:
-            ar = os.environ['AR']
-        if 'ARFLAGS' in os.environ:
-            archiver = ar + ' ' + os.environ['ARFLAGS']
-        else:
-            archiver = ar + ' ' + ar_flags
-
-        cc_cmd = cc + ' ' + cflags
-        compiler.set_executables(
-            preprocessor=cpp,
-            compiler=cc_cmd,
-            compiler_so=cc_cmd + ' ' + ccshared,
-            compiler_cxx=cxx,
-            linker_so=ldshared,
-            linker_exe=cc,
-            archiver=archiver)
-
-        compiler.shared_lib_extension = shlib_suffix
-
-
-def get_config_h_filename():
-    """Return full pathname of installed pyconfig.h file."""
-    if python_build:
-        if os.name == "nt":
-            inc_dir = os.path.join(_sys_home or project_base, "PC")
-        else:
-            inc_dir = _sys_home or project_base
-    else:
-        inc_dir = get_python_inc(plat_specific=1)
-
-    return os.path.join(inc_dir, 'pyconfig.h')
-
-
-def get_makefile_filename():
-    """Return full pathname of installed Makefile from the Python build."""
-    if python_build:
-        return os.path.join(_sys_home or project_base, "Makefile")
-    lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
-    config_file = 'config-{}{}'.format(get_python_version(), build_flags)
-    if hasattr(sys.implementation, '_multiarch'):
-        config_file += '-%s' % sys.implementation._multiarch
-    return os.path.join(lib_dir, config_file, 'Makefile')
-
-
-def parse_config_h(fp, g=None):
-    """Parse a config.h-style file.
-
-    A dictionary containing name/value pairs is returned.  If an
-    optional dictionary is passed in as the second argument, it is
-    used instead of a new dictionary.
-    """
-    if g is None:
-        g = {}
-    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
-    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
-    #
-    while True:
-        line = fp.readline()
-        if not line:
-            break
-        m = define_rx.match(line)
-        if m:
-            n, v = m.group(1, 2)
-            try: v = int(v)
-            except ValueError: pass
-            g[n] = v
-        else:
-            m = undef_rx.match(line)
-            if m:
-                g[m.group(1)] = 0
-    return g
-
-
-# Regexes needed for parsing Makefile (and similar syntaxes,
-# like old-style Setup files).
-_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
-_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
-_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
-
-def parse_makefile(fn, g=None):
-    """Parse a Makefile-style file.
-
-    A dictionary containing name/value pairs is returned.  If an
-    optional dictionary is passed in as the second argument, it is
-    used instead of a new dictionary.
-    """
-    from distutils.text_file import TextFile
-    fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
-
-    if g is None:
-        g = {}
-    done = {}
-    notdone = {}
-
-    while True:
-        line = fp.readline()
-        if line is None: # eof
-            break
-        m = _variable_rx.match(line)
-        if m:
-            n, v = m.group(1, 2)
-            v = v.strip()
-            # `$$' is a literal `$' in make
-            tmpv = v.replace('$$', '')
-
-            if "$" in tmpv:
-                notdone[n] = v
-            else:
-                try:
-                    v = int(v)
-                except ValueError:
-                    # insert literal `$'
-                    done[n] = v.replace('$$', '$')
-                else:
-                    done[n] = v
-
-    # Variables with a 'PY_' prefix in the makefile. These need to
-    # be made available without that prefix through sysconfig.
-    # Special care is needed to ensure that variable expansion works, even
-    # if the expansion uses the name without a prefix.
-    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
-
-    # do variable interpolation here
-    while notdone:
-        for name in list(notdone):
-            value = notdone[name]
-            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
-            if m:
-                n = m.group(1)
-                found = True
-                if n in done:
-                    item = str(done[n])
-                elif n in notdone:
-                    # get it on a subsequent round
-                    found = False
-                elif n in os.environ:
-                    # do it like make: fall back to environment
-                    item = os.environ[n]
-
-                elif n in renamed_variables:
-                    if name.startswith('PY_') and name[3:] in renamed_variables:
-                        item = ""
-
-                    elif 'PY_' + n in notdone:
-                        found = False
-
-                    else:
-                        item = str(done['PY_' + n])
-                else:
-                    done[n] = item = ""
-                if found:
-                    after = value[m.end():]
-                    value = value[:m.start()] + item + after
-                    if "$" in after:
-                        notdone[name] = value
-                    else:
-                        try: value = int(value)
-                        except ValueError:
-                            done[name] = value.strip()
-                        else:
-                            done[name] = value
-                        del notdone[name]
-
-                        if name.startswith('PY_') \
-                            and name[3:] in renamed_variables:
-
-                            name = name[3:]
-                            if name not in done:
-                                done[name] = value
-            else:
-                # bogus variable reference; just drop it since we can't deal
-                del notdone[name]
-
-    fp.close()
-
-    # strip spurious spaces
-    for k, v in done.items():
-        if isinstance(v, str):
-            done[k] = v.strip()
-
-    # save the results in the global dictionary
-    g.update(done)
-    return g
-
-
-def expand_makefile_vars(s, vars):
-    """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
-    'string' according to 'vars' (a dictionary mapping variable names to
-    values).  Variables not present in 'vars' are silently expanded to the
-    empty string.  The variable values in 'vars' should not contain further
-    variable expansions; if 'vars' is the output of 'parse_makefile()',
-    you're fine.  Returns a variable-expanded version of 's'.
-    """
-
-    # This algorithm does multiple expansion, so if vars['foo'] contains
-    # "${bar}", it will expand ${foo} to ${bar}, and then expand
-    # ${bar}... and so forth.  This is fine as long as 'vars' comes from
-    # 'parse_makefile()', which takes care of such expansions eagerly,
-    # according to make's variable expansion semantics.
-
-    while True:
-        m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
-        if m:
-            (beg, end) = m.span()
-            s = s[0:beg] + vars.get(m.group(1)) + s[end:]
-        else:
-            break
-    return s
-
-
-_config_vars = None
-
-def _init_posix():
-    """Initialize the module as appropriate for POSIX systems."""
-    # _sysconfigdata is generated at build time, see the sysconfig module
-    name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
-        '_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
-        abi=sys.abiflags,
-        platform=sys.platform,
-        multiarch=getattr(sys.implementation, '_multiarch', ''),
-    ))
-    _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
-    build_time_vars = _temp.build_time_vars
-    global _config_vars
-    _config_vars = {}
-    _config_vars.update(build_time_vars)
-
-
-def _init_nt():
-    """Initialize the module as appropriate for NT"""
-    g = {}
-    # set basic install directories
-    g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
-    g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
-
-    # XXX hmmm.. a normal install puts include files here
-    g['INCLUDEPY'] = get_python_inc(plat_specific=0)
-
-    g['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
-    g['EXE'] = ".exe"
-    g['VERSION'] = get_python_version().replace(".", "")
-    g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
-
-    global _config_vars
-    _config_vars = g
-
-
-def get_config_vars(*args):
-    """With no arguments, return a dictionary of all configuration
-    variables relevant for the current platform.  Generally this includes
-    everything needed to build extensions and install both pure modules and
-    extensions.  On Unix, this means every variable defined in Python's
-    installed Makefile; on Windows it's a much smaller set.
-
-    With arguments, return a list of values that result from looking up
-    each argument in the configuration variable dictionary.
-    """
-    global _config_vars
-    if _config_vars is None:
-        func = globals().get("_init_" + os.name)
-        if func:
-            func()
-        else:
-            _config_vars = {}
-
-        # Normalized versions of prefix and exec_prefix are handy to have;
-        # in fact, these are the standard versions used most places in the
-        # Distutils.
-        _config_vars['prefix'] = PREFIX
-        _config_vars['exec_prefix'] = EXEC_PREFIX
-
-        # For backward compatibility, see issue19555
-        SO = _config_vars.get('EXT_SUFFIX')
-        if SO is not None:
-            _config_vars['SO'] = SO
-
-        # Always convert srcdir to an absolute path
-        srcdir = _config_vars.get('srcdir', project_base)
-        if os.name == 'posix':
-            if python_build:
-                # If srcdir is a relative path (typically '.' or '..')
-                # then it should be interpreted relative to the directory
-                # containing Makefile.
-                base = os.path.dirname(get_makefile_filename())
-                srcdir = os.path.join(base, srcdir)
-            else:
-                # srcdir is not meaningful since the installation is
-                # spread about the filesystem.  We choose the
-                # directory containing the Makefile since we know it
-                # exists.
-                srcdir = os.path.dirname(get_makefile_filename())
-        _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
-
-        # Convert srcdir into an absolute path if it appears necessary.
-        # Normally it is relative to the build directory.  However, during
-        # testing, for example, we might be running a non-installed python
-        # from a different directory.
-        if python_build and os.name == "posix":
-            base = project_base
-            if (not os.path.isabs(_config_vars['srcdir']) and
-                base != os.getcwd()):
-                # srcdir is relative and we are not in the same directory
-                # as the executable. Assume executable is in the build
-                # directory and make srcdir absolute.
-                srcdir = os.path.join(base, _config_vars['srcdir'])
-                _config_vars['srcdir'] = os.path.normpath(srcdir)
-
-        # OS X platforms require special customization to handle
-        # multi-architecture, multi-os-version installers
-        if sys.platform == 'darwin':
-            import _osx_support
-            _osx_support.customize_config_vars(_config_vars)
-
-    if args:
-        vals = []
-        for name in args:
-            vals.append(_config_vars.get(name))
-        return vals
-    else:
-        return _config_vars
-
-def get_config_var(name):
-    """Return the value of a single variable using the dictionary
-    returned by 'get_config_vars()'.  Equivalent to
-    get_config_vars().get(name)
-    """
-    if name == 'SO':
-        import warnings
-        warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
-    return get_config_vars().get(name)
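
Note: the customize_compiler() hunk above mirrors a CC override into
LDSHARED on darwin. A minimal sketch of that logic, using hypothetical
stand-ins for the get_config_vars() output:

    # 'cc' and 'ldshared' stand in for values from get_config_vars().
    cc = 'gcc'
    ldshared = 'gcc -bundle -undefined dynamic_lookup'
    env = {'CC': 'clang'}  # simulated os.environ

    # If CC is overridden but LDSHARED is not, reuse the new compiler
    # as the link driver while keeping the original flags.
    if 'CC' in env and 'LDSHARED' not in env and ldshared.startswith(cc):
        ldshared = env['CC'] + ldshared[len(cc):]
    cc = env['CC']
    assert ldshared == 'clang -bundle -undefined dynamic_lookup'
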
diff --git a/Lib/distutils/unixccompiler.py b/Lib/distutils/unixccompiler.py
index f0792de..d00c489 100644
--- a/Lib/distutils/unixccompiler.py
+++ b/Lib/distutils/unixccompiler.py
@@ -215,7 +215,8 @@
         return "-L" + dir
 
     def _is_gcc(self, compiler_name):
-        return "gcc" in compiler_name or "g++" in compiler_name
+        # clang uses the same syntax for rpath as gcc
+        return any(name in compiler_name for name in ("gcc", "g++", "clang"))
 
     def runtime_library_dir_option(self, dir):
         # XXX Hackish, at the very least.  See Python bug #445902:
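
Note: the widened _is_gcc() check means clang is now handled like gcc when
emitting rpath options. A small standalone sketch of the predicate,
mirroring the diff above:

    def _is_gcc(compiler_name):
        # clang accepts the same rpath syntax as gcc, so treat it the same
        return any(name in compiler_name for name in ("gcc", "g++", "clang"))

    assert _is_gcc("x86_64-linux-gnu-gcc")
    assert _is_gcc("clang++")
    assert not _is_gcc("icc")
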
diff --git a/Lib/distutils/util.py b/Lib/distutils/util.py
index 4b002ec..2ce5c5b 100644
--- a/Lib/distutils/util.py
+++ b/Lib/distutils/util.py
@@ -9,6 +9,7 @@
 import importlib.util
 import string
 import sys
+import distutils
 from distutils.errors import DistutilsPlatformError
 from distutils.dep_util import newer
 from distutils.spawn import spawn
@@ -419,8 +420,10 @@
              direct=1)
 """ % (optimize, force, prefix, base_dir, verbose))
 
+        msg = distutils._DEPRECATION_MESSAGE
         cmd = [sys.executable]
         cmd.extend(subprocess._optim_args_from_interpreter_flags())
+        cmd.append(f'-Wignore:{msg}:DeprecationWarning')
         cmd.append(script_name)
         spawn(cmd, dry_run=dry_run)
         execute(os.remove, (script_name,), "removing %s" % script_name,
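
Note: the byte_compile change above silences the distutils deprecation
warning in the child interpreter via a -W filter. A sketch of the
mechanism, with an illustrative message string (the real text comes from
distutils._DEPRECATION_MESSAGE):

    import subprocess
    import sys

    msg = 'distutils is deprecated'  # hypothetical message text
    # action:message:category -- ignore DeprecationWarnings whose message
    # starts with 'msg' in the child process.
    subprocess.run([sys.executable,
                    f'-Wignore:{msg}:DeprecationWarning',
                    '-c', 'pass'],
                   check=True)
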
diff --git a/Lib/doctest.py b/Lib/doctest.py
index baa503c..b27cbdf 100644
--- a/Lib/doctest.py
+++ b/Lib/doctest.py
@@ -102,7 +102,7 @@
 import sys
 import traceback
 import unittest
-from io import StringIO
+from io import StringIO, IncrementalNewlineDecoder
 from collections import namedtuple
 
 TestResults = namedtuple('TestResults', 'failed attempted')
@@ -212,23 +212,24 @@
         raise TypeError("Expected a module, string, or None")
 
 def _newline_convert(data):
-    # We have two cases to cover and we need to make sure we do
-    # them in the right order
-    for newline in ('\r\n', '\r'):
-        data = data.replace(newline, '\n')
-    return data
+    # The IO module provides a handy decoder for universal newline conversion
+    return IncrementalNewlineDecoder(None, True).decode(data, True)
 
 def _load_testfile(filename, package, module_relative, encoding):
     if module_relative:
         package = _normalize_module(package, 3)
         filename = _module_relative_path(package, filename)
-        if getattr(package, '__loader__', None) is not None:
-            if hasattr(package.__loader__, 'get_data'):
-                file_contents = package.__loader__.get_data(filename)
-                file_contents = file_contents.decode(encoding)
-                # get_data() opens files as 'rb', so one must do the equivalent
-                # conversion as universal newlines would do.
-                return _newline_convert(file_contents), filename
+        if (loader := getattr(package, '__loader__', None)) is None:
+            try:
+                loader = package.__spec__.loader
+            except AttributeError:
+                pass
+        if hasattr(loader, 'get_data'):
+            file_contents = loader.get_data(filename)
+            file_contents = file_contents.decode(encoding)
+            # get_data() opens files as 'rb', so one must do the equivalent
+            # conversion as universal newlines would do.
+            return _newline_convert(file_contents), filename
     with open(filename, encoding=encoding) as f:
         return f.read(), filename
 
@@ -972,6 +973,17 @@
         else:
             raise ValueError("object must be a class or function")
 
+    def _is_routine(self, obj):
+        """
+        Safely unwrap objects and determine if they are functions.
+        """
+        maybe_routine = obj
+        try:
+            maybe_routine = inspect.unwrap(maybe_routine)
+        except ValueError:
+            pass
+        return inspect.isroutine(maybe_routine)
+
     def _find(self, tests, obj, name, module, source_lines, globs, seen):
         """
         Find tests for the given object and any contained objects, and
@@ -994,9 +1006,9 @@
         if inspect.ismodule(obj) and self._recurse:
             for valname, val in obj.__dict__.items():
                 valname = '%s.%s' % (name, valname)
+
                 # Recurse to functions & classes.
-                if ((inspect.isroutine(inspect.unwrap(val))
-                     or inspect.isclass(val)) and
+                if ((self._is_routine(val) or inspect.isclass(val)) and
                     self._from_module(module, val)):
                     self._find(tests, val, valname, module, source_lines,
                                globs, seen)
@@ -1022,10 +1034,8 @@
         if inspect.isclass(obj) and self._recurse:
             for valname, val in obj.__dict__.items():
                 # Special handling for staticmethod/classmethod.
-                if isinstance(val, staticmethod):
-                    val = getattr(obj, valname)
-                if isinstance(val, classmethod):
-                    val = getattr(obj, valname).__func__
+                if isinstance(val, (staticmethod, classmethod)):
+                    val = val.__func__
 
                 # Recurse to methods, properties, and nested classes.
                 if ((inspect.isroutine(val) or inspect.isclass(val) or
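
Note: the _newline_convert() rewrite above replaces the two-pass replace()
with the io module's universal-newline decoder; one call handles both \r\n
and lone \r:

    from io import IncrementalNewlineDecoder

    data = 'a\r\nb\rc\n'
    converted = IncrementalNewlineDecoder(None, True).decode(data, True)
    assert converted == 'a\nb\nc\n'
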
diff --git a/Lib/email/_parseaddr.py b/Lib/email/_parseaddr.py
index 41ff6f8..ba5ad5a 100644
--- a/Lib/email/_parseaddr.py
+++ b/Lib/email/_parseaddr.py
@@ -65,8 +65,10 @@
 
     """
     if not data:
-        return
+        return None
     data = data.split()
+    if not data:  # This happens for whitespace-only input.
+        return None
     # The FWS after the comma after the day-of-week is optional, so search and
     # adjust for this.
     if data[0].endswith(',') or data[0].lower() in _daynames:
@@ -126,6 +128,8 @@
             tss = 0
         elif len(tm) == 3:
             [thh, tmm, tss] = tm
+        else:
+            return None
     else:
         return None
     try:
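
Note: with the added guard, whitespace-only input now yields None instead
of raising IndexError on data[0]. Observable through the public wrapper:

    from email.utils import parsedate_tz

    assert parsedate_tz('') is None
    assert parsedate_tz('   ') is None      # previously raised IndexError
    print(parsedate_tz('Mon, 16 Nov 2009 13:32:02 +0100'))
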
diff --git a/Lib/email/base64mime.py b/Lib/email/base64mime.py
index 17f0818..a7cc373 100644
--- a/Lib/email/base64mime.py
+++ b/Lib/email/base64mime.py
@@ -84,7 +84,7 @@
     in an email.
     """
     if not s:
-        return s
+        return ""
 
     encvec = []
     max_unencoded = maxlinelen * 3 // 4
diff --git a/Lib/email/contentmanager.py b/Lib/email/contentmanager.py
index b91fb0e..fcf278d 100644
--- a/Lib/email/contentmanager.py
+++ b/Lib/email/contentmanager.py
@@ -144,7 +144,7 @@
     linesep = policy.linesep.encode('ascii')
     def embedded_body(lines): return linesep.join(lines) + linesep
     def normal_body(lines): return b'\n'.join(lines) + b'\n'
-    if cte==None:
+    if cte is None:
         # Use heuristics to decide on the "best" encoding.
         if max((len(x) for x in lines), default=0) <= policy.max_line_length:
             try:
@@ -238,9 +238,7 @@
         data = binascii.b2a_qp(data, istext=False, header=False, quotetabs=True)
         data = data.decode('ascii')
     elif cte == '7bit':
-        # Make sure it really is only ASCII.  The early warning here seems
-        # worth the overhead...if you care write your own content manager :).
-        data.encode('ascii')
+        data = data.decode('ascii')
     elif cte in ('8bit', 'binary'):
         data = data.decode('ascii', 'surrogateescape')
     msg.set_payload(data)
diff --git a/Lib/email/errors.py b/Lib/email/errors.py
index d28a680..3ad0056 100644
--- a/Lib/email/errors.py
+++ b/Lib/email/errors.py
@@ -108,3 +108,6 @@
     """local_part contains non-ASCII characters"""
     # This defect only occurs during unicode parsing, not when
     # parsing messages decoded from binary.
+
+class InvalidDateDefect(HeaderDefect):
+    """Header has unparsable or invalid date"""
diff --git a/Lib/email/headerregistry.py b/Lib/email/headerregistry.py
index 5d84fc0..b590d69 100644
--- a/Lib/email/headerregistry.py
+++ b/Lib/email/headerregistry.py
@@ -2,10 +2,6 @@
 
 This module provides an implementation of the HeaderRegistry API.
 The implementation is designed to flexibly follow RFC5322 rules.
-
-Eventually HeaderRegistry will be a public API, but it isn't yet,
-and will probably change some before that happens.
-
 """
 from types import MappingProxyType
 
@@ -302,7 +298,14 @@
             kwds['parse_tree'] = parser.TokenList()
             return
         if isinstance(value, str):
-            value = utils.parsedate_to_datetime(value)
+            kwds['decoded'] = value
+            try:
+                value = utils.parsedate_to_datetime(value)
+            except ValueError:
+                kwds['defects'].append(errors.InvalidDateDefect('Invalid date value or format'))
+                kwds['datetime'] = None
+                kwds['parse_tree'] = parser.TokenList()
+                return
         kwds['datetime'] = value
         kwds['decoded'] = utils.format_datetime(kwds['datetime'])
         kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
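
Note: together with the new InvalidDateDefect, a malformed Date header is
now recorded as a defect rather than aborting header parsing:

    from email import message_from_string, policy

    msg = message_from_string('Date: not a date\n\nbody\n',
                              policy=policy.default)
    print(msg['Date'].datetime)   # None
    print(msg['Date'].defects)    # contains an InvalidDateDefect
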
diff --git a/Lib/email/message.py b/Lib/email/message.py
index 3701b30..6752ce0 100644
--- a/Lib/email/message.py
+++ b/Lib/email/message.py
@@ -948,7 +948,7 @@
         if policy is None:
             from email.policy import default
             policy = default
-        Message.__init__(self, policy)
+        super().__init__(policy)
 
 
     def as_string(self, unixfrom=False, maxheaderlen=None, policy=None):
@@ -965,7 +965,7 @@
         policy = self.policy if policy is None else policy
         if maxheaderlen is None:
             maxheaderlen = policy.max_line_length
-        return super().as_string(maxheaderlen=maxheaderlen, policy=policy)
+        return super().as_string(unixfrom, maxheaderlen, policy)
 
     def __str__(self):
         return self.as_string(policy=self.policy.clone(utf8=True))
@@ -982,7 +982,7 @@
             if subtype in preferencelist:
                 yield (preferencelist.index(subtype), part)
             return
-        if maintype != 'multipart':
+        if maintype != 'multipart' or not self.is_multipart():
             return
         if subtype != 'related':
             for subpart in part.iter_parts():
@@ -1087,7 +1087,7 @@
 
         Return an empty iterator for a non-multipart.
         """
-        if self.get_content_maintype() == 'multipart':
+        if self.is_multipart():
             yield from self.get_payload()
 
     def get_content(self, *args, content_manager=None, **kw):
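
Note: iter_parts() and the attachment helpers now key off is_multipart()
rather than the declared content type, so a message that merely claims to
be multipart no longer iterates over its string payload:

    from email.message import EmailMessage

    msg = EmailMessage()
    msg['Content-Type'] = 'multipart/mixed'
    msg.set_payload('not actually multipart')
    assert list(msg.iter_parts()) == []   # previously yielded characters
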
diff --git a/Lib/email/utils.py b/Lib/email/utils.py
index 1a7719d..cfdfeb3 100644
--- a/Lib/email/utils.py
+++ b/Lib/email/utils.py
@@ -109,7 +109,7 @@
 
 def getaddresses(fieldvalues):
     """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
-    all = COMMASPACE.join(fieldvalues)
+    all = COMMASPACE.join(str(v) for v in fieldvalues)
     a = _AddressList(all)
     return a.addresslist
 
@@ -195,7 +195,10 @@
 
 
 def parsedate_to_datetime(data):
-    *dtuple, tz = _parsedate_tz(data)
+    parsed_date_tz = _parsedate_tz(data)
+    if parsed_date_tz is None:
+        raise ValueError('Invalid date value or format "%s"' % str(data))
+    *dtuple, tz = parsed_date_tz
     if tz is None:
         return datetime.datetime(*dtuple[:6])
     return datetime.datetime(*dtuple[:6],
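
Note: parsedate_to_datetime() now raises ValueError for unparsable dates
instead of a confusing TypeError from unpacking None:

    from email.utils import parsedate_to_datetime

    try:
        parsedate_to_datetime('not a date')
    except ValueError as exc:
        print(exc)   # Invalid date value or format "not a date"
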
diff --git a/Lib/encodings/__init__.py b/Lib/encodings/__init__.py
index ddd5afd..4b37d33 100644
--- a/Lib/encodings/__init__.py
+++ b/Lib/encodings/__init__.py
@@ -61,7 +61,8 @@
         if c.isalnum() or c == '.':
             if punct and chars:
                 chars.append('_')
-            chars.append(c)
+            if c.isascii():
+                chars.append(c)
             punct = False
         else:
             punct = True
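
Note: normalize_encoding() now drops non-ASCII characters, so a codec
lookup by an alias containing, say, a Latin-1 letter can no longer produce
an unexpected module name:

    from encodings import normalize_encoding

    print(normalize_encoding('utf-8'))       # utf_8
    print(normalize_encoding('utf\xe9-8'))   # utf_8 (the é is dropped)
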
diff --git a/Lib/encodings/raw_unicode_escape.py b/Lib/encodings/raw_unicode_escape.py
index 2b919b4..46c8e07 100644
--- a/Lib/encodings/raw_unicode_escape.py
+++ b/Lib/encodings/raw_unicode_escape.py
@@ -21,15 +21,16 @@
     def encode(self, input, final=False):
         return codecs.raw_unicode_escape_encode(input, self.errors)[0]
 
-class IncrementalDecoder(codecs.IncrementalDecoder):
-    def decode(self, input, final=False):
-        return codecs.raw_unicode_escape_decode(input, self.errors)[0]
+class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
+    def _buffer_decode(self, input, errors, final):
+        return codecs.raw_unicode_escape_decode(input, errors, final)
 
 class StreamWriter(Codec,codecs.StreamWriter):
     pass
 
 class StreamReader(Codec,codecs.StreamReader):
-    pass
+    def decode(self, input, errors='strict'):
+        return codecs.raw_unicode_escape_decode(input, errors, False)
 
 ### encodings module API
 
diff --git a/Lib/encodings/unicode_escape.py b/Lib/encodings/unicode_escape.py
index 817f932..9b1ce99 100644
--- a/Lib/encodings/unicode_escape.py
+++ b/Lib/encodings/unicode_escape.py
@@ -21,15 +21,16 @@
     def encode(self, input, final=False):
         return codecs.unicode_escape_encode(input, self.errors)[0]
 
-class IncrementalDecoder(codecs.IncrementalDecoder):
-    def decode(self, input, final=False):
-        return codecs.unicode_escape_decode(input, self.errors)[0]
+class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
+    def _buffer_decode(self, input, errors, final):
+        return codecs.unicode_escape_decode(input, errors, final)
 
 class StreamWriter(Codec,codecs.StreamWriter):
     pass
 
 class StreamReader(Codec,codecs.StreamReader):
-    pass
+    def decode(self, input, errors='strict'):
+        return codecs.unicode_escape_decode(input, errors, False)
 
 ### encodings module API
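
Note: both escape codecs now buffer incomplete escape sequences across
chunks instead of decoding each chunk independently. A sketch using the
public incremental-decoder API:

    import codecs

    dec = codecs.getincrementaldecoder('unicode_escape')()
    out = dec.decode(b'\\u00')            # incomplete: buffered, returns ''
    out += dec.decode(b'e9', final=True)  # completes the escape
    assert out == '\xe9'
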
 
diff --git a/Lib/enum.py b/Lib/enum.py
index ebadd9f..f5657a6 100644
--- a/Lib/enum.py
+++ b/Lib/enum.py
@@ -10,31 +10,55 @@
 
 
 def _is_descriptor(obj):
-    """Returns True if obj is a descriptor, False otherwise."""
+    """
+    Returns True if obj is a descriptor, False otherwise.
+    """
     return (
             hasattr(obj, '__get__') or
             hasattr(obj, '__set__') or
-            hasattr(obj, '__delete__'))
-
+            hasattr(obj, '__delete__')
+            )
 
 def _is_dunder(name):
-    """Returns True if a __dunder__ name, False otherwise."""
-    return (len(name) > 4 and
+    """
+    Returns True if a __dunder__ name, False otherwise.
+    """
+    return (
+            len(name) > 4 and
             name[:2] == name[-2:] == '__' and
             name[2] != '_' and
-            name[-3] != '_')
-
+            name[-3] != '_'
+            )
 
 def _is_sunder(name):
-    """Returns True if a _sunder_ name, False otherwise."""
-    return (len(name) > 2 and
+    """
+    Returns True if a _sunder_ name, False otherwise.
+    """
+    return (
+            len(name) > 2 and
             name[0] == name[-1] == '_' and
             name[1:2] != '_' and
-            name[-2:-1] != '_')
+            name[-2:-1] != '_'
+            )
 
+def _is_private(cls_name, name):
+    # do not use `re` as `re` imports `enum`
+    pattern = '_%s__' % (cls_name, )
+    pat_len = len(pattern)
+    if (
+            len(name) > pat_len
+            and name.startswith(pattern)
+            and name[pat_len:pat_len+1] != '_'
+            and (name[-1] != '_' or name[-2] != '_')
+        ):
+        return True
+    else:
+        return False
 
 def _make_class_unpicklable(cls):
-    """Make the given class un-picklable."""
+    """
+    Make the given class un-picklable.
+    """
     def _break_on_call_reduce(self, proto):
         raise TypeError('%r cannot be pickled' % self)
     cls.__reduce_ex__ = _break_on_call_reduce
@@ -49,11 +73,11 @@
 
 
 class _EnumDict(dict):
-    """Track enum member order and ensure member names are not reused.
+    """
+    Track enum member order and ensure member names are not reused.
 
     EnumMeta will use the names found in self._member_names as the
     enumeration member names.
-
     """
     def __init__(self):
         super().__init__()
@@ -63,14 +87,22 @@
         self._auto_called = False
 
     def __setitem__(self, key, value):
-        """Changes anything not dundered or not a descriptor.
+        """
+        Changes anything not dundered or not a descriptor.
 
         If an enum member name is used twice, an error is raised; duplicate
         values are not checked for.
 
         Single underscore (sunder) names are reserved.
-
         """
+        if _is_private(self._cls_name, key):
+            import warnings
+            warnings.warn(
+                    "private variables, such as %r, will be normal attributes in 3.11"
+                        % (key, ),
+                    DeprecationWarning,
+                    stacklevel=2,
+                    )
         if _is_sunder(key):
             if key not in (
                     '_order_', '_create_pseudo_member_',
@@ -90,7 +122,10 @@
                 self._ignore = value
                 already = set(value) & set(self._member_names)
                 if already:
-                    raise ValueError('_ignore_ cannot specify already set names: %r' % (already, ))
+                    raise ValueError(
+                            '_ignore_ cannot specify already set names: %r'
+                            % (already, )
+                            )
         elif _is_dunder(key):
             if key == '__order__':
                 key = '_order_'
@@ -105,7 +140,12 @@
                 raise TypeError('%r already defined as: %r' % (key, self[key]))
             if isinstance(value, auto):
                 if value.value == _auto_null:
-                    value.value = self._generate_next_value(key, 1, len(self._member_names), self._last_values[:])
+                    value.value = self._generate_next_value(
+                            key,
+                            1,
+                            len(self._member_names),
+                            self._last_values[:],
+                            )
                     self._auto_called = True
                 value = value.value
             self._member_names.append(key)
@@ -118,22 +158,26 @@
 # This is also why there are checks in EnumMeta like `if Enum is not None`
 Enum = None
 
-
 class EnumMeta(type):
-    """Metaclass for Enum"""
+    """
+    Metaclass for Enum
+    """
     @classmethod
-    def __prepare__(metacls, cls, bases):
+    def __prepare__(metacls, cls, bases, **kwds):
         # check that previous enum members do not exist
         metacls._check_for_existing_members(cls, bases)
         # create the namespace dict
         enum_dict = _EnumDict()
+        enum_dict._cls_name = cls
         # inherit previous flags and _generate_next_value_ function
         member_type, first_enum = metacls._get_mixins_(cls, bases)
         if first_enum is not None:
-            enum_dict['_generate_next_value_'] = getattr(first_enum, '_generate_next_value_', None)
+            enum_dict['_generate_next_value_'] = getattr(
+                    first_enum, '_generate_next_value_', None,
+                    )
         return enum_dict
 
-    def __new__(metacls, cls, bases, classdict):
+    def __new__(metacls, cls, bases, classdict, **kwds):
         # an Enum class is final once enumeration items have been defined; it
         # cannot be mixed with other types (int, float, etc.) if it has an
         # inherited __new__ unless a new __new__ is defined (or the resulting
@@ -145,8 +189,9 @@
         for key in ignore:
             classdict.pop(key, None)
         member_type, first_enum = metacls._get_mixins_(cls, bases)
-        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
-                                                        first_enum)
+        __new__, save_new, use_args = metacls._find_new_(
+                classdict, member_type, first_enum,
+                )
 
         # save enum items into separate mapping so they don't get baked into
         # the new class
@@ -167,17 +212,18 @@
         if '__doc__' not in classdict:
             classdict['__doc__'] = 'An enumeration.'
 
-        # create our new Enum type
-        enum_class = super().__new__(metacls, cls, bases, classdict)
+        enum_class = super().__new__(metacls, cls, bases, classdict, **kwds)
         enum_class._member_names_ = []               # names in definition order
         enum_class._member_map_ = {}                 # name->value map
         enum_class._member_type_ = member_type
 
         # save DynamicClassAttribute attributes from super classes so we know
         # if we can take the shortcut of storing members in the class dict
-        dynamic_attributes = {k for c in enum_class.mro()
-                              for k, v in c.__dict__.items()
-                              if isinstance(v, DynamicClassAttribute)}
+        dynamic_attributes = {
+                k for c in enum_class.mro()
+                for k, v in c.__dict__.items()
+                if isinstance(v, DynamicClassAttribute)
+                }
 
         # Reverse value->name map for hashable values.
         enum_class._value2member_map_ = {}
@@ -197,8 +243,32 @@
                 methods = ('__getnewargs_ex__', '__getnewargs__',
                         '__reduce_ex__', '__reduce__')
                 if not any(m in member_type.__dict__ for m in methods):
-                    _make_class_unpicklable(enum_class)
-
+                    if '__new__' in classdict:
+                        # too late, sabotage
+                        _make_class_unpicklable(enum_class)
+                    else:
+                        # final attempt to verify that pickling would work:
+                        # travel mro until __new__ is found, checking for
+                        # __reduce__ and friends along the way -- if any of them
+                        # are found before/when __new__ is found, pickling should
+                        # work
+                        sabotage = None
+                        for chain in bases:
+                            for base in chain.__mro__:
+                                if base is object:
+                                    continue
+                                elif any(m in base.__dict__ for m in methods):
+                                    # found one, we're good
+                                    sabotage = False
+                                    break
+                                elif '__new__' in base.__dict__:
+                                    # not good
+                                    sabotage = True
+                                    break
+                            if sabotage is not None:
+                                break
+                        if sabotage:
+                            _make_class_unpicklable(enum_class)
         # instantiate them, checking for duplicates as we go
         # we instantiate first instead of checking for duplicates first in case
         # a custom __new__ is doing something funky with the values -- such as
@@ -287,7 +357,8 @@
         return True
 
     def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
-        """Either returns an existing member, or creates a new enum class.
+        """
+        Either returns an existing member, or creates a new enum class.
 
         This method is used both when an enum class is given a value to match
         to an enumeration member (i.e. Color(3)) and for the functional API
@@ -309,40 +380,54 @@
         not correct, unpickling will fail in some circumstances.
 
         `type`, if set, will be mixed in as the first base class.
-
         """
         if names is None:  # simple value lookup
             return cls.__new__(cls, value)
         # otherwise, functional API: we're creating a new Enum type
-        return cls._create_(value, names, module=module, qualname=qualname, type=type, start=start)
+        return cls._create_(
+                value,
+                names,
+                module=module,
+                qualname=qualname,
+                type=type,
+                start=start,
+                )
 
-    def __contains__(cls, member):
-        if not isinstance(member, Enum):
+    def __contains__(cls, obj):
+        if not isinstance(obj, Enum):
+            import warnings
+            warnings.warn(
+                    "in 3.12 __contains__ will no longer raise TypeError, but will return True if\n"
+                    "obj is a member or a member's value",
+                    DeprecationWarning,
+                    stacklevel=2,
+                    )
             raise TypeError(
                 "unsupported operand type(s) for 'in': '%s' and '%s'" % (
-                    type(member).__qualname__, cls.__class__.__qualname__))
-        return isinstance(member, cls) and member._name_ in cls._member_map_
+                    type(obj).__qualname__, cls.__class__.__qualname__))
+        return isinstance(obj, cls) and obj._name_ in cls._member_map_
 
     def __delattr__(cls, attr):
         # nicer error message when someone tries to delete an attribute
         # (see issue19025).
         if attr in cls._member_map_:
-            raise AttributeError(
-                    "%s: cannot delete Enum member." % cls.__name__)
+            raise AttributeError("%s: cannot delete Enum member." % cls.__name__)
         super().__delattr__(attr)
 
     def __dir__(self):
-        return (['__class__', '__doc__', '__members__', '__module__'] +
-                self._member_names_)
+        return (
+                ['__class__', '__doc__', '__members__', '__module__']
+                + self._member_names_
+                )
 
     def __getattr__(cls, name):
-        """Return the enum member matching `name`
+        """
+        Return the enum member matching `name`
 
         We use __getattr__ instead of descriptors or inserting into the enum
         class' __dict__ in order to support `name` and `value` being both
         properties for enum members (which live in the class' __dict__) and
         enum members themselves.
-
         """
         if _is_dunder(name):
             raise AttributeError(name)
@@ -355,6 +440,9 @@
         return cls._member_map_[name]
 
     def __iter__(cls):
+        """
+        Returns members in definition order.
+        """
         return (cls._member_map_[name] for name in cls._member_names_)
 
     def __len__(cls):
@@ -362,11 +450,11 @@
 
     @property
     def __members__(cls):
-        """Returns a mapping of member name->value.
+        """
+        Returns a mapping of member name->value.
 
         This mapping lists all enum members, including aliases. Note that this
         is a read-only view of the internal mapping.
-
         """
         return MappingProxyType(cls._member_map_)
 
@@ -374,15 +462,18 @@
         return "<enum %r>" % cls.__name__
 
     def __reversed__(cls):
+        """
+        Returns members in reverse definition order.
+        """
         return (cls._member_map_[name] for name in reversed(cls._member_names_))
 
     def __setattr__(cls, name, value):
-        """Block attempts to reassign Enum members.
+        """
+        Block attempts to reassign Enum members.
 
         A simple assignment to the class namespace only changes one of the
         several possible ways to get an Enum member from the Enum class,
         resulting in an inconsistent Enumeration.
-
         """
         member_map = cls.__dict__.get('_member_map_', {})
         if name in member_map:
@@ -390,7 +481,8 @@
         super().__setattr__(name, value)
 
     def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1):
-        """Convenience method to create a new Enum class.
+        """
+        Convenience method to create a new Enum class.
 
         `names` can be:
 
@@ -399,7 +491,6 @@
         * An iterable of member names.  Values are incremented by 1 from `start`.
         * An iterable of (member name, value) pairs.
         * A mapping of member name -> value pairs.
-
         """
         metacls = cls.__class__
         bases = (cls, ) if type is None else (type, cls)
@@ -480,37 +571,44 @@
         for chain in bases:
             for base in chain.__mro__:
                 if issubclass(base, Enum) and base._member_names_:
-                    raise TypeError("%s: cannot extend enumeration %r" % (class_name, base.__name__))
+                    raise TypeError(
+                            "%s: cannot extend enumeration %r"
+                            % (class_name, base.__name__)
+                            )
 
     @staticmethod
     def _get_mixins_(class_name, bases):
-        """Returns the type for creating enum members, and the first inherited
+        """
+        Returns the type for creating enum members, and the first inherited
         enum class.
 
         bases: the tuple of bases that was given to __new__
-
         """
         if not bases:
             return object, Enum
 
         def _find_data_type(bases):
-            data_types = []
+            data_types = set()
             for chain in bases:
                 candidate = None
                 for base in chain.__mro__:
                     if base is object:
                         continue
+                    elif issubclass(base, Enum):
+                        if base._member_type_ is not object:
+                            data_types.add(base._member_type_)
+                            break
                     elif '__new__' in base.__dict__:
                         if issubclass(base, Enum):
                             continue
-                        data_types.append(candidate or base)
+                        data_types.add(candidate or base)
                         break
-                    elif not issubclass(base, Enum):
-                        candidate = base
+                    else:
+                        candidate = candidate or base
             if len(data_types) > 1:
                 raise TypeError('%r: too many data types: %r' % (class_name, data_types))
             elif data_types:
-                return data_types[0]
+                return data_types.pop()
             else:
                 return None
 
@@ -527,12 +625,12 @@
 
     @staticmethod
     def _find_new_(classdict, member_type, first_enum):
-        """Returns the __new__ to be used for creating the enum members.
+        """
+        Returns the __new__ to be used for creating the enum members.
 
         classdict: the class dictionary given to __new__
         member_type: the data type whose __new__ will be used by default
         first_enum: enumeration to check for an overriding __new__
-
         """
         # now find the correct __new__, checking to see if one was defined
         # by the user; also check earlier enum classes in case a __new__ was
@@ -572,10 +670,10 @@
 
 
 class Enum(metaclass=EnumMeta):
-    """Generic enumeration.
+    """
+    Generic enumeration.
 
     Derive from this class to define new enumerations.
-
     """
     def __new__(cls, value):
         # all enum instances are actually created during class construction
@@ -603,21 +701,35 @@
         except Exception as e:
             exc = e
             result = None
-        if isinstance(result, cls):
-            return result
-        else:
-            ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__))
-            if result is None and exc is None:
-                raise ve_exc
-            elif exc is None:
-                exc = TypeError(
-                        'error in %s._missing_: returned %r instead of None or a valid member'
-                        % (cls.__name__, result)
-                        )
-            exc.__context__ = ve_exc
-            raise exc
+        try:
+            if isinstance(result, cls):
+                return result
+            else:
+                ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__))
+                if result is None and exc is None:
+                    raise ve_exc
+                elif exc is None:
+                    exc = TypeError(
+                            'error in %s._missing_: returned %r instead of None or a valid member'
+                            % (cls.__name__, result)
+                            )
+                if not isinstance(exc, ValueError):
+                    exc.__context__ = ve_exc
+                raise exc
+        finally:
+            # ensure all variables that could hold an exception are destroyed
+            exc = None
+            ve_exc = None
 
     def _generate_next_value_(name, start, count, last_values):
+        """
+        Generate the next value when not given.
+
+        name: the name of the member
+        start: the initial start value or None
+        count: the number of existing members
+        last_values: the list of values assigned so far
+        """
         for last_value in reversed(last_values):
             try:
                 return last_value + 1
@@ -638,21 +750,27 @@
         return "%s.%s" % (self.__class__.__name__, self._name_)
 
     def __dir__(self):
+        """
+        Returns all members and all public methods
+        """
         added_behavior = [
                 m
                 for cls in self.__class__.mro()
                 for m in cls.__dict__
                 if m[0] != '_' and m not in self._member_map_
-                ]
+                ] + [m for m in self.__dict__ if m[0] != '_']
         return (['__class__', '__doc__', '__module__'] + added_behavior)
 
     def __format__(self, format_spec):
+        """
+        Returns format using actual value type unless __str__ has been overridden.
+        """
         # mixed-in Enums should use the mixed-in type's __format__, otherwise
         # we can get strange results with the Enum name showing up instead of
         # the value
 
         # pure Enum branch, or branch with __str__ explicitly overridden
-        str_overridden = type(self).__str__ != Enum.__str__
+        str_overridden = type(self).__str__ not in (Enum.__str__, Flag.__str__)
         if self._member_type_ is object or str_overridden:
             cls = str
             val = str(self)
@@ -694,7 +812,9 @@
     return self.name
 
 class Flag(Enum):
-    """Support for flags"""
+    """
+    Support for flags
+    """
 
     def _generate_next_value_(name, start, count, last_values):
         """
@@ -717,6 +837,9 @@
 
     @classmethod
     def _missing_(cls, value):
+        """
+        Returns member (possibly creating it) if one can be found for value.
+        """
         original_value = value
         if value < 0:
             value = ~value
@@ -746,6 +869,9 @@
         return pseudo_member
 
     def __contains__(self, other):
+        """
+        Returns True if self has at least the same flags set as other.
+        """
         if not isinstance(other, self.__class__):
             raise TypeError(
                 "unsupported operand type(s) for 'in': '%s' and '%s'" % (
@@ -804,10 +930,15 @@
 
 
 class IntFlag(int, Flag):
-    """Support for integer-based Flags"""
+    """
+    Support for integer-based Flags
+    """
 
     @classmethod
     def _missing_(cls, value):
+        """
+        Returns member (possibly creating it) if one can be found for value.
+        """
         if not isinstance(value, int):
             raise ValueError("%r is not a valid %s" % (value, cls.__qualname__))
         new_member = cls._create_pseudo_member_(value)
@@ -815,6 +946,9 @@
 
     @classmethod
     def _create_pseudo_member_(cls, value):
+        """
+        Create a composite member iff value contains only members.
+        """
         pseudo_member = cls._value2member_map_.get(value, None)
         if pseudo_member is None:
             need_to_create = [value]
@@ -869,11 +1003,15 @@
 
 
 def _high_bit(value):
-    """returns index of highest bit, or -1 if value is zero or negative"""
+    """
+    returns index of highest bit, or -1 if value is zero or negative
+    """
     return value.bit_length() - 1
 
 def unique(enumeration):
-    """Class decorator for enumerations ensuring unique member values."""
+    """
+    Class decorator for enumerations ensuring unique member values.
+    """
     duplicates = []
     for name, member in enumeration.__members__.items():
         if name != member.name:
@@ -886,7 +1024,9 @@
     return enumeration
 
 def _decompose(flag, value):
-    """Extract all members from the value."""
+    """
+    Extract all members from the value.
+    """
     # _decompose is only called if the value is not named
     not_covered = value
     negative = value < 0
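
Note: among the enum changes above, class-private names in an Enum body now
emit a DeprecationWarning via the new _is_private() check:

    import warnings
    from enum import Enum

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')

        class Color(Enum):
            RED = 1
            __mangled = 2   # mangled to _Color__mangled in the class body

    print([str(w.message) for w in caught])
    # ["private variables, such as '_Color__mangled', will be normal
    #  attributes in 3.11"]
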
diff --git a/Lib/filecmp.py b/Lib/filecmp.py
index 7a4da6b..70a4b23 100644
--- a/Lib/filecmp.py
+++ b/Lib/filecmp.py
@@ -36,8 +36,9 @@
 
     f2 -- Second file name
 
-    shallow -- Just check stat signature (do not read the files).
-               defaults to True.
+    shallow -- treat files as identical if their stat signatures (type, size,
+               mtime) are identical. Otherwise, files are considered different
+               if their sizes or contents differ.  [default: True]
 
     Return value:
 
@@ -115,7 +116,9 @@
      same_files: list of identical files.
      diff_files: list of filenames which differ.
      funny_files: list of files which could not be compared.
-     subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
+     subdirs: a dictionary of dircmp instances (or MyDirCmp instances if this
+       object is of type MyDirCmp, a subclass of dircmp), keyed by names
+       in common_dirs.
      """
 
     def __init__(self, a, b, ignore=None, hide=None): # Initialize
@@ -185,14 +188,15 @@
         self.same_files, self.diff_files, self.funny_files = xx
 
     def phase4(self): # Find out differences between common subdirectories
-        # A new dircmp object is created for each common subdirectory,
+        # A new dircmp (or MyDirCmp if dircmp was subclassed) object is created
+        # for each common subdirectory,
         # these are stored in a dictionary indexed by filename.
         # The hide and ignore properties are inherited from the parent
         self.subdirs = {}
         for x in self.common_dirs:
             a_x = os.path.join(self.left, x)
             b_x = os.path.join(self.right, x)
-            self.subdirs[x]  = dircmp(a_x, b_x, self.ignore, self.hide)
+            self.subdirs[x]  = self.__class__(a_x, b_x, self.ignore, self.hide)
 
     def phase4_closure(self): # Recursively call phase4() on subdirectories
         self.phase4()
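
Note: phase4() now instantiates self.__class__, so dircmp subclasses
recurse as themselves. A quick check with throwaway directories:

    import filecmp
    import os
    import tempfile

    class MyDirCmp(filecmp.dircmp):
        pass

    with tempfile.TemporaryDirectory() as a, \
         tempfile.TemporaryDirectory() as b:
        os.mkdir(os.path.join(a, 'sub'))
        os.mkdir(os.path.join(b, 'sub'))
        cmp = MyDirCmp(a, b)
        assert all(type(d) is MyDirCmp for d in cmp.subdirs.values())
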
diff --git a/Lib/fileinput.py b/Lib/fileinput.py
index 0c31f93..3534718 100644
--- a/Lib/fileinput.py
+++ b/Lib/fileinput.py
@@ -3,7 +3,7 @@
 Typical use is:
 
     import fileinput
-    for line in fileinput.input():
+    for line in fileinput.input(encoding="utf-8"):
         process(line)
 
 This iterates over the lines of all files listed in sys.argv[1:],
@@ -63,15 +63,9 @@
 deleted when the output file is closed.  In-place filtering is
 disabled when standard input is read.  XXX The current implementation
 does not work for MS-DOS 8+3 filesystems.
-
-XXX Possible additions:
-
-- optional getopt argument processing
-- isatty()
-- read(), read(size), even readlines()
-
 """
 
+import io
 import sys, os
 from types import GenericAlias
 
@@ -81,7 +75,8 @@
 
 _state = None
 
-def input(files=None, inplace=False, backup="", *, mode="r", openhook=None):
+def input(files=None, inplace=False, backup="", *, mode="r", openhook=None,
+          encoding=None, errors=None):
     """Return an instance of the FileInput class, which can be iterated.
 
     The parameters are passed to the constructor of the FileInput class.
@@ -91,7 +86,8 @@
     global _state
     if _state and _state._file:
         raise RuntimeError("input() already active")
-    _state = FileInput(files, inplace, backup, mode=mode, openhook=openhook)
+    _state = FileInput(files, inplace, backup, mode=mode, openhook=openhook,
+                       encoding=encoding, errors=errors)
     return _state
 
 def close():
@@ -186,7 +182,7 @@
     """
 
     def __init__(self, files=None, inplace=False, backup="", *,
-                 mode="r", openhook=None):
+                 mode="r", openhook=None, encoding=None, errors=None):
         if isinstance(files, str):
             files = (files,)
         elif isinstance(files, os.PathLike):
@@ -209,6 +205,17 @@
         self._file = None
         self._isstdin = False
         self._backupfilename = None
+        self._encoding = encoding
+        self._errors = errors
+
+        # We cannot use io.text_encoding() here because old openhooks don't
+        # take an encoding parameter.
+        if (sys.flags.warn_default_encoding and
+                "b" not in mode and encoding is None and openhook is None):
+            import warnings
+            warnings.warn("'encoding' argument not specified.",
+                          EncodingWarning, 2)
+
         # restrict mode argument to reading modes
         if mode not in ('r', 'rU', 'U', 'rb'):
             raise ValueError("FileInput opening mode must be one of "
@@ -324,6 +331,13 @@
         self._file = None
         self._isstdin = False
         self._backupfilename = 0
+
+        # EncodingWarning is emitted in __init__() already
+        if "b" not in self._mode:
+            encoding = self._encoding or "locale"
+        else:
+            encoding = None
+
         if self._filename == '-':
             self._filename = '<stdin>'
             if 'b' in self._mode:
@@ -341,18 +355,18 @@
                     pass
                 # The next few lines may raise OSError
                 os.rename(self._filename, self._backupfilename)
-                self._file = open(self._backupfilename, self._mode)
+                self._file = open(self._backupfilename, self._mode, encoding=encoding)
                 try:
                     perm = os.fstat(self._file.fileno()).st_mode
                 except OSError:
-                    self._output = open(self._filename, self._write_mode)
+                    self._output = open(self._filename, self._write_mode, encoding=encoding)
                 else:
                     mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
                     if hasattr(os, 'O_BINARY'):
                         mode |= os.O_BINARY
 
                     fd = os.open(self._filename, mode, perm)
-                    self._output = os.fdopen(fd, self._write_mode)
+                    self._output = os.fdopen(fd, self._write_mode, encoding=encoding)
                     try:
                         os.chmod(self._filename, perm)
                     except OSError:
@@ -362,9 +376,15 @@
             else:
                 # This may raise OSError
                 if self._openhook:
-                    self._file = self._openhook(self._filename, self._mode)
+                    # Custom hooks written before Python 3.10 didn't take an
+                    # encoding argument
+                    if self._encoding is None:
+                        self._file = self._openhook(self._filename, self._mode)
+                    else:
+                        self._file = self._openhook(
+                            self._filename, self._mode, encoding=self._encoding, errors=self._errors)
                 else:
-                    self._file = open(self._filename, self._mode)
+                    self._file = open(self._filename, self._mode, encoding=encoding, errors=self._errors)
         self._readline = self._file.readline  # hide FileInput._readline
         return self._readline()
 
@@ -395,16 +415,23 @@
     __class_getitem__ = classmethod(GenericAlias)
 
 
-def hook_compressed(filename, mode):
+def hook_compressed(filename, mode, *, encoding=None, errors=None):
+    if encoding is None:  # EncodingWarning is emitted in FileInput() already.
+        encoding = "locale"
     ext = os.path.splitext(filename)[1]
     if ext == '.gz':
         import gzip
-        return gzip.open(filename, mode)
+        stream = gzip.open(filename, mode)
     elif ext == '.bz2':
         import bz2
-        return bz2.BZ2File(filename, mode)
+        stream = bz2.BZ2File(filename, mode)
     else:
-        return open(filename, mode)
+        return open(filename, mode, encoding=encoding, errors=errors)
+
+    # gzip and bz2 are binary mode by default.
+    if "b" not in mode:
+        stream = io.TextIOWrapper(stream, encoding=encoding, errors=errors)
+    return stream
 
 
 def hook_encoded(encoding, errors=None):
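
The hunk above threads the new encoding/errors parameters from FileInput
through open() and encoding-aware openhooks.  A minimal usage sketch (the
file name is illustrative):

    import fileinput

    # Passing encoding explicitly avoids the EncodingWarning emitted
    # under -X warn_default_encoding when no encoding is given.
    with fileinput.input(files=("example.txt",), encoding="utf-8") as f:
        for line in f:
            print(line.rstrip())
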
diff --git a/Lib/fnmatch.py b/Lib/fnmatch.py
index 0eb1802..7c52c23 100644
--- a/Lib/fnmatch.py
+++ b/Lib/fnmatch.py
@@ -52,7 +52,7 @@
     return re.compile(res).match
 
 def filter(names, pat):
-    """Return the subset of the list NAMES that match PAT."""
+    """Construct a list from those elements of the iterable NAMES that match PAT."""
     result = []
     pat = os.path.normcase(pat)
     match = _compile_pattern(pat)
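
The docstring fix reflects that filter() accepts any iterable, not only
lists.  A quick sketch:

    import fnmatch

    # Any iterable of names works; a new list is always returned.
    names = iter(["a.py", "b.txt", "c.py"])
    print(fnmatch.filter(names, "*.py"))  # -> ['a.py', 'c.py']
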
diff --git a/Lib/formatter.py b/Lib/formatter.py
deleted file mode 100644
index e2394de..0000000
--- a/Lib/formatter.py
+++ /dev/null
@@ -1,452 +0,0 @@
-"""Generic output formatting.
-
-Formatter objects transform an abstract flow of formatting events into
-specific output events on writer objects. Formatters manage several stack
-structures to allow various properties of a writer object to be changed and
-restored; writers need not be able to handle relative changes nor any sort
-of ``change back'' operation. Specific writer properties which may be
-controlled via formatter objects are horizontal alignment, font, and left
-margin indentations. A mechanism is provided which supports providing
-arbitrary, non-exclusive style settings to a writer as well. Additional
-interfaces facilitate formatting events which are not reversible, such as
-paragraph separation.
-
-Writer objects encapsulate device interfaces. Abstract devices, such as
-file formats, are supported as well as physical devices. The provided
-implementations all work with abstract devices. The interface makes
-available mechanisms for setting the properties which formatter objects
-manage and inserting data into the output.
-"""
-
-import sys
-import warnings
-warnings.warn('the formatter module is deprecated', DeprecationWarning,
-              stacklevel=2)
-
-
-AS_IS = None
-
-
-class NullFormatter:
-    """A formatter which does nothing.
-
-    If the writer parameter is omitted, a NullWriter instance is created.
-    No methods of the writer are called by NullFormatter instances.
-
-    Implementations should inherit from this class if implementing a writer
-    interface but don't need to inherit any implementation.
-
-    """
-
-    def __init__(self, writer=None):
-        if writer is None:
-            writer = NullWriter()
-        self.writer = writer
-    def end_paragraph(self, blankline): pass
-    def add_line_break(self): pass
-    def add_hor_rule(self, *args, **kw): pass
-    def add_label_data(self, format, counter, blankline=None): pass
-    def add_flowing_data(self, data): pass
-    def add_literal_data(self, data): pass
-    def flush_softspace(self): pass
-    def push_alignment(self, align): pass
-    def pop_alignment(self): pass
-    def push_font(self, x): pass
-    def pop_font(self): pass
-    def push_margin(self, margin): pass
-    def pop_margin(self): pass
-    def set_spacing(self, spacing): pass
-    def push_style(self, *styles): pass
-    def pop_style(self, n=1): pass
-    def assert_line_data(self, flag=1): pass
-
-
-class AbstractFormatter:
-    """The standard formatter.
-
-    This implementation has demonstrated wide applicability to many writers,
-    and may be used directly in most circumstances.  It has been used to
-    implement a full-featured World Wide Web browser.
-
-    """
-
-    #  Space handling policy:  blank spaces at the boundary between elements
-    #  are handled by the outermost context.  "Literal" data is not checked
-    #  to determine context, so spaces in literal data are handled directly
-    #  in all circumstances.
-
-    def __init__(self, writer):
-        self.writer = writer            # Output device
-        self.align = None               # Current alignment
-        self.align_stack = []           # Alignment stack
-        self.font_stack = []            # Font state
-        self.margin_stack = []          # Margin state
-        self.spacing = None             # Vertical spacing state
-        self.style_stack = []           # Other state, e.g. color
-        self.nospace = 1                # Should leading space be suppressed
-        self.softspace = 0              # Should a space be inserted
-        self.para_end = 1               # Just ended a paragraph
-        self.parskip = 0                # Skipped space between paragraphs?
-        self.hard_break = 1             # Have a hard break
-        self.have_label = 0
-
-    def end_paragraph(self, blankline):
-        if not self.hard_break:
-            self.writer.send_line_break()
-            self.have_label = 0
-        if self.parskip < blankline and not self.have_label:
-            self.writer.send_paragraph(blankline - self.parskip)
-            self.parskip = blankline
-            self.have_label = 0
-        self.hard_break = self.nospace = self.para_end = 1
-        self.softspace = 0
-
-    def add_line_break(self):
-        if not (self.hard_break or self.para_end):
-            self.writer.send_line_break()
-            self.have_label = self.parskip = 0
-        self.hard_break = self.nospace = 1
-        self.softspace = 0
-
-    def add_hor_rule(self, *args, **kw):
-        if not self.hard_break:
-            self.writer.send_line_break()
-        self.writer.send_hor_rule(*args, **kw)
-        self.hard_break = self.nospace = 1
-        self.have_label = self.para_end = self.softspace = self.parskip = 0
-
-    def add_label_data(self, format, counter, blankline = None):
-        if self.have_label or not self.hard_break:
-            self.writer.send_line_break()
-        if not self.para_end:
-            self.writer.send_paragraph((blankline and 1) or 0)
-        if isinstance(format, str):
-            self.writer.send_label_data(self.format_counter(format, counter))
-        else:
-            self.writer.send_label_data(format)
-        self.nospace = self.have_label = self.hard_break = self.para_end = 1
-        self.softspace = self.parskip = 0
-
-    def format_counter(self, format, counter):
-        label = ''
-        for c in format:
-            if c == '1':
-                label = label + ('%d' % counter)
-            elif c in 'aA':
-                if counter > 0:
-                    label = label + self.format_letter(c, counter)
-            elif c in 'iI':
-                if counter > 0:
-                    label = label + self.format_roman(c, counter)
-            else:
-                label = label + c
-        return label
-
-    def format_letter(self, case, counter):
-        label = ''
-        while counter > 0:
-            counter, x = divmod(counter-1, 26)
-            # This makes a strong assumption that lowercase letters
-            # and uppercase letters form two contiguous blocks, with
-            # letters in order!
-            s = chr(ord(case) + x)
-            label = s + label
-        return label
-
-    def format_roman(self, case, counter):
-        ones = ['i', 'x', 'c', 'm']
-        fives = ['v', 'l', 'd']
-        label, index = '', 0
-        # This will die of IndexError when counter is too big
-        while counter > 0:
-            counter, x = divmod(counter, 10)
-            if x == 9:
-                label = ones[index] + ones[index+1] + label
-            elif x == 4:
-                label = ones[index] + fives[index] + label
-            else:
-                if x >= 5:
-                    s = fives[index]
-                    x = x-5
-                else:
-                    s = ''
-                s = s + ones[index]*x
-                label = s + label
-            index = index + 1
-        if case == 'I':
-            return label.upper()
-        return label
-
-    def add_flowing_data(self, data):
-        if not data: return
-        prespace = data[:1].isspace()
-        postspace = data[-1:].isspace()
-        data = " ".join(data.split())
-        if self.nospace and not data:
-            return
-        elif prespace or self.softspace:
-            if not data:
-                if not self.nospace:
-                    self.softspace = 1
-                    self.parskip = 0
-                return
-            if not self.nospace:
-                data = ' ' + data
-        self.hard_break = self.nospace = self.para_end = \
-                          self.parskip = self.have_label = 0
-        self.softspace = postspace
-        self.writer.send_flowing_data(data)
-
-    def add_literal_data(self, data):
-        if not data: return
-        if self.softspace:
-            self.writer.send_flowing_data(" ")
-        self.hard_break = data[-1:] == '\n'
-        self.nospace = self.para_end = self.softspace = \
-                       self.parskip = self.have_label = 0
-        self.writer.send_literal_data(data)
-
-    def flush_softspace(self):
-        if self.softspace:
-            self.hard_break = self.para_end = self.parskip = \
-                              self.have_label = self.softspace = 0
-            self.nospace = 1
-            self.writer.send_flowing_data(' ')
-
-    def push_alignment(self, align):
-        if align and align != self.align:
-            self.writer.new_alignment(align)
-            self.align = align
-            self.align_stack.append(align)
-        else:
-            self.align_stack.append(self.align)
-
-    def pop_alignment(self):
-        if self.align_stack:
-            del self.align_stack[-1]
-        if self.align_stack:
-            self.align = align = self.align_stack[-1]
-            self.writer.new_alignment(align)
-        else:
-            self.align = None
-            self.writer.new_alignment(None)
-
-    def push_font(self, font):
-        size, i, b, tt = font
-        if self.softspace:
-            self.hard_break = self.para_end = self.softspace = 0
-            self.nospace = 1
-            self.writer.send_flowing_data(' ')
-        if self.font_stack:
-            csize, ci, cb, ctt = self.font_stack[-1]
-            if size is AS_IS: size = csize
-            if i is AS_IS: i = ci
-            if b is AS_IS: b = cb
-            if tt is AS_IS: tt = ctt
-        font = (size, i, b, tt)
-        self.font_stack.append(font)
-        self.writer.new_font(font)
-
-    def pop_font(self):
-        if self.font_stack:
-            del self.font_stack[-1]
-        if self.font_stack:
-            font = self.font_stack[-1]
-        else:
-            font = None
-        self.writer.new_font(font)
-
-    def push_margin(self, margin):
-        self.margin_stack.append(margin)
-        fstack = [m for m in self.margin_stack if m]
-        if not margin and fstack:
-            margin = fstack[-1]
-        self.writer.new_margin(margin, len(fstack))
-
-    def pop_margin(self):
-        if self.margin_stack:
-            del self.margin_stack[-1]
-        fstack = [m for m in self.margin_stack if m]
-        if fstack:
-            margin = fstack[-1]
-        else:
-            margin = None
-        self.writer.new_margin(margin, len(fstack))
-
-    def set_spacing(self, spacing):
-        self.spacing = spacing
-        self.writer.new_spacing(spacing)
-
-    def push_style(self, *styles):
-        if self.softspace:
-            self.hard_break = self.para_end = self.softspace = 0
-            self.nospace = 1
-            self.writer.send_flowing_data(' ')
-        for style in styles:
-            self.style_stack.append(style)
-        self.writer.new_styles(tuple(self.style_stack))
-
-    def pop_style(self, n=1):
-        del self.style_stack[-n:]
-        self.writer.new_styles(tuple(self.style_stack))
-
-    def assert_line_data(self, flag=1):
-        self.nospace = self.hard_break = not flag
-        self.para_end = self.parskip = self.have_label = 0
-
-
-class NullWriter:
-    """Minimal writer interface to use in testing & inheritance.
-
-    A writer which only provides the interface definition; no actions are
-    taken on any methods.  This should be the base class for all writers
-    which do not need to inherit any implementation methods.
-
-    """
-    def __init__(self): pass
-    def flush(self): pass
-    def new_alignment(self, align): pass
-    def new_font(self, font): pass
-    def new_margin(self, margin, level): pass
-    def new_spacing(self, spacing): pass
-    def new_styles(self, styles): pass
-    def send_paragraph(self, blankline): pass
-    def send_line_break(self): pass
-    def send_hor_rule(self, *args, **kw): pass
-    def send_label_data(self, data): pass
-    def send_flowing_data(self, data): pass
-    def send_literal_data(self, data): pass
-
-
-class AbstractWriter(NullWriter):
-    """A writer which can be used in debugging formatters, but not much else.
-
-    Each method simply announces itself by printing its name and
-    arguments on standard output.
-
-    """
-
-    def new_alignment(self, align):
-        print("new_alignment(%r)" % (align,))
-
-    def new_font(self, font):
-        print("new_font(%r)" % (font,))
-
-    def new_margin(self, margin, level):
-        print("new_margin(%r, %d)" % (margin, level))
-
-    def new_spacing(self, spacing):
-        print("new_spacing(%r)" % (spacing,))
-
-    def new_styles(self, styles):
-        print("new_styles(%r)" % (styles,))
-
-    def send_paragraph(self, blankline):
-        print("send_paragraph(%r)" % (blankline,))
-
-    def send_line_break(self):
-        print("send_line_break()")
-
-    def send_hor_rule(self, *args, **kw):
-        print("send_hor_rule()")
-
-    def send_label_data(self, data):
-        print("send_label_data(%r)" % (data,))
-
-    def send_flowing_data(self, data):
-        print("send_flowing_data(%r)" % (data,))
-
-    def send_literal_data(self, data):
-        print("send_literal_data(%r)" % (data,))
-
-
-class DumbWriter(NullWriter):
-    """Simple writer class which writes output on the file object passed in
-    as the file parameter or, if file is omitted, on standard output.  The
-    output is simply word-wrapped to the number of columns specified by
-    the maxcol parameter.  This class is suitable for reflowing a sequence
-    of paragraphs.
-
-    """
-
-    def __init__(self, file=None, maxcol=72):
-        self.file = file or sys.stdout
-        self.maxcol = maxcol
-        NullWriter.__init__(self)
-        self.reset()
-
-    def reset(self):
-        self.col = 0
-        self.atbreak = 0
-
-    def send_paragraph(self, blankline):
-        self.file.write('\n'*blankline)
-        self.col = 0
-        self.atbreak = 0
-
-    def send_line_break(self):
-        self.file.write('\n')
-        self.col = 0
-        self.atbreak = 0
-
-    def send_hor_rule(self, *args, **kw):
-        self.file.write('\n')
-        self.file.write('-'*self.maxcol)
-        self.file.write('\n')
-        self.col = 0
-        self.atbreak = 0
-
-    def send_literal_data(self, data):
-        self.file.write(data)
-        i = data.rfind('\n')
-        if i >= 0:
-            self.col = 0
-            data = data[i+1:]
-        data = data.expandtabs()
-        self.col = self.col + len(data)
-        self.atbreak = 0
-
-    def send_flowing_data(self, data):
-        if not data: return
-        atbreak = self.atbreak or data[0].isspace()
-        col = self.col
-        maxcol = self.maxcol
-        write = self.file.write
-        for word in data.split():
-            if atbreak:
-                if col + len(word) >= maxcol:
-                    write('\n')
-                    col = 0
-                else:
-                    write(' ')
-                    col = col + 1
-            write(word)
-            col = col + len(word)
-            atbreak = 1
-        self.col = col
-        self.atbreak = data[-1].isspace()
-
-
-def test(file = None):
-    w = DumbWriter()
-    f = AbstractFormatter(w)
-    if file is not None:
-        fp = open(file)
-    elif sys.argv[1:]:
-        fp = open(sys.argv[1])
-    else:
-        fp = sys.stdin
-    try:
-        for line in fp:
-            if line == '\n':
-                f.end_paragraph(1)
-            else:
-                f.add_flowing_data(line)
-    finally:
-        if fp is not sys.stdin:
-            fp.close()
-    f.end_paragraph(0)
-
-
-if __name__ == '__main__':
-    test()
diff --git a/Lib/fractions.py b/Lib/fractions.py
index de3e23b..96047be 100644
--- a/Lib/fractions.py
+++ b/Lib/fractions.py
@@ -380,32 +380,139 @@
 
         return forward, reverse
 
+    # Rational arithmetic algorithms: Knuth, TAOCP, Volume 2, 4.5.1.
+    #
+    # Assume input fractions a and b are normalized.
+    #
+    # 1) Consider addition/subtraction.
+    #
+    # Let g = gcd(da, db). Then
+    #
+    #              na   nb    na*db ± nb*da
+    #     a ± b == -- ± -- == ------------- ==
+    #              da   db        da*db
+    #
+    #              na*(db//g) ± nb*(da//g)    t
+    #           == ----------------------- == -
+    #                      (da*db)//g         d
+    #
+    # Now, if g > 1, we're working with smaller integers.
+    #
+    # Note that t, (da//g) and (db//g) are pairwise coprime.
+    #
+    # Indeed, (da//g) and (db//g) share no common factors (they were
+    # removed) and da is coprime with na (since input fractions are
+    # normalized), hence (da//g) and na are coprime.  By symmetry,
+    # (db//g) and nb are coprime too.  Then,
+    #
+    #     gcd(t, da//g) == gcd(na*(db//g), da//g) == 1
+    #     gcd(t, db//g) == gcd(nb*(da//g), db//g) == 1
+    #
+    # The above allows us to optimize reduction of the result to lowest
+    # terms.  Indeed,
+    #
+    #     g2 = gcd(t, d) == gcd(t, (da//g)*(db//g)*g) == gcd(t, g)
+    #
+    #                       t//g2                   t//g2
+    #     a ± b == ----------------------- == ----------------
+    #              (da//g)*(db//g)*(g//g2)    (da//g)*(db//g2)
+    #
+    # is a normalized fraction.  This is useful because the unnormalized
+    # denominator d could be much larger than g.
+    #
+    # We should special-case g == 1 (and g2 == 1), since 60.8% of
+    # randomly-chosen integers are coprime:
+    # https://en.wikipedia.org/wiki/Coprime_integers#Probability_of_coprimality
+    # Note that g2 == 1 always holds for fractions obtained from floats: here
+    # g is a power of 2 and the unnormalized numerator t is an odd integer.
+    #
+    # 2) Consider multiplication
+    #
+    # Let g1 = gcd(na, db) and g2 = gcd(nb, da), then
+    #
+    #            na*nb    na*nb    (na//g1)*(nb//g2)
+    #     a*b == ----- == ----- == -----------------
+    #            da*db    db*da    (db//g1)*(da//g2)
+    #
+    # Note that after the divisions we're multiplying smaller integers.
+    #
+    # Also, the resulting fraction is normalized, because each of the
+    # two factors in the numerator is coprime to each of the two factors
+    # in the denominator.
+    #
+    # Indeed, pick (na//g1).  It's coprime with (da//g2), because input
+    # fractions are normalized.  It's also coprime with (db//g1), because
+    # common factors are removed by g1 == gcd(na, db).
+    #
+    # As for addition/subtraction, we should special-case g1 == 1
+    # and g2 == 1 for the same reason.  That also happens when multiplying
+    # rationals obtained from floats.
+
     def _add(a, b):
         """a + b"""
-        da, db = a.denominator, b.denominator
-        return Fraction(a.numerator * db + b.numerator * da,
-                        da * db)
+        na, da = a.numerator, a.denominator
+        nb, db = b.numerator, b.denominator
+        g = math.gcd(da, db)
+        if g == 1:
+            return Fraction(na * db + da * nb, da * db, _normalize=False)
+        s = da // g
+        t = na * (db // g) + nb * s
+        g2 = math.gcd(t, g)
+        if g2 == 1:
+            return Fraction(t, s * db, _normalize=False)
+        return Fraction(t // g2, s * (db // g2), _normalize=False)
 
     __add__, __radd__ = _operator_fallbacks(_add, operator.add)
 
     def _sub(a, b):
         """a - b"""
-        da, db = a.denominator, b.denominator
-        return Fraction(a.numerator * db - b.numerator * da,
-                        da * db)
+        na, da = a.numerator, a.denominator
+        nb, db = b.numerator, b.denominator
+        g = math.gcd(da, db)
+        if g == 1:
+            return Fraction(na * db - da * nb, da * db, _normalize=False)
+        s = da // g
+        t = na * (db // g) - nb * s
+        g2 = math.gcd(t, g)
+        if g2 == 1:
+            return Fraction(t, s * db, _normalize=False)
+        return Fraction(t // g2, s * (db // g2), _normalize=False)
 
     __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
 
     def _mul(a, b):
         """a * b"""
-        return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
+        na, da = a.numerator, a.denominator
+        nb, db = b.numerator, b.denominator
+        g1 = math.gcd(na, db)
+        if g1 > 1:
+            na //= g1
+            db //= g1
+        g2 = math.gcd(nb, da)
+        if g2 > 1:
+            nb //= g2
+            da //= g2
+        return Fraction(na * nb, db * da, _normalize=False)
 
     __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
 
     def _div(a, b):
         """a / b"""
-        return Fraction(a.numerator * b.denominator,
-                        a.denominator * b.numerator)
+        # Same as _mul(), but with b inverted.
+        na, da = a.numerator, a.denominator
+        nb, db = b.numerator, b.denominator
+        g1 = math.gcd(na, nb)
+        if g1 > 1:
+            na //= g1
+            nb //= g1
+        g2 = math.gcd(db, da)
+        if g2 > 1:
+            da //= g2
+            db //= g2
+        n, d = na * db, nb * da
+        if d < 0:
+            n, d = -n, -d
+        return Fraction(n, d, _normalize=False)
 
     __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
 
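
The gcd-based algorithms in the comment block above can be checked with
concrete numbers; a small worked example in the same notation (values
chosen for illustration):

    from math import gcd

    # a = 3/8, b = 5/12
    na, da, nb, db = 3, 8, 5, 12
    g = gcd(da, db)                 # 4
    s = da // g                     # 2
    t = na * (db // g) + nb * s     # 3*3 + 5*2 = 19
    g2 = gcd(t, g)                  # gcd(19, 4) = 1, already normalized
    assert (t, s * db) == (19, 24)  # 3/8 + 5/12 == 19/24
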
diff --git a/Lib/ftplib.py b/Lib/ftplib.py
index 1f760ed..7c5a507 100644
--- a/Lib/ftplib.py
+++ b/Lib/ftplib.py
@@ -102,7 +102,9 @@
     sock = None
     file = None
     welcome = None
-    passiveserver = 1
+    passiveserver = True
+    # If set to True, disables the security fix for https://bugs.python.org/issue43285.
+    trust_server_pasv_ipv4_address = False
 
     def __init__(self, host='', user='', passwd='', acct='',
                  timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *,
@@ -320,8 +322,13 @@
         return sock
 
     def makepasv(self):
+        """Internal: Does the PASV or EPSV handshake -> (address, port)"""
         if self.af == socket.AF_INET:
-            host, port = parse227(self.sendcmd('PASV'))
+            untrusted_host, port = parse227(self.sendcmd('PASV'))
+            if self.trust_server_pasv_ipv4_address:
+                host = untrusted_host
+            else:
+                host = self.sock.getpeername()[0]
         else:
             host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
         return host, port
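
With this fix, makepasv() ignores the IPv4 address in the PASV reply and
reuses the peer address of the control connection.  A hedged sketch of
opting back into the old behavior (host is illustrative):

    import ftplib

    ftp = ftplib.FTP()
    # Trust the address literal in the PASV reply again
    # (pre-bpo-43285 behavior); use only with trusted servers.
    ftp.trust_server_pasv_ipv4_address = True
    # ftp.connect("ftp.example.com"); ftp.login(); ...
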
diff --git a/Lib/functools.py b/Lib/functools.py
index 5cab497..305ceb4 100644
--- a/Lib/functools.py
+++ b/Lib/functools.py
@@ -88,84 +88,84 @@
 
 def _gt_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (not a < b) and (a != b).'
-    op_result = self.__lt__(other)
+    op_result = type(self).__lt__(self, other)
     if op_result is NotImplemented:
         return op_result
     return not op_result and self != other
 
 def _le_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (a < b) or (a == b).'
-    op_result = self.__lt__(other)
+    op_result = type(self).__lt__(self, other)
     if op_result is NotImplemented:
         return op_result
     return op_result or self == other
 
 def _ge_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (not a < b).'
-    op_result = self.__lt__(other)
+    op_result = type(self).__lt__(self, other)
     if op_result is NotImplemented:
         return op_result
     return not op_result
 
 def _ge_from_le(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (not a <= b) or (a == b).'
-    op_result = self.__le__(other)
+    op_result = type(self).__le__(self, other)
     if op_result is NotImplemented:
         return op_result
     return not op_result or self == other
 
 def _lt_from_le(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (a <= b) and (a != b).'
-    op_result = self.__le__(other)
+    op_result = type(self).__le__(self, other)
     if op_result is NotImplemented:
         return op_result
     return op_result and self != other
 
 def _gt_from_le(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (not a <= b).'
-    op_result = self.__le__(other)
+    op_result = type(self).__le__(self, other)
     if op_result is NotImplemented:
         return op_result
     return not op_result
 
 def _lt_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (not a > b) and (a != b).'
-    op_result = self.__gt__(other)
+    op_result = type(self).__gt__(self, other)
     if op_result is NotImplemented:
         return op_result
     return not op_result and self != other
 
 def _ge_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (a > b) or (a == b).'
-    op_result = self.__gt__(other)
+    op_result = type(self).__gt__(self, other)
     if op_result is NotImplemented:
         return op_result
     return op_result or self == other
 
 def _le_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (not a > b).'
-    op_result = self.__gt__(other)
+    op_result = type(self).__gt__(self, other)
     if op_result is NotImplemented:
         return op_result
     return not op_result
 
 def _le_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (not a >= b) or (a == b).'
-    op_result = self.__ge__(other)
+    op_result = type(self).__ge__(self, other)
     if op_result is NotImplemented:
         return op_result
     return not op_result or self == other
 
 def _gt_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (a >= b) and (a != b).'
-    op_result = self.__ge__(other)
+    op_result = type(self).__ge__(self, other)
     if op_result is NotImplemented:
         return op_result
     return op_result and self != other
 
 def _lt_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (not a >= b).'
-    op_result = self.__ge__(other)
+    op_result = type(self).__ge__(self, other)
     if op_result is NotImplemented:
         return op_result
     return not op_result
@@ -236,14 +236,14 @@
 
 def reduce(function, sequence, initial=_initial_missing):
     """
-    reduce(function, sequence[, initial]) -> value
+    reduce(function, iterable[, initial]) -> value
 
-    Apply a function of two arguments cumulatively to the items of a sequence,
-    from left to right, so as to reduce the sequence to a single value.
-    For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
+    Apply a function of two arguments cumulatively to the items of a sequence
+    or iterable, from left to right, so as to reduce the iterable to a single
+    value.  For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
     ((((1+2)+3)+4)+5).  If initial is present, it is placed before the items
-    of the sequence in the calculation, and serves as a default when the
-    sequence is empty.
+    of the iterable in the calculation, and serves as a default when the
+    iterable is empty.
     """
 
     it = iter(sequence)
@@ -252,7 +252,8 @@
         try:
             value = next(it)
         except StopIteration:
-            raise TypeError("reduce() of empty sequence with no initial value") from None
+            raise TypeError(
+                "reduce() of empty iterable with no initial value") from None
     else:
         value = initial
 
@@ -491,7 +492,7 @@
     with f.cache_info().  Clear the cache and statistics with f.cache_clear().
     Access the underlying function with f.__wrapped__.
 
-    See:  http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)
+    See:  https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)
 
     """
 
@@ -659,7 +660,7 @@
 def _c3_merge(sequences):
     """Merges MROs in *sequences* to a single MRO using the C3 algorithm.
 
-    Adapted from http://www.python.org/download/releases/2.3/mro/.
+    Adapted from https://www.python.org/download/releases/2.3/mro/.
 
     """
     result = []
@@ -739,6 +740,7 @@
     # Remove entries which are already present in the __mro__ or unrelated.
     def is_related(typ):
         return (typ not in bases and hasattr(typ, '__mro__')
+                                 and not isinstance(typ, GenericAlias)
                                  and issubclass(cls, typ))
     types = [n for n in types if is_related(n)]
     # Remove entries which are strict bases of other entries (they will end up
@@ -836,6 +838,9 @@
             dispatch_cache[cls] = impl
         return impl
 
+    def _is_valid_dispatch_type(cls):
+        return isinstance(cls, type) and not isinstance(cls, GenericAlias)
+
     def register(cls, func=None):
         """generic_func.register(cls, func) -> func
 
@@ -843,9 +848,15 @@
 
         """
         nonlocal cache_token
-        if func is None:
-            if isinstance(cls, type):
+        if _is_valid_dispatch_type(cls):
+            if func is None:
                 return lambda f: register(cls, f)
+        else:
+            if func is not None:
+                raise TypeError(
+                    f"Invalid first argument to `register()`. "
+                    f"{cls!r} is not a class."
+                )
             ann = getattr(cls, '__annotations__', {})
             if not ann:
                 raise TypeError(
@@ -858,11 +869,12 @@
             # only import typing if annotation parsing is necessary
             from typing import get_type_hints
             argname, cls = next(iter(get_type_hints(func).items()))
-            if not isinstance(cls, type):
+            if not _is_valid_dispatch_type(cls):
                 raise TypeError(
                     f"Invalid annotation for {argname!r}. "
                     f"{cls!r} is not a class."
                 )
+
         registry[cls] = func
         if cache_token is None and hasattr(cls, '__abstractmethods__'):
             cache_token = get_cache_token()
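
The total_ordering helpers above now resolve the comparison dunder on
type(self), matching how the interpreter itself looks up special methods.
A minimal sketch of the decorator these helpers back:

    import functools

    @functools.total_ordering
    class Version:
        def __init__(self, *parts):
            self.parts = parts
        def __eq__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return self.parts == other.parts
        def __lt__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return self.parts < other.parts

    # __le__, __gt__ and __ge__ are synthesized from __lt__ and __eq__.
    assert Version(1, 2) <= Version(1, 3)
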
diff --git a/Lib/getpass.py b/Lib/getpass.py
index 6911f41..6970d8a 100644
--- a/Lib/getpass.py
+++ b/Lib/getpass.py
@@ -95,7 +95,7 @@
 
 
 def win_getpass(prompt='Password: ', stream=None):
-    """Prompt for password with echo off, using Windows getch()."""
+    """Prompt for password with echo off, using Windows getwch()."""
     if sys.stdin is not sys.__stdin__:
         return fallback_getpass(prompt, stream)
 
diff --git a/Lib/glob.py b/Lib/glob.py
index 0dd2f8b..9fc08f4 100644
--- a/Lib/glob.py
+++ b/Lib/glob.py
@@ -1,13 +1,16 @@
 """Filename globbing utility."""
 
+import contextlib
 import os
 import re
 import fnmatch
+import itertools
+import stat
 import sys
 
 __all__ = ["glob", "iglob", "escape"]
 
-def glob(pathname, *, recursive=False):
+def glob(pathname, *, root_dir=None, dir_fd=None, recursive=False):
     """Return a list of paths matching a pathname pattern.
 
     The pattern may contain simple shell-style wildcards a la
@@ -18,9 +21,9 @@
     If recursive is true, the pattern '**' will match any files and
     zero or more directories and subdirectories.
     """
-    return list(iglob(pathname, recursive=recursive))
+    return list(iglob(pathname, root_dir=root_dir, dir_fd=dir_fd, recursive=recursive))
 
-def iglob(pathname, *, recursive=False):
+def iglob(pathname, *, root_dir=None, dir_fd=None, recursive=False):
     """Return an iterator which yields the paths matching a pathname pattern.
 
     The pattern may contain simple shell-style wildcards a la
@@ -32,35 +35,44 @@
     zero or more directories and subdirectories.
     """
     sys.audit("glob.glob", pathname, recursive)
-    it = _iglob(pathname, recursive, False)
-    if recursive and _isrecursive(pathname):
-        s = next(it)  # skip empty string
-        assert not s
+    sys.audit("glob.glob/2", pathname, recursive, root_dir, dir_fd)
+    if root_dir is not None:
+        root_dir = os.fspath(root_dir)
+    else:
+        root_dir = pathname[:0]
+    it = _iglob(pathname, root_dir, dir_fd, recursive, False)
+    if not pathname or recursive and _isrecursive(pathname[:2]):
+        try:
+            s = next(it)  # skip empty string
+            if s:
+                it = itertools.chain((s,), it)
+        except StopIteration:
+            pass
     return it
 
-def _iglob(pathname, recursive, dironly):
+def _iglob(pathname, root_dir, dir_fd, recursive, dironly):
     dirname, basename = os.path.split(pathname)
     if not has_magic(pathname):
         assert not dironly
         if basename:
-            if os.path.lexists(pathname):
+            if _lexists(_join(root_dir, pathname), dir_fd):
                 yield pathname
         else:
             # Patterns ending with a slash should match only directories
-            if os.path.isdir(dirname):
+            if _isdir(_join(root_dir, dirname), dir_fd):
                 yield pathname
         return
     if not dirname:
         if recursive and _isrecursive(basename):
-            yield from _glob2(dirname, basename, dironly)
+            yield from _glob2(root_dir, basename, dir_fd, dironly)
         else:
-            yield from _glob1(dirname, basename, dironly)
+            yield from _glob1(root_dir, basename, dir_fd, dironly)
         return
     # `os.path.split()` returns the argument itself as a dirname if it is a
     # drive or UNC path.  Prevent an infinite recursion if a drive or UNC path
     # contains magic characters (i.e. r'\\?\C:').
     if dirname != pathname and has_magic(dirname):
-        dirs = _iglob(dirname, recursive, True)
+        dirs = _iglob(dirname, root_dir, dir_fd, recursive, True)
     else:
         dirs = [dirname]
     if has_magic(basename):
@@ -71,76 +83,125 @@
     else:
         glob_in_dir = _glob0
     for dirname in dirs:
-        for name in glob_in_dir(dirname, basename, dironly):
+        for name in glob_in_dir(_join(root_dir, dirname), basename, dir_fd, dironly):
             yield os.path.join(dirname, name)
 
 # These 2 helper functions non-recursively glob inside a literal directory.
 # They return a list of basenames.  _glob1 accepts a pattern while _glob0
 # takes a literal basename (so it only has to check for its existence).
 
-def _glob1(dirname, pattern, dironly):
-    names = list(_iterdir(dirname, dironly))
+def _glob1(dirname, pattern, dir_fd, dironly):
+    names = _listdir(dirname, dir_fd, dironly)
     if not _ishidden(pattern):
         names = (x for x in names if not _ishidden(x))
     return fnmatch.filter(names, pattern)
 
-def _glob0(dirname, basename, dironly):
-    if not basename:
-        # `os.path.split()` returns an empty basename for paths ending with a
-        # directory separator.  'q*x/' should match only directories.
-        if os.path.isdir(dirname):
+def _glob0(dirname, basename, dir_fd, dironly):
+    if basename:
+        if _lexists(_join(dirname, basename), dir_fd):
             return [basename]
     else:
-        if os.path.lexists(os.path.join(dirname, basename)):
+        # `os.path.split()` returns an empty basename for paths ending with a
+        # directory separator.  'q*x/' should match only directories.
+        if _isdir(dirname, dir_fd):
             return [basename]
     return []
 
 # Following functions are not public but can be used by third-party code.
 
 def glob0(dirname, pattern):
-    return _glob0(dirname, pattern, False)
+    return _glob0(dirname, pattern, None, False)
 
 def glob1(dirname, pattern):
-    return _glob1(dirname, pattern, False)
+    return _glob1(dirname, pattern, None, False)
 
 # This helper function recursively yields relative pathnames inside a literal
 # directory.
 
-def _glob2(dirname, pattern, dironly):
+def _glob2(dirname, pattern, dir_fd, dironly):
     assert _isrecursive(pattern)
     yield pattern[:0]
-    yield from _rlistdir(dirname, dironly)
+    yield from _rlistdir(dirname, dir_fd, dironly)
 
 # If dironly is false, yields all file names inside a directory.
 # If dironly is true, yields only directory names.
-def _iterdir(dirname, dironly):
-    if not dirname:
-        if isinstance(dirname, bytes):
-            dirname = bytes(os.curdir, 'ASCII')
-        else:
-            dirname = os.curdir
+def _iterdir(dirname, dir_fd, dironly):
     try:
-        with os.scandir(dirname) as it:
-            for entry in it:
-                try:
-                    if not dironly or entry.is_dir():
-                        yield entry.name
-                except OSError:
-                    pass
+        fd = None
+        fsencode = None
+        if dir_fd is not None:
+            if dirname:
+                fd = arg = os.open(dirname, _dir_open_flags, dir_fd=dir_fd)
+            else:
+                arg = dir_fd
+            if isinstance(dirname, bytes):
+                fsencode = os.fsencode
+        elif dirname:
+            arg = dirname
+        elif isinstance(dirname, bytes):
+            arg = bytes(os.curdir, 'ASCII')
+        else:
+            arg = os.curdir
+        try:
+            with os.scandir(arg) as it:
+                for entry in it:
+                    try:
+                        if not dironly or entry.is_dir():
+                            if fsencode is not None:
+                                yield fsencode(entry.name)
+                            else:
+                                yield entry.name
+                    except OSError:
+                        pass
+        finally:
+            if fd is not None:
+                os.close(fd)
     except OSError:
         return
 
+def _listdir(dirname, dir_fd, dironly):
+    with contextlib.closing(_iterdir(dirname, dir_fd, dironly)) as it:
+        return list(it)
+
 # Recursively yields relative pathnames inside a literal directory.
-def _rlistdir(dirname, dironly):
-    names = list(_iterdir(dirname, dironly))
+def _rlistdir(dirname, dir_fd, dironly):
+    names = _listdir(dirname, dir_fd, dironly)
     for x in names:
         if not _ishidden(x):
             yield x
-            path = os.path.join(dirname, x) if dirname else x
-            for y in _rlistdir(path, dironly):
-                yield os.path.join(x, y)
+            path = _join(dirname, x) if dirname else x
+            for y in _rlistdir(path, dir_fd, dironly):
+                yield _join(x, y)
 
 
+def _lexists(pathname, dir_fd):
+    # Same as os.path.lexists(), but with dir_fd
+    if dir_fd is None:
+        return os.path.lexists(pathname)
+    try:
+        os.lstat(pathname, dir_fd=dir_fd)
+    except (OSError, ValueError):
+        return False
+    else:
+        return True
+
+def _isdir(pathname, dir_fd):
+    # Same as os.path.isdir(), but with dir_fd
+    if dir_fd is None:
+        return os.path.isdir(pathname)
+    try:
+        st = os.stat(pathname, dir_fd=dir_fd)
+    except (OSError, ValueError):
+        return False
+    else:
+        return stat.S_ISDIR(st.st_mode)
+
+def _join(dirname, basename):
+    # It is common for dirname or basename to be empty
+    if not dirname or not basename:
+        return dirname or basename
+    return os.path.join(dirname, basename)
+
 magic_check = re.compile('([*?[])')
 magic_check_bytes = re.compile(b'([*?[])')
 
@@ -171,3 +232,6 @@
     else:
         pathname = magic_check.sub(r'[\1]', pathname)
     return drive + pathname
+
+
+_dir_open_flags = os.O_RDONLY | getattr(os, 'O_DIRECTORY', 0)
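
The new root_dir and dir_fd parameters scope a search without changing
the process working directory.  A short sketch (the directory is
illustrative):

    import glob

    # Results stay relative to root_dir rather than the CWD.
    txt_files = glob.glob("**/*.txt", root_dir="/tmp", recursive=True)
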
diff --git a/Lib/graphlib.py b/Lib/graphlib.py
index d0e7a48..1c5d9a4 100644
--- a/Lib/graphlib.py
+++ b/Lib/graphlib.py
@@ -17,7 +17,7 @@
         self.npredecessors = 0
 
         # List of successor nodes. The list can contain duplicated elements as
-        # long as they're all reflected in the successor's npredecessors attribute).
+        # long as they're all reflected in the successor's npredecessors attribute.
         self.successors = []
 
 
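
The _NodeInfo bookkeeping above backs the public TopologicalSorter; a
tiny usage sketch:

    from graphlib import TopologicalSorter

    # Mapping form: each node maps to the set of its predecessors.
    ts = TopologicalSorter({"b": {"a"}, "c": {"a", "b"}})
    print(list(ts.static_order()))  # -> ['a', 'b', 'c']
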
diff --git a/Lib/gzip.py b/Lib/gzip.py
index e422773..475ec32 100644
--- a/Lib/gzip.py
+++ b/Lib/gzip.py
@@ -62,6 +62,7 @@
         raise TypeError("filename must be a str or bytes object, or a file")
 
     if "t" in mode:
+        encoding = io.text_encoding(encoding)
         return io.TextIOWrapper(binary_file, encoding, errors, newline)
     else:
         return binary_file
@@ -277,7 +278,7 @@
         if self.fileobj is None:
             raise ValueError("write() on closed GzipFile object")
 
-        if isinstance(data, bytes):
+        if isinstance(data, (bytes, bytearray)):
             length = len(data)
         else:
             # accept any data that supports the buffer protocol
@@ -516,7 +517,7 @@
 
     def _read_eof(self):
         # We've read to the end of the file
-        # We check the that the computed CRC and size of the
+        # We check that the computed CRC and size of the
         # uncompressed data matches the stored values.  Note that the size
         # stored is the true file size mod 2**32.
         crc32, isize = struct.unpack("<II", self._read_exact(8))
@@ -583,8 +584,7 @@
                 g = sys.stdout.buffer
             else:
                 if arg[-3:] != ".gz":
-                    print("filename doesn't end in .gz:", repr(arg))
-                    continue
+                    sys.exit(f"filename doesn't end in .gz: {arg!r}")
                 f = open(arg, "rb")
                 g = builtins.open(arg[:-3], "wb")
         else:
@@ -596,7 +596,7 @@
                 f = builtins.open(arg, "rb")
                 g = open(arg + ".gz", "wb")
         while True:
-            chunk = f.read(1024)
+            chunk = f.read(io.DEFAULT_BUFFER_SIZE)
             if not chunk:
                 break
             g.write(chunk)
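
Text-mode gzip.open() now funnels encoding through io.text_encoding(),
so passing it explicitly both documents intent and silences
EncodingWarning.  A sketch (file name illustrative):

    import gzip

    with gzip.open("notes.txt.gz", "wt", encoding="utf-8") as f:
        f.write("hello\n")
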
diff --git a/Lib/hashlib.py b/Lib/hashlib.py
index 58c340d..21a73f3 100644
--- a/Lib/hashlib.py
+++ b/Lib/hashlib.py
@@ -173,6 +173,7 @@
     algorithms_available = algorithms_available.union(
             _hashlib.openssl_md_meth_names)
 except ImportError:
+    _hashlib = None
     new = __py_new
     __get_hash = __get_builtin_constructor
 
@@ -180,6 +181,7 @@
     # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
     from _hashlib import pbkdf2_hmac
 except ImportError:
+    from warnings import warn as _warn
     _trans_5C = bytes((x ^ 0x5C) for x in range(256))
     _trans_36 = bytes((x ^ 0x36) for x in range(256))
 
@@ -190,6 +192,11 @@
         as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
         for long passwords.
         """
+        _warn(
+            "Python implementation of pbkdf2_hmac() is deprecated.",
+            category=DeprecationWarning,
+            stacklevel=2
+        )
         if not isinstance(hash_name, str):
             raise TypeError(hash_name)
 
diff --git a/Lib/hmac.py b/Lib/hmac.py
index 180bc37..8b4f920 100644
--- a/Lib/hmac.py
+++ b/Lib/hmac.py
@@ -8,11 +8,12 @@
     import _hashlib as _hashopenssl
 except ImportError:
     _hashopenssl = None
-    _openssl_md_meths = None
+    _functype = None
     from _operator import _compare_digest as compare_digest
 else:
-    _openssl_md_meths = frozenset(_hashopenssl.openssl_md_meth_names)
     compare_digest = _hashopenssl.compare_digest
+    _functype = type(_hashopenssl.openssl_sha256)  # builtin type
+
 import hashlib as _hashlib
 
 trans_5C = bytes((x ^ 0x5C) for x in range(256))
@@ -23,7 +24,6 @@
 digest_size = None
 
 
-
 class HMAC:
     """RFC 2104 HMAC class.  Also complies with RFC 4231.
 
@@ -32,7 +32,7 @@
     blocksize = 64  # 512-bit HMAC; can be changed in subclasses.
 
     __slots__ = (
-        "_digest_cons", "_inner", "_outer", "block_size", "digest_size"
+        "_hmac", "_inner", "_outer", "block_size", "digest_size"
     )
 
     def __init__(self, key, msg=None, digestmod=''):
@@ -55,15 +55,30 @@
         if not digestmod:
             raise TypeError("Missing required parameter 'digestmod'.")
 
-        if callable(digestmod):
-            self._digest_cons = digestmod
-        elif isinstance(digestmod, str):
-            self._digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
+        if _hashopenssl and isinstance(digestmod, (str, _functype)):
+            try:
+                self._init_hmac(key, msg, digestmod)
+            except _hashopenssl.UnsupportedDigestmodError:
+                self._init_old(key, msg, digestmod)
         else:
-            self._digest_cons = lambda d=b'': digestmod.new(d)
+            self._init_old(key, msg, digestmod)
 
-        self._outer = self._digest_cons()
-        self._inner = self._digest_cons()
+    def _init_hmac(self, key, msg, digestmod):
+        self._hmac = _hashopenssl.hmac_new(key, msg, digestmod=digestmod)
+        self.digest_size = self._hmac.digest_size
+        self.block_size = self._hmac.block_size
+
+    def _init_old(self, key, msg, digestmod):
+        if callable(digestmod):
+            digest_cons = digestmod
+        elif isinstance(digestmod, str):
+            digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
+        else:
+            digest_cons = lambda d=b'': digestmod.new(d)
+
+        self._hmac = None
+        self._outer = digest_cons()
+        self._inner = digest_cons()
         self.digest_size = self._inner.digest_size
 
         if hasattr(self._inner, 'block_size'):
@@ -79,13 +94,13 @@
                            RuntimeWarning, 2)
             blocksize = self.blocksize
 
+        if len(key) > blocksize:
+            key = digest_cons(key).digest()
+
         # self.blocksize is the default blocksize. self.block_size is
         # effective block size as well as the public API attribute.
         self.block_size = blocksize
 
-        if len(key) > blocksize:
-            key = self._digest_cons(key).digest()
-
         key = key.ljust(blocksize, b'\0')
         self._outer.update(key.translate(trans_5C))
         self._inner.update(key.translate(trans_36))
@@ -94,23 +109,15 @@
 
     @property
     def name(self):
-        return "hmac-" + self._inner.name
-
-    @property
-    def digest_cons(self):
-        return self._digest_cons
-
-    @property
-    def inner(self):
-        return self._inner
-
-    @property
-    def outer(self):
-        return self._outer
+        if self._hmac:
+            return self._hmac.name
+        else:
+            return f"hmac-{self._inner.name}"
 
     def update(self, msg):
         """Feed data from msg into this hashing object."""
-        self._inner.update(msg)
+        inst = self._hmac or self._inner
+        inst.update(msg)
 
     def copy(self):
         """Return a separate copy of this hashing object.
@@ -119,10 +126,14 @@
         """
         # Call __new__ directly to avoid the expensive __init__.
         other = self.__class__.__new__(self.__class__)
-        other._digest_cons = self._digest_cons
         other.digest_size = self.digest_size
-        other._inner = self._inner.copy()
-        other._outer = self._outer.copy()
+        if self._hmac:
+            other._hmac = self._hmac.copy()
+            other._inner = other._outer = None
+        else:
+            other._hmac = None
+            other._inner = self._inner.copy()
+            other._outer = self._outer.copy()
         return other
 
     def _current(self):
@@ -130,9 +141,12 @@
 
         To be used only internally with digest() and hexdigest().
         """
-        h = self._outer.copy()
-        h.update(self._inner.digest())
-        return h
+        if self._hmac:
+            return self._hmac
+        else:
+            h = self._outer.copy()
+            h.update(self._inner.digest())
+            return h
 
     def digest(self):
         """Return the hash value of this hashing object.
@@ -179,9 +193,11 @@
             A hashlib constructor returning a new hash object. *OR*
             A module supporting PEP 247.
     """
-    if (_hashopenssl is not None and
-            isinstance(digest, str) and digest in _openssl_md_meths):
-        return _hashopenssl.hmac_digest(key, msg, digest)
+    if _hashopenssl is not None and isinstance(digest, (str, _functype)):
+        try:
+            return _hashopenssl.hmac_digest(key, msg, digest)
+        except _hashopenssl.UnsupportedDigestmodError:
+            pass
 
     if callable(digest):
         digest_cons = digest
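
With OpenSSL available, both the one-shot helper and the HMAC class now
stay on the _hashopenssl fast path for string or builtin digest
arguments.  A small sketch:

    import hashlib
    import hmac

    key, msg = b"secret", b"payload"
    tag = hmac.digest(key, msg, "sha256")  # one-shot helper
    assert hmac.compare_digest(
        tag, hmac.new(key, msg, hashlib.sha256).digest())
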
diff --git a/Lib/html/parser.py b/Lib/html/parser.py
index 6083077..58f6bb3 100644
--- a/Lib/html/parser.py
+++ b/Lib/html/parser.py
@@ -46,7 +46,7 @@
           |"[^"]*"                   # LIT-enclosed value
           |(?!['"])[^>\s]*           # bare value
          )
-         (?:\s*,)*                   # possibly followed by a comma
+        \s*                          # possibly followed by a space
        )?(?:\s|/(?!>))*
      )*
    )?
@@ -405,7 +405,7 @@
             tagname = namematch.group(1).lower()
             # consume and ignore other stuff between the name and the >
             # Note: this is not 100% correct, since we might have things like
-            # </tag attr=">">, but looking for > after tha name should cover
+            # </tag attr=">">, but looking for > after the name should cover
             # most of the cases and is much simpler
             gtpos = rawdata.find('>', namematch.end())
             self.handle_endtag(tagname)
diff --git a/Lib/http/__init__.py b/Lib/http/__init__.py
index 37be765..bf8d7d6 100644
--- a/Lib/http/__init__.py
+++ b/Lib/http/__init__.py
@@ -2,6 +2,7 @@
 
 __all__ = ['HTTPStatus']
 
+
 class HTTPStatus(IntEnum):
     """HTTP status codes and reason phrases
 
diff --git a/Lib/http/client.py b/Lib/http/client.py
index c2ad047..a6ab135 100644
--- a/Lib/http/client.py
+++ b/Lib/http/client.py
@@ -70,10 +70,12 @@
 
 import email.parser
 import email.message
+import errno
 import http
 import io
 import re
 import socket
+import sys
 import collections.abc
 from urllib.parse import urlsplit
 
@@ -201,15 +203,11 @@
                 lst.append(line)
         return lst
 
-def parse_headers(fp, _class=HTTPMessage):
-    """Parses only RFC2822 headers from a file pointer.
+def _read_headers(fp):
+    """Reads potential header lines into a list from a file pointer.
 
-    email Parser wants to see strings rather than bytes.
-    But a TextIOWrapper around self.rfile would buffer too many bytes
-    from the stream, bytes which we later need to read as bytes.
-    So we read the correct bytes here, as bytes, for email Parser
-    to parse.
-
+    The length of each line is limited by _MAXLINE, and the number of
+    headers is limited by _MAXHEADERS.
     """
     headers = []
     while True:
@@ -221,6 +219,19 @@
             raise HTTPException("got more than %d headers" % _MAXHEADERS)
         if line in (b'\r\n', b'\n', b''):
             break
+    return headers
+
+def parse_headers(fp, _class=HTTPMessage):
+    """Parses only RFC2822 headers from a file pointer.
+
+    email Parser wants to see strings rather than bytes.
+    But a TextIOWrapper around self.rfile would buffer too many bytes
+    from the stream, bytes which we later need to read as bytes.
+    So we read the correct bytes here, as bytes, for email Parser
+    to parse.
+
+    """
+    headers = _read_headers(fp)
     hstring = b''.join(headers).decode('iso-8859-1')
     return email.parser.Parser(_class=_class).parsestr(hstring)
 
@@ -308,15 +319,10 @@
             if status != CONTINUE:
                 break
             # skip the header from the 100 response
-            while True:
-                skip = self.fp.readline(_MAXLINE + 1)
-                if len(skip) > _MAXLINE:
-                    raise LineTooLong("header line")
-                skip = skip.strip()
-                if not skip:
-                    break
-                if self.debuglevel > 0:
-                    print("header:", skip)
+            skipped_headers = _read_headers(self.fp)
+            if self.debuglevel > 0:
+                print("headers:", skipped_headers)
+            del skipped_headers
 
         self.code = self.status = status
         self.reason = reason.strip()
@@ -349,9 +355,6 @@
         # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
         self.length = None
         length = self.headers.get("content-length")
-
-         # are we using the chunked-style of transfer encoding?
-        tr_enc = self.headers.get("transfer-encoding")
         if length and not self.chunked:
             try:
                 self.length = int(length)
@@ -452,18 +455,25 @@
             self._close_conn()
             return b""
 
+        if self.chunked:
+            return self._read_chunked(amt)
+
         if amt is not None:
-            # Amount is given, implement using readinto
-            b = bytearray(amt)
-            n = self.readinto(b)
-            return memoryview(b)[:n].tobytes()
+            if self.length is not None and amt > self.length:
+                # clip the read to the "end of response"
+                amt = self.length
+            s = self.fp.read(amt)
+            if not s and amt:
+                # Ideally, we would raise IncompleteRead if the content-length
+                # wasn't satisfied, but it might break compatibility.
+                self._close_conn()
+            elif self.length is not None:
+                self.length -= len(s)
+                if not self.length:
+                    self._close_conn()
+            return s
         else:
             # Amount is not given (unbounded read) so we must check self.length
-            # and self.chunked
-
-            if self.chunked:
-                return self._readall_chunked()
-
             if self.length is None:
                 s = self.fp.read()
             else:
@@ -564,7 +574,7 @@
             self.chunk_left = chunk_left
         return chunk_left
 
-    def _readall_chunked(self):
+    def _read_chunked(self, amt=None):
         assert self.chunked != _UNKNOWN
         value = []
         try:
@@ -572,7 +582,15 @@
                 chunk_left = self._get_chunk_left()
                 if chunk_left is None:
                     break
+
+                if amt is not None and amt <= chunk_left:
+                    value.append(self._safe_read(amt))
+                    self.chunk_left = chunk_left - amt
+                    break
+
                 value.append(self._safe_read(chunk_left))
+                if amt is not None:
+                    amt -= chunk_left
                 self.chunk_left = 0
             return b''.join(value)
         except IncompleteRead:
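Note: with _readall_chunked() generalized to _read_chunked(amt), bounded reads now work against chunked responses: when amt is smaller than the current chunk, only amt bytes are consumed and self.chunk_left remembers the remainder, so the next read resumes mid-chunk. Hypothetical usage (host and path are placeholders):

    import http.client

    conn = http.client.HTTPConnection("example.com")
    conn.request("GET", "/stream")
    resp = conn.getresponse()   # suppose the server answers with Transfer-Encoding: chunked
    head = resp.read(10)        # clipped to 10 bytes, even inside a chunk
    rest = resp.read()          # continues from self.chunk_left
    conn.close()
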
@@ -846,7 +864,7 @@
         the endpoint passed to `set_tunnel`. This is done by sending an HTTP
         CONNECT request to the proxy server when the connection is established.
 
-        This method must be called before the HTML connection has been
+        This method must be called before the HTTP connection has been
         established.
 
         The headers argument should be a mapping of extra HTTP headers to send
@@ -886,23 +904,24 @@
         self.debuglevel = level
 
     def _tunnel(self):
-        connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
-            self._tunnel_port)
-        connect_bytes = connect_str.encode("ascii")
-        self.send(connect_bytes)
+        connect = b"CONNECT %s:%d HTTP/1.0\r\n" % (
+            self._tunnel_host.encode("ascii"), self._tunnel_port)
+        headers = [connect]
         for header, value in self._tunnel_headers.items():
-            header_str = "%s: %s\r\n" % (header, value)
-            header_bytes = header_str.encode("latin-1")
-            self.send(header_bytes)
-        self.send(b'\r\n')
+            headers.append(f"{header}: {value}\r\n".encode("latin-1"))
+        headers.append(b"\r\n")
+        # Making a single send() call instead of one per line encourages
+        # the host OS to use a more optimal packet size instead of
+        # potentially emitting a series of small packets.
+        self.send(b"".join(headers))
+        del headers
 
         response = self.response_class(self.sock, method=self._method)
         (version, code, message) = response._read_status()
 
         if code != http.HTTPStatus.OK:
             self.close()
-            raise OSError("Tunnel connection failed: %d %s" % (code,
-                                                               message.strip()))
+            raise OSError(f"Tunnel connection failed: {code} {message.strip()}")
         while True:
             line = response.fp.readline(_MAXLINE + 1)
             if len(line) > _MAXLINE:
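Note: the rewritten _tunnel() assembles the CONNECT line and all proxy headers into one buffer before writing, instead of one send() per line. A standalone sketch of the same batching idea (sock is any connected socket; the helper name is made up):

    def send_tunnel_request(sock, host, port, headers):
        lines = [b"CONNECT %s:%d HTTP/1.0\r\n" % (host.encode("ascii"), port)]
        lines += [f"{k}: {v}\r\n".encode("latin-1") for k, v in headers.items()]
        lines.append(b"\r\n")
        # One write lets the OS choose a sensible packet size.
        sock.sendall(b"".join(lines))
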
@@ -918,9 +937,15 @@
 
     def connect(self):
         """Connect to the host and port specified in __init__."""
+        sys.audit("http.client.connect", self, self.host, self.port)
         self.sock = self._create_connection(
             (self.host,self.port), self.timeout, self.source_address)
-        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+        # Might fail in OSs that don't implement TCP_NODELAY
+        try:
+            self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+        except OSError as e:
+            if e.errno != errno.ENOPROTOOPT:
+                raise
 
         if self._tunnel_host:
             self._tunnel()
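Note: connect() now emits a sys.audit event and tolerates platforms without TCP_NODELAY (only ENOPROTOOPT is swallowed; other socket errors still propagate). The new events can be observed with a standard audit hook:

    import sys

    def hook(event, args):
        # args[0] is the connection object for both events below
        if event in ("http.client.connect", "http.client.send"):
            print(event, args[1:])

    sys.addaudithook(hook)
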
@@ -965,8 +990,10 @@
                     break
                 if encode:
                     datablock = datablock.encode("iso-8859-1")
+                sys.audit("http.client.send", self, datablock)
                 self.sock.sendall(datablock)
             return
+        sys.audit("http.client.send", self, data)
         try:
             self.sock.sendall(data)
         except TypeError:
@@ -1392,6 +1419,9 @@
             self.cert_file = cert_file
             if context is None:
                 context = ssl._create_default_https_context()
+                # send ALPN extension to indicate HTTP/1.1 protocol
+                if self._http_vsn == 11:
+                    context.set_alpn_protocols(['http/1.1'])
                 # enable PHA for TLS 1.3 connections if available
                 if context.post_handshake_auth is not None:
                     context.post_handshake_auth = True
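Note: setting ALPN on the implicit context advertises http/1.1 during the TLS handshake so servers that dispatch on ALPN don't assume HTTP/2. Callers who supply their own context can do the equivalent themselves; a sketch using the public ssl API (host is a placeholder):

    import http.client
    import ssl

    ctx = ssl.create_default_context()
    ctx.set_alpn_protocols(["http/1.1"])
    conn = http.client.HTTPSConnection("example.com", context=ctx)
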
diff --git a/Lib/http/cookiejar.py b/Lib/http/cookiejar.py
index 47ed5c3..eaa76c2 100644
--- a/Lib/http/cookiejar.py
+++ b/Lib/http/cookiejar.py
@@ -50,10 +50,18 @@
         logger = logging.getLogger("http.cookiejar")
     return logger.debug(*args)
 
-
+HTTPONLY_ATTR = "HTTPOnly"
+HTTPONLY_PREFIX = "#HttpOnly_"
 DEFAULT_HTTP_PORT = str(http.client.HTTP_PORT)
+NETSCAPE_MAGIC_RGX = re.compile("#( Netscape)? HTTP Cookie File")
 MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
                          "instance initialised with one)")
+NETSCAPE_HEADER_TEXT =  """\
+# Netscape HTTP Cookie File
+# http://curl.haxx.se/rfc/cookie_spec.html
+# This is a generated file!  Do not edit.
+
+"""
 
 def _warn_unhandled_exception():
     # There are a few catch-all except: statements in this module, for
@@ -2004,19 +2012,11 @@
     header by default (Mozilla can cope with that).
 
     """
-    magic_re = re.compile("#( Netscape)? HTTP Cookie File")
-    header = """\
-# Netscape HTTP Cookie File
-# http://curl.haxx.se/rfc/cookie_spec.html
-# This is a generated file!  Do not edit.
-
-"""
 
     def _really_load(self, f, filename, ignore_discard, ignore_expires):
         now = time.time()
 
-        magic = f.readline()
-        if not self.magic_re.search(magic):
+        if not NETSCAPE_MAGIC_RGX.match(f.readline()):
             raise LoadError(
                 "%r does not look like a Netscape format cookies file" %
                 filename)
@@ -2024,8 +2024,17 @@
         try:
             while 1:
                 line = f.readline()
+                rest = {}
+
                 if line == "": break
 
+                # httponly is a cookie flag as defined in rfc6265
+                # when encoded in a netscape cookie file,
+                # the line is prepended with "#HttpOnly_"
+                if line.startswith(HTTPONLY_PREFIX):
+                    rest[HTTPONLY_ATTR] = ""
+                    line = line[len(HTTPONLY_PREFIX):]
+
                 # last field may be absent, so keep any trailing tab
                 if line.endswith("\n"): line = line[:-1]
 
@@ -2063,7 +2072,7 @@
                            discard,
                            None,
                            None,
-                           {})
+                           rest)
                 if not ignore_discard and c.discard:
                     continue
                 if not ignore_expires and c.is_expired(now):
@@ -2083,16 +2092,17 @@
             else: raise ValueError(MISSING_FILENAME_TEXT)
 
         with open(filename, "w") as f:
-            f.write(self.header)
+            f.write(NETSCAPE_HEADER_TEXT)
             now = time.time()
             for cookie in self:
+                domain = cookie.domain
                 if not ignore_discard and cookie.discard:
                     continue
                 if not ignore_expires and cookie.is_expired(now):
                     continue
                 if cookie.secure: secure = "TRUE"
                 else: secure = "FALSE"
-                if cookie.domain.startswith("."): initial_dot = "TRUE"
+                if domain.startswith("."): initial_dot = "TRUE"
                 else: initial_dot = "FALSE"
                 if cookie.expires is not None:
                     expires = str(cookie.expires)
@@ -2107,7 +2117,9 @@
                 else:
                     name = cookie.name
                     value = cookie.value
+                if cookie.has_nonstandard_attr(HTTPONLY_ATTR):
+                    domain = HTTPONLY_PREFIX + domain
                 f.write(
-                    "\t".join([cookie.domain, initial_dot, cookie.path,
+                    "\t".join([domain, initial_dot, cookie.path,
                                secure, expires, name, value])+
                     "\n")
diff --git a/Lib/http/server.py b/Lib/http/server.py
index def05f4..58abadf 100644
--- a/Lib/http/server.py
+++ b/Lib/http/server.py
@@ -103,8 +103,6 @@
 import sys
 import time
 import urllib.parse
-import contextlib
-from functools import partial
 
 from http import HTTPStatus
 
@@ -414,7 +412,7 @@
             method = getattr(self, mname)
             method()
             self.wfile.flush() #actually send the response if not already done.
-        except socket.timeout as e:
+        except TimeoutError as e:
             #a read or a write timed out.  Discard this connection
             self.log_error("Request timed out: %r", e)
             self.close_connection = True
@@ -689,6 +687,7 @@
                              parts[3], parts[4])
                 new_url = urllib.parse.urlunsplit(new_parts)
                 self.send_header("Location", new_url)
+                self.send_header("Content-Length", "0")
                 self.end_headers()
                 return None
             for index in "index.html", "index.htm":
@@ -1092,8 +1091,7 @@
         env['PATH_INFO'] = uqrest
         env['PATH_TRANSLATED'] = self.translate_path(uqrest)
         env['SCRIPT_NAME'] = scriptname
-        if query:
-            env['QUERY_STRING'] = query
+        env['QUERY_STRING'] = query
         env['REMOTE_ADDR'] = self.client_address[0]
         authorization = self.headers.get("authorization")
         if authorization:
@@ -1239,7 +1237,6 @@
 
     """
     ServerClass.address_family, addr = _get_best_family(bind, port)
-
     HandlerClass.protocol_version = protocol
     with ServerClass(addr, HandlerClass) as httpd:
         host, port = httpd.socket.getsockname()[:2]
@@ -1256,29 +1253,29 @@
 
 if __name__ == '__main__':
     import argparse
+    import contextlib
 
     parser = argparse.ArgumentParser()
     parser.add_argument('--cgi', action='store_true',
-                       help='Run as CGI Server')
+                        help='run as CGI server')
     parser.add_argument('--bind', '-b', metavar='ADDRESS',
-                        help='Specify alternate bind address '
-                             '[default: all interfaces]')
+                        help='specify alternate bind address '
+                             '(default: all interfaces)')
     parser.add_argument('--directory', '-d', default=os.getcwd(),
-                        help='Specify alternative directory '
-                        '[default:current directory]')
-    parser.add_argument('port', action='store',
-                        default=8000, type=int,
+                        help='specify alternate directory '
+                             '(default: current directory)')
+    parser.add_argument('port', action='store', default=8000, type=int,
                         nargs='?',
-                        help='Specify alternate port [default: 8000]')
+                        help='specify alternate port (default: 8000)')
     args = parser.parse_args()
     if args.cgi:
         handler_class = CGIHTTPRequestHandler
     else:
-        handler_class = partial(SimpleHTTPRequestHandler,
-                                directory=args.directory)
+        handler_class = SimpleHTTPRequestHandler
 
     # ensure dual-stack is not disabled; ref #38907
     class DualStackServer(ThreadingHTTPServer):
+
         def server_bind(self):
             # suppress exception when protocol is IPv4
             with contextlib.suppress(Exception):
@@ -1286,6 +1283,10 @@
                     socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
             return super().server_bind()
 
+        def finish_request(self, request, client_address):
+            self.RequestHandlerClass(request, client_address, self,
+                                     directory=args.directory)
+
     test(
         HandlerClass=handler_class,
         ServerClass=DualStackServer,
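Note: instead of wrapping SimpleHTTPRequestHandler in functools.partial, the CLI now binds --directory per request via finish_request(), which keeps handler_class a real class (so test() can still assign attributes such as protocol_version on it). The same pattern in isolation, with an assumed directory:

    import http.server

    class DirServer(http.server.ThreadingHTTPServer):
        directory = "/srv/www"  # assumed; bind whatever per-server state you need

        def finish_request(self, request, client_address):
            # assumes RequestHandlerClass accepts a 'directory' kwarg,
            # as SimpleHTTPRequestHandler does
            self.RequestHandlerClass(request, client_address, self,
                                     directory=self.directory)
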
diff --git a/Lib/imaplib.py b/Lib/imaplib.py
index d9720f2..7318439 100644
--- a/Lib/imaplib.py
+++ b/Lib/imaplib.py
@@ -1251,13 +1251,12 @@
             sys.stderr.write('  %s.%02d %s\n' % (tm, (secs*100)%100, s))
             sys.stderr.flush()
 
-        def _dump_ur(self, dict):
-            # Dump untagged responses (in `dict').
-            l = dict.items()
-            if not l: return
-            t = '\n\t\t'
-            l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
-            self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
+        def _dump_ur(self, untagged_resp_dict):
+            if not untagged_resp_dict:
+                return
+            items = (f'{key}: {value!r}'
+                    for key, value in untagged_resp_dict.items())
+            self._mesg('untagged responses dump:' + '\n\t\t'.join(items))
 
         def _log(self, line):
             # Keep log of last `_cmd_log_len' interactions for debugging.
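Note: the _dump_ur() rewrite replaces the lambda/map pipeline with a generator of f-strings. Rough sketch of the new output (dict contents invented):

    d = {'EXISTS': [b'3'], 'RECENT': [b'0']}
    items = (f'{key}: {value!r}' for key, value in d.items())
    print('untagged responses dump:' + '\n\t\t'.join(items))
    # prints "untagged responses dump:EXISTS: [b'3']" then "\t\tRECENT: [b'0']"
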
diff --git a/Lib/imp.py b/Lib/imp.py
index 31f8c76..e02aaef 100644
--- a/Lib/imp.py
+++ b/Lib/imp.py
@@ -28,7 +28,8 @@
 import types
 import warnings
 
-warnings.warn("the imp module is deprecated in favour of importlib; "
+warnings.warn("the imp module is deprecated in favour of importlib and slated "
+              "for removal in Python 3.12; "
               "see the module's documentation for alternative uses",
               DeprecationWarning, stacklevel=2)
 
diff --git a/Lib/importlib/__init__.py b/Lib/importlib/__init__.py
index 0c73c50..ce61883 100644
--- a/Lib/importlib/__init__.py
+++ b/Lib/importlib/__init__.py
@@ -34,7 +34,7 @@
     import _frozen_importlib_external as _bootstrap_external
 except ImportError:
     from . import _bootstrap_external
-    _bootstrap_external._setup(_bootstrap)
+    _bootstrap_external._set_bootstrap_module(_bootstrap)
     _bootstrap._bootstrap_external = _bootstrap_external
 else:
     _bootstrap_external.__name__ = 'importlib._bootstrap_external'
@@ -54,7 +54,6 @@
 # Fully bootstrapped at this point, import whatever you like, circular
 # dependencies and startup overhead minimisation permitting :)
 
-import types
 import warnings
 
 
@@ -79,8 +78,8 @@
     This function is deprecated in favor of importlib.util.find_spec().
 
     """
-    warnings.warn('Deprecated since Python 3.4. '
-                  'Use importlib.util.find_spec() instead.',
+    warnings.warn('Deprecated since Python 3.4 and slated for removal in '
+                  'Python 3.12; use importlib.util.find_spec() instead',
                   DeprecationWarning, stacklevel=2)
     try:
         loader = sys.modules[name].__loader__
@@ -136,12 +135,13 @@
     The module must have been successfully imported before.
 
     """
-    if not module or not isinstance(module, types.ModuleType):
-        raise TypeError("reload() argument must be a module")
     try:
         name = module.__spec__.name
     except AttributeError:
-        name = module.__name__
+        try:
+            name = module.__name__
+        except AttributeError:
+            raise TypeError("reload() argument must be a module")
 
     if sys.modules.get(name) is not module:
         msg = "module {} not in sys.modules"
diff --git a/Lib/importlib/_abc.py b/Lib/importlib/_abc.py
new file mode 100644
index 0000000..f80348f
--- /dev/null
+++ b/Lib/importlib/_abc.py
@@ -0,0 +1,54 @@
+"""Subset of importlib.abc used to reduce importlib.util imports."""
+from . import _bootstrap
+import abc
+import warnings
+
+
+class Loader(metaclass=abc.ABCMeta):
+
+    """Abstract base class for import loaders."""
+
+    def create_module(self, spec):
+        """Return a module to initialize and into which to load.
+
+        This method should raise ImportError if anything prevents it
+        from creating a new module.  It may return None to indicate
+        that the spec should create the new module.
+        """
+        # By default, defer to default semantics for the new module.
+        return None
+
+    # We don't define exec_module() here since that would break
+    # hasattr checks we do to support backward compatibility.
+
+    def load_module(self, fullname):
+        """Return the loaded module.
+
+        The module must be added to sys.modules and have import-related
+        attributes set properly.  The fullname is a str.
+
+        ImportError is raised on failure.
+
+        This method is deprecated in favor of loader.exec_module(). If
+        exec_module() exists then it is used to provide a backwards-compatible
+        functionality for this method.
+
+        """
+        if not hasattr(self, 'exec_module'):
+            raise ImportError
+        # Warning implemented in _load_module_shim().
+        return _bootstrap._load_module_shim(self, fullname)
+
+    def module_repr(self, module):
+        """Return a module's repr.
+
+        Used by the module type when the method does not raise
+        NotImplementedError.
+
+        This method is deprecated.
+
+        """
+        warnings.warn("importlib.abc.Loader.module_repr() is deprecated and "
+                      "slated for removal in Python 3.12", DeprecationWarning)
+        # The exception will cause ModuleType.__repr__ to ignore this method.
+        raise NotImplementedError
diff --git a/Lib/importlib/_adapters.py b/Lib/importlib/_adapters.py
new file mode 100644
index 0000000..e72edd1
--- /dev/null
+++ b/Lib/importlib/_adapters.py
@@ -0,0 +1,83 @@
+from contextlib import suppress
+
+from . import abc
+
+
+class SpecLoaderAdapter:
+    """
+    Adapt a package spec to adapt the underlying loader.
+    """
+
+    def __init__(self, spec, adapter=lambda spec: spec.loader):
+        self.spec = spec
+        self.loader = adapter(spec)
+
+    def __getattr__(self, name):
+        return getattr(self.spec, name)
+
+
+class TraversableResourcesLoader:
+    """
+    Adapt a loader to provide TraversableResources.
+    """
+
+    def __init__(self, spec):
+        self.spec = spec
+
+    def get_resource_reader(self, name):
+        return DegenerateFiles(self.spec)._native()
+
+
+class DegenerateFiles:
+    """
+    Adapter for an existing or non-existent resource reader
+    to provide a degenerate .files().
+    """
+
+    class Path(abc.Traversable):
+        def iterdir(self):
+            return iter(())
+
+        def is_dir(self):
+            return False
+
+        is_file = exists = is_dir  # type: ignore
+
+        def joinpath(self, other):
+            return DegenerateFiles.Path()
+
+        @property
+        def name(self):
+            return ''
+
+        def open(self, mode='rb', *args, **kwargs):
+            raise ValueError()
+
+    def __init__(self, spec):
+        self.spec = spec
+
+    @property
+    def _reader(self):
+        with suppress(AttributeError):
+            return self.spec.loader.get_resource_reader(self.spec.name)
+
+    def _native(self):
+        """
+        Return the native reader if it supports files().
+        """
+        reader = self._reader
+        return reader if hasattr(reader, 'files') else self
+
+    def __getattr__(self, attr):
+        return getattr(self._reader, attr)
+
+    def files(self):
+        return DegenerateFiles.Path()
+
+
+def wrap_spec(package):
+    """
+    Construct a package spec with traversable compatibility
+    on the spec/loader/reader.
+    """
+    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
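Note: DegenerateFiles is a null-object fallback: wrap_spec() always yields a loader whose resource reader has a working files(), returning an empty traversable when the real loader offers nothing. A quick probe using the private helper, purely for illustration:

    import importlib
    from importlib._adapters import wrap_spec   # private; shown for demonstration only

    spec = wrap_spec(importlib.import_module("email"))
    reader = spec.loader.get_resource_reader(spec.name)
    print(list(reader.files().iterdir()))  # real entries, or [] when degenerate
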
diff --git a/Lib/importlib/_bootstrap.py b/Lib/importlib/_bootstrap.py
index e00b27e..527bc9c 100644
--- a/Lib/importlib/_bootstrap.py
+++ b/Lib/importlib/_bootstrap.py
@@ -20,10 +20,23 @@
 # reference any injected objects! This includes not only global code but also
 # anything specified at the class level.
 
+def _object_name(obj):
+    try:
+        return obj.__qualname__
+    except AttributeError:
+        return type(obj).__qualname__
+
 # Bootstrap-related code ######################################################
 
+# Modules injected manually by _setup()
+_thread = None
+_warnings = None
+_weakref = None
+
+# Import done by _install_external_importers()
 _bootstrap_external = None
 
+
 def _wrap(new, old):
     """Simple substitute for functools.update_wrapper."""
     for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
@@ -262,9 +275,12 @@
 def _load_module_shim(self, fullname):
     """Load the specified module into sys.modules and return it.
 
-    This method is deprecated.  Use loader.exec_module instead.
+    This method is deprecated.  Use loader.exec_module() instead.
 
     """
+    msg = ("the load_module() method is deprecated and slated for removal in "
+          "Python 3.12; use exec_module() instead")
+    _warnings.warn(msg, DeprecationWarning)
     spec = spec_from_loader(fullname, self)
     if fullname in sys.modules:
         module = sys.modules[fullname]
@@ -276,26 +292,16 @@
 # Module specifications #######################################################
 
 def _module_repr(module):
-    # The implementation of ModuleType.__repr__().
+    """The implementation of ModuleType.__repr__()."""
     loader = getattr(module, '__loader__', None)
-    if hasattr(loader, 'module_repr'):
-        # As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
-        # drop their implementations for module_repr. we can add a
-        # deprecation warning here.
+    if spec := getattr(module, "__spec__", None):
+        return _module_repr_from_spec(spec)
+    elif hasattr(loader, 'module_repr'):
         try:
             return loader.module_repr(module)
         except Exception:
             pass
-    try:
-        spec = module.__spec__
-    except AttributeError:
-        pass
-    else:
-        if spec is not None:
-            return _module_repr_from_spec(spec)
-
-    # We could use module.__class__.__name__ instead of 'module' in the
-    # various repr permutations.
+    # Fall through to a catch-all which always succeeds.
     try:
         name = module.__name__
     except AttributeError:
@@ -605,9 +611,9 @@
             else:
                 _init_module_attrs(spec, module, override=True)
                 if not hasattr(spec.loader, 'exec_module'):
-                    # (issue19713) Once BuiltinImporter and ExtensionFileLoader
-                    # have exec_module() implemented, we can add a deprecation
-                    # warning here.
+                    msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
+                           "falling back to load_module()")
+                    _warnings.warn(msg, ImportWarning)
                     spec.loader.load_module(name)
                 else:
                     spec.loader.exec_module(module)
@@ -620,9 +626,8 @@
 
 
 def _load_backward_compatible(spec):
-    # (issue19713) Once BuiltinImporter and ExtensionFileLoader
-    # have exec_module() implemented, we can add a deprecation
-    # warning here.
+    # It is assumed that all callers have been warned about using load_module()
+    # appropriately before calling this function.
     try:
         spec.loader.load_module(spec.name)
     except:
@@ -661,6 +666,9 @@
     if spec.loader is not None:
         # Not a namespace package.
         if not hasattr(spec.loader, 'exec_module'):
+            msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
+                    "falling back to load_module()")
+            _warnings.warn(msg, ImportWarning)
             return _load_backward_compatible(spec)
 
     module = module_from_spec(spec)
@@ -731,6 +739,8 @@
         The method is deprecated.  The import machinery does the job itself.
 
         """
+        _warnings.warn("BuiltinImporter.module_repr() is deprecated and "
+                       "slated for removal in Python 3.12", DeprecationWarning)
         return f'<module {module.__name__!r} ({BuiltinImporter._ORIGIN})>'
 
     @classmethod
@@ -751,19 +761,22 @@
         This method is deprecated.  Use find_spec() instead.
 
         """
+        _warnings.warn("BuiltinImporter.find_module() is deprecated and "
+                       "slated for removal in Python 3.12; use find_spec() instead",
+                       DeprecationWarning)
         spec = cls.find_spec(fullname, path)
         return spec.loader if spec is not None else None
 
-    @classmethod
-    def create_module(self, spec):
+    @staticmethod
+    def create_module(spec):
         """Create a built-in module"""
         if spec.name not in sys.builtin_module_names:
             raise ImportError('{!r} is not a built-in module'.format(spec.name),
                               name=spec.name)
         return _call_with_frames_removed(_imp.create_builtin, spec)
 
-    @classmethod
-    def exec_module(self, module):
+    @staticmethod
+    def exec_module(module):
         """Exec a built-in module"""
         _call_with_frames_removed(_imp.exec_builtin, module)
 
@@ -806,6 +819,8 @@
         The method is deprecated.  The import machinery does the job itself.
 
         """
+        _warnings.warn("FrozenImporter.module_repr() is deprecated and "
+                       "slated for removal in Python 3.12", DeprecationWarning)
         return '<module {!r} ({})>'.format(m.__name__, FrozenImporter._ORIGIN)
 
     @classmethod
@@ -822,10 +837,13 @@
         This method is deprecated.  Use find_spec() instead.
 
         """
+        _warnings.warn("FrozenImporter.find_module() is deprecated and "
+                       "slated for removal in Python 3.12; use find_spec() instead",
+                       DeprecationWarning)
         return cls if _imp.is_frozen(fullname) else None
 
-    @classmethod
-    def create_module(cls, spec):
+    @staticmethod
+    def create_module(spec):
         """Use default semantics for module creation."""
 
     @staticmethod
@@ -844,6 +862,7 @@
         This method is deprecated.  Use exec_module() instead.
 
         """
+        # Warning about deprecation implemented in _load_module_shim().
         return _load_module_shim(cls, fullname)
 
     @classmethod
@@ -890,8 +909,9 @@
 
 
 def _find_spec_legacy(finder, name, path):
-    # This would be a good place for a DeprecationWarning if
-    # we ended up going that route.
+    msg = (f"{_object_name(finder)}.find_spec() not found; "
+                           "falling back to find_module()")
+    _warnings.warn(msg, ImportWarning)
     loader = finder.find_module(name, path)
     if loader is None:
         return None
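Note: _object_name() gives the new ImportWarning/DeprecationWarning messages a readable subject, preferring __qualname__ and falling back to the object's type. Behavior sketch (importing the private module only for demonstration):

    from importlib._bootstrap import _object_name   # private, illustration only

    class LegacyFinder:
        pass

    print(_object_name(LegacyFinder))    # 'LegacyFinder', via __qualname__
    print(_object_name(LegacyFinder()))  # 'LegacyFinder', via type(obj).__qualname__
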
diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py
index 25a3f8c..49bcaea 100644
--- a/Lib/importlib/_bootstrap_external.py
+++ b/Lib/importlib/_bootstrap_external.py
@@ -19,6 +19,37 @@
 # reference any injected objects! This includes not only global code but also
 # anything specified at the class level.
 
+# Module injected manually by _set_bootstrap_module()
+_bootstrap = None
+
+# Import builtin modules
+import _imp
+import _io
+import sys
+import _warnings
+import marshal
+
+
+_MS_WINDOWS = (sys.platform == 'win32')
+if _MS_WINDOWS:
+    import nt as _os
+    import winreg
+else:
+    import posix as _os
+
+
+if _MS_WINDOWS:
+    path_separators = ['\\', '/']
+else:
+    path_separators = ['/']
+# Assumption made in _path_join()
+assert all(len(sep) == 1 for sep in path_separators)
+path_sep = path_separators[0]
+path_sep_tuple = tuple(path_separators)
+path_separators = ''.join(path_separators)
+_pathseps_with_colon = {f':{s}' for s in path_separators}
+
+
 # Bootstrap-related code ######################################################
 _CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win',
 _CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin'
@@ -42,6 +73,8 @@
             return False
     return _relax_case
 
+_relax_case = _make_relax_case()
+
 
 def _pack_uint32(x):
     """Convert a 32-bit integer to little-endian."""
@@ -59,22 +92,49 @@
     return int.from_bytes(data, 'little')
 
 
-def _path_join(*path_parts):
-    """Replacement for os.path.join()."""
-    return path_sep.join([part.rstrip(path_separators)
-                          for part in path_parts if part])
+if _MS_WINDOWS:
+    def _path_join(*path_parts):
+        """Replacement for os.path.join()."""
+        if not path_parts:
+            return ""
+        if len(path_parts) == 1:
+            return path_parts[0]
+        root = ""
+        path = []
+        for new_root, tail in map(_os._path_splitroot, path_parts):
+            if new_root.startswith(path_sep_tuple) or new_root.endswith(path_sep_tuple):
+                root = new_root.rstrip(path_separators) or root
+                path = [path_sep + tail]
+            elif new_root.endswith(':'):
+                if root.casefold() != new_root.casefold():
+                    # Drive relative paths have to be resolved by the OS, so we reset the
+                    # tail but do not add a path_sep prefix.
+                    root = new_root
+                    path = [tail]
+                else:
+                    path.append(tail)
+            else:
+                root = new_root or root
+                path.append(tail)
+        path = [p.rstrip(path_separators) for p in path if p]
+        if len(path) == 1 and not path[0]:
+            # Avoid losing the root's trailing separator when joining with nothing
+            return root + path_sep
+        return root + path_sep.join(path)
+
+else:
+    def _path_join(*path_parts):
+        """Replacement for os.path.join()."""
+        return path_sep.join([part.rstrip(path_separators)
+                              for part in path_parts if part])
 
 
 def _path_split(path):
     """Replacement for os.path.split()."""
-    if len(path_separators) == 1:
-        front, _, tail = path.rpartition(path_sep)
-        return front, tail
-    for x in reversed(path):
-        if x in path_separators:
-            front, tail = path.rsplit(x, maxsplit=1)
-            return front, tail
-    return '', path
+    i = max(path.rfind(p) for p in path_separators)
+    if i < 0:
+        return '', path
+    return path[:i], path[i + 1:]
 
 
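Note: the Windows _path_join() now leans on nt._path_splitroot() so drive and UNC roots are tracked separately from the tail. Expected results as comments (cases worked out by hand, not taken from the test suite):

    # _path_join('C:\\a', 'b')        -> 'C:\\a\\b'   same drive, normal join
    # _path_join('C:\\a', 'D:', 'b')  -> 'D:b'        drive-relative: no separator added
    # _path_join('C:\\', '')          -> 'C:\\'       root keeps its trailing separator
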
 def _path_stat(path):
@@ -108,13 +168,18 @@
     return _path_is_mode_type(path, 0o040000)
 
 
-def _path_isabs(path):
-    """Replacement for os.path.isabs.
+if _MS_WINDOWS:
+    def _path_isabs(path):
+        """Replacement for os.path.isabs."""
+        if not path:
+            return False
+        root = _os._path_splitroot(path)[0].replace('/', '\\')
+        return len(root) > 1 and (root.startswith('\\\\') or root.endswith('\\'))
 
-    Considers a Windows drive-relative path (no drive, but starts with slash) to
-    still be "absolute".
-    """
-    return path.startswith(path_separators) or path[1:3] in _pathseps_with_colon
+else:
+    def _path_isabs(path):
+        """Replacement for os.path.isabs."""
+        return path.startswith(path_separators)
 
 
 def _write_atomic(path, data, mode=0o666):
@@ -277,6 +342,16 @@
 #     Python 3.9a2  3423 (add IS_OP, CONTAINS_OP and JUMP_IF_NOT_EXC_MATCH bytecodes #39156)
 #     Python 3.9a2  3424 (simplify bytecodes for *value unpacking)
 #     Python 3.9a2  3425 (simplify bytecodes for **value unpacking)
+#     Python 3.10a1 3430 (Make 'annotations' future by default)
+#     Python 3.10a1 3431 (New line number table format -- PEP 626)
+#     Python 3.10a2 3432 (Function annotation for MAKE_FUNCTION is changed from dict to tuple bpo-42202)
+#     Python 3.10a2 3433 (RERAISE restores f_lasti if oparg != 0)
+#     Python 3.10a6 3434 (PEP 634: Structural Pattern Matching)
+#     Python 3.10a7 3435 Use instruction offsets (as opposed to byte offsets).
+#     Python 3.10b1 3436 (Add GEN_START bytecode #43683)
+#     Python 3.10b1 3437 (Undo making 'annotations' future by default - We like to dance among core devs!)
+#     Python 3.10b1 3438 Safer line number table handling.
+#     Python 3.10b1 3439 (Add ROT_N)
 
 #
 # MAGIC must change whenever the bytecode emitted by the compiler may no
@@ -286,13 +361,17 @@
 # Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array
 # in PC/launcher.c must also be updated.
 
-MAGIC_NUMBER = (3425).to_bytes(2, 'little') + b'\r\n'
+MAGIC_NUMBER = (3439).to_bytes(2, 'little') + b'\r\n'
 _RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little')  # For import.c
 
 _PYCACHE = '__pycache__'
 _OPT = 'opt-'
 
-SOURCE_SUFFIXES = ['.py']  # _setup() adds .pyw as needed.
+SOURCE_SUFFIXES = ['.py']
+if _MS_WINDOWS:
+    SOURCE_SUFFIXES.append('.pyw')
+
+EXTENSION_SUFFIXES = _imp.extension_suffixes()
 
 BYTECODE_SUFFIXES = ['.pyc']
 # Deprecated.
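Note: MAGIC_NUMBER jumps 3425 -> 3439, covering the 3.10 bytecode changes listed above, so every .pyc produced by older interpreters is invalidated. Sanity-check sketch (the source file name is invented):

    import importlib.util
    import py_compile

    pyc = py_compile.compile("mod.py")
    with open(pyc, "rb") as f:
        assert f.read(4) == importlib.util.MAGIC_NUMBER  # b'\x6f\x0d\r\n' for 3439
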
@@ -467,15 +546,18 @@
             raise ImportError('loader for %s cannot handle %s' %
                                 (self.name, name), name=name)
         return method(self, name, *args, **kwargs)
-    try:
+
+    # FIXME: @_check_name is used to define class methods before the
+    # _bootstrap module is set by _set_bootstrap_module().
+    if _bootstrap is not None:
         _wrap = _bootstrap._wrap
-    except NameError:
-        # XXX yuck
+    else:
         def _wrap(new, old):
             for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
                 if hasattr(old, replace):
                     setattr(new, replace, getattr(old, replace))
             new.__dict__.update(old.__dict__)
+
     _wrap(_check_name_wrapper, method)
     return _check_name_wrapper
 
@@ -487,6 +569,9 @@
     This method is deprecated in favor of finder.find_spec().
 
     """
+    _warnings.warn("find_module() is deprecated and "
+                   "slated for removal in Python 3.12; use find_spec() instead",
+                   DeprecationWarning)
     # Call find_loader(). If it returns a string (indicating this
     # is a namespace package portion), generate a warning and
     # return None.
@@ -658,6 +743,11 @@
                 pass
     else:
         location = _os.fspath(location)
+        if not _path_isabs(location):
+            try:
+                location = _path_join(_os.getcwd(), location)
+            except OSError:
+                pass
 
     # If the location is on the filesystem, but doesn't actually exist,
     # we could return None here, indicating that the location is not
@@ -711,10 +801,10 @@
     REGISTRY_KEY_DEBUG = (
         'Software\\Python\\PythonCore\\{sys_version}'
         '\\Modules\\{fullname}\\Debug')
-    DEBUG_BUILD = False  # Changed in _setup()
+    DEBUG_BUILD = (_MS_WINDOWS and '_d.pyd' in EXTENSION_SUFFIXES)
 
-    @classmethod
-    def _open_registry(cls, key):
+    @staticmethod
+    def _open_registry(key):
         try:
             return winreg.OpenKey(winreg.HKEY_CURRENT_USER, key)
         except OSError:
@@ -755,9 +845,12 @@
     def find_module(cls, fullname, path=None):
         """Find module named in the registry.
 
-        This method is deprecated.  Use exec_module() instead.
+        This method is deprecated.  Use find_spec() instead.
 
         """
+        _warnings.warn("WindowsRegistryFinder.find_module() is deprecated and "
+                       "slated for removal in Python 3.12; use find_spec() instead",
+                       DeprecationWarning)
         spec = cls.find_spec(fullname, path)
         if spec is not None:
             return spec.loader
@@ -790,7 +883,8 @@
         _bootstrap._call_with_frames_removed(exec, code, module.__dict__)
 
     def load_module(self, fullname):
-        """This module is deprecated."""
+        """This method is deprecated."""
+        # Warning implemented in _load_module_shim().
         return _bootstrap._load_module_shim(self, fullname)
 
 
@@ -965,7 +1059,7 @@
         """
         # The only reason for this method is for the name check.
         # Issue #14857: Avoid the zero-argument form of super so the implementation
-        # of that form can be updated without breaking the frozen module
+        # of that form can be updated without breaking the frozen module.
         return super(FileLoader, self).load_module(fullname)
 
     @_check_name
@@ -982,32 +1076,10 @@
             with _io.FileIO(path, 'r') as file:
                 return file.read()
 
-    # ResourceReader ABC API.
-
     @_check_name
     def get_resource_reader(self, module):
-        if self.is_package(module):
-            return self
-        return None
-
-    def open_resource(self, resource):
-        path = _path_join(_path_split(self.path)[0], resource)
-        return _io.FileIO(path, 'r')
-
-    def resource_path(self, resource):
-        if not self.is_resource(resource):
-            raise FileNotFoundError
-        path = _path_join(_path_split(self.path)[0], resource)
-        return path
-
-    def is_resource(self, name):
-        if path_sep in name:
-            return False
-        path = _path_join(_path_split(self.path)[0], name)
-        return _path_isfile(path)
-
-    def contents(self):
-        return iter(_os.listdir(_path_split(self.path)[0]))
+        from importlib.readers import FileReader
+        return FileReader(self)
 
 
 class SourceFileLoader(FileLoader, SourceLoader):
@@ -1080,10 +1152,6 @@
         return None
 
 
-# Filled in by _setup().
-EXTENSION_SUFFIXES = []
-
-
 class ExtensionFileLoader(FileLoader, _LoaderBasics):
 
     """Loader for extension modules.
@@ -1144,10 +1212,15 @@
     using path_finder.  For top-level modules, the parent module's path
     is sys.path."""
 
+    # When invalidate_caches() is called, this epoch is incremented
+    # https://bugs.python.org/issue45703
+    _epoch = 0
+
     def __init__(self, name, path, path_finder):
         self._name = name
         self._path = path
         self._last_parent_path = tuple(self._get_parent_path())
+        self._last_epoch = self._epoch
         self._path_finder = path_finder
 
     def _find_parent_path_names(self):
@@ -1167,7 +1240,7 @@
     def _recalculate(self):
         # If the parent's path has changed, recalculate _path
         parent_path = tuple(self._get_parent_path()) # Make a copy
-        if parent_path != self._last_parent_path:
+        if parent_path != self._last_parent_path or self._epoch != self._last_epoch:
             spec = self._path_finder(self._name, parent_path)
             # Note that no changes are made if a loader is returned, but we
             #  do remember the new parent path
@@ -1175,6 +1248,7 @@
                 if spec.submodule_search_locations:
                     self._path = spec.submodule_search_locations
             self._last_parent_path = parent_path     # Save the copy
+            self._last_epoch = self._epoch
         return self._path
 
     def __iter__(self):
@@ -1204,13 +1278,15 @@
     def __init__(self, name, path, path_finder):
         self._path = _NamespacePath(name, path, path_finder)
 
-    @classmethod
-    def module_repr(cls, module):
+    @staticmethod
+    def module_repr(module):
         """Return repr for the module.
 
         The method is deprecated.  The import machinery does the job itself.
 
         """
+        _warnings.warn("_NamespaceLoader.module_repr() is deprecated and "
+                       "slated for removal in Python 3.12", DeprecationWarning)
         return '<module {!r} (namespace)>'.format(module.__name__)
 
     def is_package(self, fullname):
@@ -1237,8 +1313,13 @@
         # The import system never calls this method.
         _bootstrap._verbose_message('namespace module loaded with path {!r}',
                                     self._path)
+        # Warning implemented in _load_module_shim().
         return _bootstrap._load_module_shim(self, fullname)
 
+    def get_resource_reader(self, module):
+        from importlib.readers import NamespaceReader
+        return NamespaceReader(self._path)
+
 
 # Finders #####################################################################
 
@@ -1246,8 +1327,8 @@
 
     """Meta path finder for sys.path and package __path__ attributes."""
 
-    @classmethod
-    def invalidate_caches(cls):
+    @staticmethod
+    def invalidate_caches():
         """Call the invalidate_caches() method on all path entry finders
         stored in sys.path_importer_caches (where implemented)."""
         for name, finder in list(sys.path_importer_cache.items()):
@@ -1255,9 +1336,12 @@
                 del sys.path_importer_cache[name]
             elif hasattr(finder, 'invalidate_caches'):
                 finder.invalidate_caches()
+        # Also invalidate the caches of _NamespacePaths
+        # https://bugs.python.org/issue45703
+        _NamespacePath._epoch += 1
 
-    @classmethod
-    def _path_hooks(cls, path):
+    @staticmethod
+    def _path_hooks(path):
         """Search sys.path_hooks for a finder for 'path'."""
         if sys.path_hooks is not None and not sys.path_hooks:
             _warnings.warn('sys.path_hooks is empty', ImportWarning)
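Note: the _epoch counter gives invalidate_caches() a way to reach _NamespacePath objects it cannot enumerate: each path compares its _last_epoch against the class-level value and re-runs the finder when they differ. Typical trigger, with an invented directory:

    import importlib
    import sys

    sys.path.append("/new/plugin/dir")  # adds another namespace-package portion
    importlib.invalidate_caches()       # bumps _NamespacePath._epoch
    # the next import through the namespace package re-runs _recalculate()
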
@@ -1296,8 +1380,14 @@
         # This would be a good place for a DeprecationWarning if
         # we ended up going that route.
         if hasattr(finder, 'find_loader'):
+            msg = (f"{_bootstrap._object_name(finder)}.find_spec() not found; "
+                    "falling back to find_loader()")
+            _warnings.warn(msg, ImportWarning)
             loader, portions = finder.find_loader(fullname)
         else:
+            msg = (f"{_bootstrap._object_name(finder)}.find_spec() not found; "
+                    "falling back to find_module()")
+            _warnings.warn(msg, ImportWarning)
             loader = finder.find_module(fullname)
             portions = []
         if loader is not None:
@@ -1370,13 +1460,16 @@
         This method is deprecated.  Use find_spec() instead.
 
         """
+        _warnings.warn("PathFinder.find_module() is deprecated and "
+                       "slated for removal in Python 3.12; use find_spec() instead",
+                       DeprecationWarning)
         spec = cls.find_spec(fullname, path)
         if spec is None:
             return None
         return spec.loader
 
-    @classmethod
-    def find_distributions(cls, *args, **kwargs):
+    @staticmethod
+    def find_distributions(*args, **kwargs):
         """
         Find distributions.
 
@@ -1408,6 +1501,8 @@
         self._loaders = loaders
         # Base (directory) path
         self.path = path or '.'
+        if not _path_isabs(self.path):
+            self.path = _path_join(_os.getcwd(), self.path)
         self._path_mtime = -1
         self._path_cache = set()
         self._relaxed_path_cache = set()
@@ -1425,6 +1520,9 @@
         This method is deprecated.  Use find_spec() instead.
 
         """
+        _warnings.warn("FileFinder.find_loader() is deprecated and "
+                       "slated for removal in Python 3.12; use find_spec() instead",
+                       DeprecationWarning)
         spec = self.find_spec(fullname)
         if spec is None:
             return None, []
@@ -1470,7 +1568,10 @@
                 is_namespace = _path_isdir(base_path)
         # Check for a file w/ a proper suffix exists.
         for suffix, loader_class in self._loaders:
-            full_path = _path_join(self.path, tail_module + suffix)
+            try:
+                full_path = _path_join(self.path, tail_module + suffix)
+            except ValueError:
+                return None
             _bootstrap._verbose_message('trying {}', full_path, verbosity=2)
             if cache_module + suffix in cache:
                 if _path_isfile(full_path):
@@ -1572,66 +1673,14 @@
     return [extensions, source, bytecode]
 
 
-def _setup(_bootstrap_module):
-    """Setup the path-based importers for importlib by importing needed
-    built-in modules and injecting them into the global namespace.
-
-    Other components are extracted from the core bootstrap module.
-
-    """
-    global sys, _imp, _bootstrap
+def _set_bootstrap_module(_bootstrap_module):
+    global _bootstrap
     _bootstrap = _bootstrap_module
-    sys = _bootstrap.sys
-    _imp = _bootstrap._imp
-
-    self_module = sys.modules[__name__]
-
-    # Directly load the os module (needed during bootstrap).
-    os_details = ('posix', ['/']), ('nt', ['\\', '/'])
-    for builtin_os, path_separators in os_details:
-        # Assumption made in _path_join()
-        assert all(len(sep) == 1 for sep in path_separators)
-        path_sep = path_separators[0]
-        if builtin_os in sys.modules:
-            os_module = sys.modules[builtin_os]
-            break
-        else:
-            try:
-                os_module = _bootstrap._builtin_from_name(builtin_os)
-                break
-            except ImportError:
-                continue
-    else:
-        raise ImportError('importlib requires posix or nt')
-
-    setattr(self_module, '_os', os_module)
-    setattr(self_module, 'path_sep', path_sep)
-    setattr(self_module, 'path_separators', ''.join(path_separators))
-    setattr(self_module, '_pathseps_with_colon', {f':{s}' for s in path_separators})
-
-    # Directly load built-in modules needed during bootstrap.
-    builtin_names = ['_io', '_warnings', 'marshal']
-    if builtin_os == 'nt':
-        builtin_names.append('winreg')
-    for builtin_name in builtin_names:
-        if builtin_name not in sys.modules:
-            builtin_module = _bootstrap._builtin_from_name(builtin_name)
-        else:
-            builtin_module = sys.modules[builtin_name]
-        setattr(self_module, builtin_name, builtin_module)
-
-    # Constants
-    setattr(self_module, '_relax_case', _make_relax_case())
-    EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
-    if builtin_os == 'nt':
-        SOURCE_SUFFIXES.append('.pyw')
-        if '_d.pyd' in EXTENSION_SUFFIXES:
-            WindowsRegistryFinder.DEBUG_BUILD = True
 
 
 def _install(_bootstrap_module):
     """Install the path-based import components."""
-    _setup(_bootstrap_module)
+    _set_bootstrap_module(_bootstrap_module)
     supported_loaders = _get_supported_file_loaders()
     sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
     sys.meta_path.append(PathFinder)
diff --git a/Lib/importlib/_common.py b/Lib/importlib/_common.py
index c1204f0..549fee3 100644
--- a/Lib/importlib/_common.py
+++ b/Lib/importlib/_common.py
@@ -1,9 +1,72 @@
 import os
 import pathlib
-import zipfile
 import tempfile
 import functools
 import contextlib
+import types
+import importlib
+
+from typing import Union, Any, Optional
+from .abc import ResourceReader, Traversable
+
+from ._adapters import wrap_spec
+
+Package = Union[types.ModuleType, str]
+
+
+def files(package):
+    # type: (Package) -> Traversable
+    """
+    Get a Traversable resource from a package
+    """
+    return from_package(get_package(package))
+
+
+def normalize_path(path):
+    # type: (Any) -> str
+    """Normalize a path by ensuring it is a string.
+
+    If the resulting string contains path separators, an exception is raised.
+    """
+    str_path = str(path)
+    parent, file_name = os.path.split(str_path)
+    if parent:
+        raise ValueError(f'{path!r} must be only a file name')
+    return file_name
+
+
+def get_resource_reader(package):
+    # type: (types.ModuleType) -> Optional[ResourceReader]
+    """
+    Return the package's loader if it's a ResourceReader.
+    """
+    # We can't use an issubclass() check here because apparently abc's
+    # __subclasscheck__() hook wants to create a weak reference to the
+    # object, but zipimport.zipimporter does not support weak references,
+    # resulting in a TypeError.  That seems terrible.
+    spec = package.__spec__
+    reader = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
+    if reader is None:
+        return None
+    return reader(spec.name)  # type: ignore
+
+
+def resolve(cand):
+    # type: (Package) -> types.ModuleType
+    return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
+
+
+def get_package(package):
+    # type: (Package) -> types.ModuleType
+    """Take a package name or module object and return the module.
+
+    Raise an exception if the resolved module is not a package.
+    """
+    resolved = resolve(package)
+    if wrap_spec(resolved).submodule_search_locations is None:
+        raise TypeError(f'{package!r} is not a package')
+    return resolved
 
 
 def from_package(package):
@@ -11,18 +74,9 @@
     Return a Traversable object for the given package.
 
     """
-    return fallback_resources(package.__spec__)
-
-
-def fallback_resources(spec):
-    package_directory = pathlib.Path(spec.origin).parent
-    try:
-        archive_path = spec.loader.archive
-        rel_path = package_directory.relative_to(archive_path)
-        return zipfile.Path(archive_path, str(rel_path) + '/')
-    except Exception:
-        pass
-    return package_directory
+    spec = wrap_spec(package)
+    reader = spec.loader.get_resource_reader(spec.name)
+    return reader.files()
 
 
 @contextlib.contextmanager
@@ -34,6 +88,7 @@
     try:
         os.write(fd, reader())
         os.close(fd)
+        del reader
         yield pathlib.Path(raw_path)
     finally:
         try:
@@ -43,14 +98,12 @@
 
 
 @functools.singledispatch
-@contextlib.contextmanager
 def as_file(path):
     """
     Given a Traversable object, return that object as a
     path on the local file system in a context manager.
     """
-    with _tempfile(path.read_bytes, suffix=path.name) as local:
-        yield local
+    return _tempfile(path.read_bytes, suffix=path.name)
 
 
 @as_file.register(pathlib.Path)
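Note: as_file() now returns the _tempfile() context manager directly instead of nesting a second @contextmanager wrapper; caller-visible behavior is unchanged. Usage through the public surface (package and resource names invented):

    from importlib.resources import files, as_file

    with as_file(files("mypkg") / "data.bin") as path:
        data = path.read_bytes()
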
diff --git a/Lib/importlib/abc.py b/Lib/importlib/abc.py
index b8a9bb1..0b4a3f8 100644
--- a/Lib/importlib/abc.py
+++ b/Lib/importlib/abc.py
@@ -1,5 +1,4 @@
 """Abstract base classes related to import."""
-from . import _bootstrap
 from . import _bootstrap_external
 from . import machinery
 try:
@@ -12,8 +11,10 @@
     import _frozen_importlib_external
 except ImportError:
     _frozen_importlib_external = _bootstrap_external
+from ._abc import Loader
 import abc
 import warnings
+from typing import BinaryIO, Iterable, Text
 from typing import Protocol, runtime_checkable
 
 
@@ -40,15 +41,27 @@
     Deprecated since Python 3.3
     """
 
+    def __init__(self):
+        warnings.warn("the Finder ABC is deprecated and "
+                       "slated for removal in Python 3.12; use MetaPathFinder "
+                       "or PathEntryFinder instead",
+                       DeprecationWarning)
+
     @abc.abstractmethod
     def find_module(self, fullname, path=None):
         """An abstract method that should find a module.
         The fullname is a str and the optional path is a str or None.
         Returns a Loader object or None.
         """
+        warnings.warn("importlib.abc.Finder along with its find_module() "
+                      "method are deprecated and "
+                       "slated for removal in Python 3.12; use "
+                       "MetaPathFinder.find_spec() or "
+                       "PathEntryFinder.find_spec() instead",
+                       DeprecationWarning)
 
 
-class MetaPathFinder(Finder):
+class MetaPathFinder(metaclass=abc.ABCMeta):
 
     """Abstract base class for import finders on sys.meta_path."""
 
@@ -67,8 +80,8 @@
 
         """
         warnings.warn("MetaPathFinder.find_module() is deprecated since Python "
-                      "3.4 in favor of MetaPathFinder.find_spec() "
-                      "(available since 3.4)",
+                      "3.4 in favor of MetaPathFinder.find_spec() and is "
+                      "slated for removal in Python 3.12",
                       DeprecationWarning,
                       stacklevel=2)
         if not hasattr(self, 'find_spec'):
@@ -85,7 +98,7 @@
           machinery.PathFinder, machinery.WindowsRegistryFinder)
 
 
-class PathEntryFinder(Finder):
+class PathEntryFinder(metaclass=abc.ABCMeta):
 
     """Abstract base class for path entry finders used by PathFinder."""
 
@@ -134,53 +147,6 @@
 _register(PathEntryFinder, machinery.FileFinder)
 
 
-class Loader(metaclass=abc.ABCMeta):
-
-    """Abstract base class for import loaders."""
-
-    def create_module(self, spec):
-        """Return a module to initialize and into which to load.
-
-        This method should raise ImportError if anything prevents it
-        from creating a new module.  It may return None to indicate
-        that the spec should create the new module.
-        """
-        # By default, defer to default semantics for the new module.
-        return None
-
-    # We don't define exec_module() here since that would break
-    # hasattr checks we do to support backward compatibility.
-
-    def load_module(self, fullname):
-        """Return the loaded module.
-
-        The module must be added to sys.modules and have import-related
-        attributes set properly.  The fullname is a str.
-
-        ImportError is raised on failure.
-
-        This method is deprecated in favor of loader.exec_module(). If
-        exec_module() exists then it is used to provide a backwards-compatible
-        functionality for this method.
-
-        """
-        if not hasattr(self, 'exec_module'):
-            raise ImportError
-        return _bootstrap._load_module_shim(self, fullname)
-
-    def module_repr(self, module):
-        """Return a module's repr.
-
-        Used by the module type when the method does not raise
-        NotImplementedError.
-
-        This method is deprecated.
-
-        """
-        # The exception will cause ModuleType.__repr__ to ignore this method.
-        raise NotImplementedError
-
-
 class ResourceLoader(Loader):
 
     """Abstract base class for loaders which can return data from their
@@ -344,49 +310,45 @@
 
 
 class ResourceReader(metaclass=abc.ABCMeta):
-
-    """Abstract base class to provide resource-reading support.
-
-    Loaders that support resource reading are expected to implement
-    the ``get_resource_reader(fullname)`` method and have it either return None
-    or an object compatible with this ABC.
-    """
+    """Abstract base class for loaders to provide resource reading support."""
 
     @abc.abstractmethod
-    def open_resource(self, resource):
+    def open_resource(self, resource: Text) -> BinaryIO:
         """Return an opened, file-like object for binary reading.
 
-        The 'resource' argument is expected to represent only a file name
-        and thus not contain any subdirectory components.
-
+        The 'resource' argument is expected to represent only a file name.
         If the resource cannot be found, FileNotFoundError is raised.
         """
+        # This deliberately raises FileNotFoundError instead of
+        # NotImplementedError so that if this method is accidentally called,
+        # it'll still do the right thing.
         raise FileNotFoundError
 
     @abc.abstractmethod
-    def resource_path(self, resource):
+    def resource_path(self, resource: Text) -> Text:
         """Return the file system path to the specified resource.
 
-        The 'resource' argument is expected to represent only a file name
-        and thus not contain any subdirectory components.
-
+        The 'resource' argument is expected to represent only a file name.
         If the resource does not exist on the file system, raise
         FileNotFoundError.
         """
+        # This deliberately raises FileNotFoundError instead of
+        # NotImplementedError so that if this method is accidentally called,
+        # it'll still do the right thing.
         raise FileNotFoundError
 
     @abc.abstractmethod
-    def is_resource(self, name):
-        """Return True if the named 'name' is consider a resource."""
+    def is_resource(self, path: Text) -> bool:
+        """Return True if the named 'path' is a resource.
+
+        Files are resources, directories are not.
+        """
         raise FileNotFoundError
 
     @abc.abstractmethod
-    def contents(self):
-        """Return an iterable of strings over the contents of the package."""
-        return []
-
-
-_register(ResourceReader, machinery.SourceFileLoader)
+    def contents(self) -> Iterable[str]:
+        """Return an iterable of entries in `package`."""
+        raise FileNotFoundError
 
 
 @runtime_checkable
@@ -402,26 +364,28 @@
         Yield Traversable objects in self
         """
 
-    @abc.abstractmethod
     def read_bytes(self):
         """
         Read contents of self as bytes
         """
+        with self.open('rb') as strm:
+            return strm.read()
 
-    @abc.abstractmethod
     def read_text(self, encoding=None):
         """
-        Read contents of self as bytes
+        Read contents of self as text
         """
+        with self.open(encoding=encoding) as strm:
+            return strm.read()
 
     @abc.abstractmethod
-    def is_dir(self):
+    def is_dir(self) -> bool:
         """
         Return True if self is a dir
         """
 
     @abc.abstractmethod
-    def is_file(self):
+    def is_file(self) -> bool:
         """
         Return True if self is a file
         """
@@ -432,11 +396,11 @@
         Return Traversable child in self
         """
 
-    @abc.abstractmethod
     def __truediv__(self, child):
         """
         Return Traversable child in self
         """
+        return self.joinpath(child)
 
     @abc.abstractmethod
     def open(self, mode='r', *args, **kwargs):
@@ -449,14 +413,18 @@
         """
 
     @abc.abstractproperty
-    def name(self):
-        # type: () -> str
+    def name(self) -> str:
         """
         The base name of this object without any parent references.
         """
 
 
 class TraversableResources(ResourceReader):
+    """
+    The required interface for providing traversable
+    resources.
+    """
+
     @abc.abstractmethod
     def files(self):
         """Return a Traversable object for the loaded package."""
@@ -468,7 +436,7 @@
         raise FileNotFoundError(resource)
 
     def is_resource(self, path):
-        return self.files().joinpath(path).isfile()
+        return self.files().joinpath(path).is_file()
 
     def contents(self):
         return (item.name for item in self.files().iterdir())
diff --git a/Lib/importlib/machinery.py b/Lib/importlib/machinery.py
index 1b2b5c9..9a7757f 100644
--- a/Lib/importlib/machinery.py
+++ b/Lib/importlib/machinery.py
@@ -1,7 +1,5 @@
 """The machinery of importlib: finders, loaders, hooks, etc."""
 
-import _imp
-
 from ._bootstrap import ModuleSpec
 from ._bootstrap import BuiltinImporter
 from ._bootstrap import FrozenImporter
diff --git a/Lib/importlib/metadata.py b/Lib/importlib/metadata.py
deleted file mode 100644
index ffa0cba..0000000
--- a/Lib/importlib/metadata.py
+++ /dev/null
@@ -1,586 +0,0 @@
-import io
-import os
-import re
-import abc
-import csv
-import sys
-import email
-import pathlib
-import zipfile
-import operator
-import functools
-import itertools
-import posixpath
-import collections
-
-from configparser import ConfigParser
-from contextlib import suppress
-from importlib import import_module
-from importlib.abc import MetaPathFinder
-from itertools import starmap
-
-
-__all__ = [
-    'Distribution',
-    'DistributionFinder',
-    'PackageNotFoundError',
-    'distribution',
-    'distributions',
-    'entry_points',
-    'files',
-    'metadata',
-    'requires',
-    'version',
-    ]
-
-
-class PackageNotFoundError(ModuleNotFoundError):
-    """The package was not found."""
-
-
-class EntryPoint(
-        collections.namedtuple('EntryPointBase', 'name value group')):
-    """An entry point as defined by Python packaging conventions.
-
-    See `the packaging docs on entry points
-    <https://packaging.python.org/specifications/entry-points/>`_
-    for more information.
-    """
-
-    pattern = re.compile(
-        r'(?P<module>[\w.]+)\s*'
-        r'(:\s*(?P<attr>[\w.]+))?\s*'
-        r'(?P<extras>\[.*\])?\s*$'
-        )
-    """
-    A regular expression describing the syntax for an entry point,
-    which might look like:
-
-        - module
-        - package.module
-        - package.module:attribute
-        - package.module:object.attribute
-        - package.module:attr [extra1, extra2]
-
-    Other combinations are possible as well.
-
-    The expression is lenient about whitespace around the ':',
-    following the attr, and following any extras.
-    """
-
-    def load(self):
-        """Load the entry point from its definition. If only a module
-        is indicated by the value, return that module. Otherwise,
-        return the named object.
-        """
-        match = self.pattern.match(self.value)
-        module = import_module(match.group('module'))
-        attrs = filter(None, (match.group('attr') or '').split('.'))
-        return functools.reduce(getattr, attrs, module)
-
-    @property
-    def module(self):
-        match = self.pattern.match(self.value)
-        return match.group('module')
-
-    @property
-    def attr(self):
-        match = self.pattern.match(self.value)
-        return match.group('attr')
-
-    @property
-    def extras(self):
-        match = self.pattern.match(self.value)
-        return list(re.finditer(r'\w+', match.group('extras') or ''))
-
-    @classmethod
-    def _from_config(cls, config):
-        return [
-            cls(name, value, group)
-            for group in config.sections()
-            for name, value in config.items(group)
-            ]
-
-    @classmethod
-    def _from_text(cls, text):
-        config = ConfigParser(delimiters='=')
-        # case sensitive: https://stackoverflow.com/q/1611799/812183
-        config.optionxform = str
-        try:
-            config.read_string(text)
-        except AttributeError:  # pragma: nocover
-            # Python 2 has no read_string
-            config.readfp(io.StringIO(text))
-        return EntryPoint._from_config(config)
-
-    def __iter__(self):
-        """
-        Supply iter so one may construct dicts of EntryPoints easily.
-        """
-        return iter((self.name, self))
-
-    def __reduce__(self):
-        return (
-            self.__class__,
-            (self.name, self.value, self.group),
-            )
-
-
-class PackagePath(pathlib.PurePosixPath):
-    """A reference to a path in a package"""
-
-    def read_text(self, encoding='utf-8'):
-        with self.locate().open(encoding=encoding) as stream:
-            return stream.read()
-
-    def read_binary(self):
-        with self.locate().open('rb') as stream:
-            return stream.read()
-
-    def locate(self):
-        """Return a path-like object for this path"""
-        return self.dist.locate_file(self)
-
-
-class FileHash:
-    def __init__(self, spec):
-        self.mode, _, self.value = spec.partition('=')
-
-    def __repr__(self):
-        return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
-
-
-class Distribution:
-    """A Python distribution package."""
-
-    @abc.abstractmethod
-    def read_text(self, filename):
-        """Attempt to load metadata file given by the name.
-
-        :param filename: The name of the file in the distribution info.
-        :return: The text if found, otherwise None.
-        """
-
-    @abc.abstractmethod
-    def locate_file(self, path):
-        """
-        Given a path to a file in this distribution, return a path
-        to it.
-        """
-
-    @classmethod
-    def from_name(cls, name):
-        """Return the Distribution for the given package name.
-
-        :param name: The name of the distribution package to search for.
-        :return: The Distribution instance (or subclass thereof) for the named
-            package, if found.
-        :raises PackageNotFoundError: When the named package's distribution
-            metadata cannot be found.
-        """
-        for resolver in cls._discover_resolvers():
-            dists = resolver(DistributionFinder.Context(name=name))
-            dist = next(iter(dists), None)
-            if dist is not None:
-                return dist
-        else:
-            raise PackageNotFoundError(name)
-
-    @classmethod
-    def discover(cls, **kwargs):
-        """Return an iterable of Distribution objects for all packages.
-
-        Pass a ``context`` or pass keyword arguments for constructing
-        a context.
-
-        :context: A ``DistributionFinder.Context`` object.
-        :return: Iterable of Distribution objects for all packages.
-        """
-        context = kwargs.pop('context', None)
-        if context and kwargs:
-            raise ValueError("cannot accept context and kwargs")
-        context = context or DistributionFinder.Context(**kwargs)
-        return itertools.chain.from_iterable(
-            resolver(context)
-            for resolver in cls._discover_resolvers()
-            )
-
-    @staticmethod
-    def at(path):
-        """Return a Distribution for the indicated metadata path
-
-        :param path: a string or path-like object
-        :return: a concrete Distribution instance for the path
-        """
-        return PathDistribution(pathlib.Path(path))
-
-    @staticmethod
-    def _discover_resolvers():
-        """Search the meta_path for resolvers."""
-        declared = (
-            getattr(finder, 'find_distributions', None)
-            for finder in sys.meta_path
-            )
-        return filter(None, declared)
-
-    @classmethod
-    def _local(cls, root='.'):
-        from pep517 import build, meta
-        system = build.compat_system(root)
-        builder = functools.partial(
-            meta.build,
-            source_dir=root,
-            system=system,
-            )
-        return PathDistribution(zipfile.Path(meta.build_as_zip(builder)))
-
-    @property
-    def metadata(self):
-        """Return the parsed metadata for this Distribution.
-
-        The returned object will have keys that name the various bits of
-        metadata.  See PEP 566 for details.
-        """
-        text = (
-            self.read_text('METADATA')
-            or self.read_text('PKG-INFO')
-            # This last clause is here to support old egg-info files.  Its
-            # effect is to just end up using the PathDistribution's self._path
-            # (which points to the egg-info file) attribute unchanged.
-            or self.read_text('')
-            )
-        return email.message_from_string(text)
-
-    @property
-    def version(self):
-        """Return the 'Version' metadata for the distribution package."""
-        return self.metadata['Version']
-
-    @property
-    def entry_points(self):
-        return EntryPoint._from_text(self.read_text('entry_points.txt'))
-
-    @property
-    def files(self):
-        """Files in this distribution.
-
-        :return: List of PackagePath for this distribution or None
-
-        Result is `None` if the metadata file that enumerates files
-        (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
-        missing.
-        Result may be empty if the metadata exists but is empty.
-        """
-        file_lines = self._read_files_distinfo() or self._read_files_egginfo()
-
-        def make_file(name, hash=None, size_str=None):
-            result = PackagePath(name)
-            result.hash = FileHash(hash) if hash else None
-            result.size = int(size_str) if size_str else None
-            result.dist = self
-            return result
-
-        return file_lines and list(starmap(make_file, csv.reader(file_lines)))
-
-    def _read_files_distinfo(self):
-        """
-        Read the lines of RECORD
-        """
-        text = self.read_text('RECORD')
-        return text and text.splitlines()
-
-    def _read_files_egginfo(self):
-        """
-        SOURCES.txt might contain literal commas, so wrap each line
-        in quotes.
-        """
-        text = self.read_text('SOURCES.txt')
-        return text and map('"{}"'.format, text.splitlines())
-
-    @property
-    def requires(self):
-        """Generated requirements specified for this Distribution"""
-        reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
-        return reqs and list(reqs)
-
-    def _read_dist_info_reqs(self):
-        return self.metadata.get_all('Requires-Dist')
-
-    def _read_egg_info_reqs(self):
-        source = self.read_text('requires.txt')
-        return source and self._deps_from_requires_text(source)
-
-    @classmethod
-    def _deps_from_requires_text(cls, source):
-        section_pairs = cls._read_sections(source.splitlines())
-        sections = {
-            section: list(map(operator.itemgetter('line'), results))
-            for section, results in
-            itertools.groupby(section_pairs, operator.itemgetter('section'))
-            }
-        return cls._convert_egg_info_reqs_to_simple_reqs(sections)
-
-    @staticmethod
-    def _read_sections(lines):
-        section = None
-        for line in filter(None, lines):
-            section_match = re.match(r'\[(.*)\]$', line)
-            if section_match:
-                section = section_match.group(1)
-                continue
-            yield locals()
-
-    @staticmethod
-    def _convert_egg_info_reqs_to_simple_reqs(sections):
-        """
-        Historically, setuptools would solicit and store 'extra'
-        requirements, including those with environment markers,
-        in separate sections. More modern tools expect each
-        dependency to be defined separately, with any relevant
-        extras and environment markers attached directly to that
-        requirement. This method converts the former to the
-        latter. See _test_deps_from_requires_text for an example.
-        """
-        def make_condition(name):
-            return name and 'extra == "{name}"'.format(name=name)
-
-        def parse_condition(section):
-            section = section or ''
-            extra, sep, markers = section.partition(':')
-            if extra and markers:
-                markers = '({markers})'.format(markers=markers)
-            conditions = list(filter(None, [markers, make_condition(extra)]))
-            return '; ' + ' and '.join(conditions) if conditions else ''
-
-        for section, deps in sections.items():
-            for dep in deps:
-                yield dep + parse_condition(section)
-
-
-class DistributionFinder(MetaPathFinder):
-    """
-    A MetaPathFinder capable of discovering installed distributions.
-    """
-
-    class Context:
-        """
-        Keyword arguments presented by the caller to
-        ``distributions()`` or ``Distribution.discover()``
-        to narrow the scope of a search for distributions
-        in all DistributionFinders.
-
-        Each DistributionFinder may expect any parameters
-        and should attempt to honor the canonical
-        parameters defined below when appropriate.
-        """
-
-        name = None
-        """
-        Specific name for which a distribution finder should match.
-        A name of ``None`` matches all distributions.
-        """
-
-        def __init__(self, **kwargs):
-            vars(self).update(kwargs)
-
-        @property
-        def path(self):
-            """
-            The path that a distribution finder should search.
-
-            Typically refers to Python package paths and defaults
-            to ``sys.path``.
-            """
-            return vars(self).get('path', sys.path)
-
-    @abc.abstractmethod
-    def find_distributions(self, context=Context()):
-        """
-        Find distributions.
-
-        Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching the ``context``,
-        a DistributionFinder.Context instance.
-        """
-
-
-class FastPath:
-    """
-    Micro-optimized class for searching a path for
-    children.
-    """
-
-    def __init__(self, root):
-        self.root = root
-        self.base = os.path.basename(self.root).lower()
-
-    def joinpath(self, child):
-        return pathlib.Path(self.root, child)
-
-    def children(self):
-        with suppress(Exception):
-            return os.listdir(self.root or '')
-        with suppress(Exception):
-            return self.zip_children()
-        return []
-
-    def zip_children(self):
-        zip_path = zipfile.Path(self.root)
-        names = zip_path.root.namelist()
-        self.joinpath = zip_path.joinpath
-
-        return dict.fromkeys(
-            child.split(posixpath.sep, 1)[0]
-            for child in names
-            )
-
-    def is_egg(self, search):
-        base = self.base
-        return (
-            base == search.versionless_egg_name
-            or base.startswith(search.prefix)
-            and base.endswith('.egg'))
-
-    def search(self, name):
-        for child in self.children():
-            n_low = child.lower()
-            if (n_low in name.exact_matches
-                    or n_low.startswith(name.prefix)
-                    and n_low.endswith(name.suffixes)
-                    # legacy case:
-                    or self.is_egg(name) and n_low == 'egg-info'):
-                yield self.joinpath(child)
-
-
-class Prepared:
-    """
-    A prepared search for metadata on a possibly-named package.
-    """
-    normalized = ''
-    prefix = ''
-    suffixes = '.dist-info', '.egg-info'
-    exact_matches = [''][:0]
-    versionless_egg_name = ''
-
-    def __init__(self, name):
-        self.name = name
-        if name is None:
-            return
-        self.normalized = name.lower().replace('-', '_')
-        self.prefix = self.normalized + '-'
-        self.exact_matches = [
-            self.normalized + suffix for suffix in self.suffixes]
-        self.versionless_egg_name = self.normalized + '.egg'
-
-
-class MetadataPathFinder(DistributionFinder):
-    @classmethod
-    def find_distributions(cls, context=DistributionFinder.Context()):
-        """
-        Find distributions.
-
-        Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching ``context.name``
-        (or all names if ``None`` indicated) along the paths in the list
-        of directories ``context.path``.
-        """
-        found = cls._search_paths(context.name, context.path)
-        return map(PathDistribution, found)
-
-    @classmethod
-    def _search_paths(cls, name, paths):
-        """Find metadata directories in paths heuristically."""
-        return itertools.chain.from_iterable(
-            path.search(Prepared(name))
-            for path in map(FastPath, paths)
-            )
-
-
-class PathDistribution(Distribution):
-    def __init__(self, path):
-        """Construct a distribution from a path to the metadata directory.
-
-        :param path: A pathlib.Path or similar object supporting
-                     .joinpath(), __div__, .parent, and .read_text().
-        """
-        self._path = path
-
-    def read_text(self, filename):
-        with suppress(FileNotFoundError, IsADirectoryError, KeyError,
-                      NotADirectoryError, PermissionError):
-            return self._path.joinpath(filename).read_text(encoding='utf-8')
-    read_text.__doc__ = Distribution.read_text.__doc__
-
-    def locate_file(self, path):
-        return self._path.parent / path
-
-
-def distribution(distribution_name):
-    """Get the ``Distribution`` instance for the named package.
-
-    :param distribution_name: The name of the distribution package as a string.
-    :return: A ``Distribution`` instance (or subclass thereof).
-    """
-    return Distribution.from_name(distribution_name)
-
-
-def distributions(**kwargs):
-    """Get all ``Distribution`` instances in the current environment.
-
-    :return: An iterable of ``Distribution`` instances.
-    """
-    return Distribution.discover(**kwargs)
-
-
-def metadata(distribution_name):
-    """Get the metadata for the named package.
-
-    :param distribution_name: The name of the distribution package to query.
-    :return: An email.Message containing the parsed metadata.
-    """
-    return Distribution.from_name(distribution_name).metadata
-
-
-def version(distribution_name):
-    """Get the version string for the named package.
-
-    :param distribution_name: The name of the distribution package to query.
-    :return: The version string for the package as defined in the package's
-        "Version" metadata key.
-    """
-    return distribution(distribution_name).version
-
-
-def entry_points():
-    """Return EntryPoint objects for all installed packages.
-
-    :return: EntryPoint objects for all installed packages.
-    """
-    eps = itertools.chain.from_iterable(
-        dist.entry_points for dist in distributions())
-    by_group = operator.attrgetter('group')
-    ordered = sorted(eps, key=by_group)
-    grouped = itertools.groupby(ordered, by_group)
-    return {
-        group: tuple(eps)
-        for group, eps in grouped
-        }
-
-
-def files(distribution_name):
-    """Return a list of files for the named package.
-
-    :param distribution_name: The name of the distribution package to query.
-    :return: List of files composing the distribution.
-    """
-    return distribution(distribution_name).files
-
-
-def requires(distribution_name):
-    """
-    Return a list of requirements for the named package.
-
-    :return: An iterator of requirements, suitable for
-    packaging.requirement.Requirement.
-    """
-    return distribution(distribution_name).requires
diff --git a/Lib/importlib/metadata/__init__.py b/Lib/importlib/metadata/__init__.py
new file mode 100644
index 0000000..b3063cd
--- /dev/null
+++ b/Lib/importlib/metadata/__init__.py
@@ -0,0 +1,1045 @@
+import os
+import re
+import abc
+import csv
+import sys
+import email
+import pathlib
+import zipfile
+import operator
+import textwrap
+import warnings
+import functools
+import itertools
+import posixpath
+import collections
+
+from . import _adapters, _meta
+from ._collections import FreezableDefaultDict, Pair
+from ._functools import method_cache
+from ._itertools import unique_everseen
+from ._meta import PackageMetadata, SimplePath
+
+from contextlib import suppress
+from importlib import import_module
+from importlib.abc import MetaPathFinder
+from itertools import starmap
+from typing import List, Mapping, Optional, Union
+
+
+__all__ = [
+    'Distribution',
+    'DistributionFinder',
+    'PackageMetadata',
+    'PackageNotFoundError',
+    'distribution',
+    'distributions',
+    'entry_points',
+    'files',
+    'metadata',
+    'packages_distributions',
+    'requires',
+    'version',
+]
+
+
+class PackageNotFoundError(ModuleNotFoundError):
+    """The package was not found."""
+
+    def __str__(self):
+        return f"No package metadata was found for {self.name}"
+
+    @property
+    def name(self):
+        (name,) = self.args
+        return name
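+
+# Usage sketch (hypothetical package name): the module-level helpers below
+# raise this error for unknown packages:
+#
+#     try:
+#         version('no-such-package')
+#     except PackageNotFoundError as exc:
+#         print(exc)  # No package metadata was found for no-such-package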
+
+
+class Sectioned:
+    """
+    A simple entry point config parser for performance
+
+    >>> for item in Sectioned.read(Sectioned._sample):
+    ...     print(item)
+    Pair(name='sec1', value='# comments ignored')
+    Pair(name='sec1', value='a = 1')
+    Pair(name='sec1', value='b = 2')
+    Pair(name='sec2', value='a = 2')
+
+    >>> res = Sectioned.section_pairs(Sectioned._sample)
+    >>> item = next(res)
+    >>> item.name
+    'sec1'
+    >>> item.value
+    Pair(name='a', value='1')
+    >>> item = next(res)
+    >>> item.value
+    Pair(name='b', value='2')
+    >>> item = next(res)
+    >>> item.name
+    'sec2'
+    >>> item.value
+    Pair(name='a', value='2')
+    >>> list(res)
+    []
+    """
+
+    _sample = textwrap.dedent(
+        """
+        [sec1]
+        # comments ignored
+        a = 1
+        b = 2
+
+        [sec2]
+        a = 2
+        """
+    ).lstrip()
+
+    @classmethod
+    def section_pairs(cls, text):
+        return (
+            section._replace(value=Pair.parse(section.value))
+            for section in cls.read(text, filter_=cls.valid)
+            if section.name is not None
+        )
+
+    @staticmethod
+    def read(text, filter_=None):
+        lines = filter(filter_, map(str.strip, text.splitlines()))
+        name = None
+        for value in lines:
+            section_match = value.startswith('[') and value.endswith(']')
+            if section_match:
+                name = value.strip('[]')
+                continue
+            yield Pair(name, value)
+
+    @staticmethod
+    def valid(line):
+        return line and not line.startswith('#')
+
+
+class EntryPoint(
+        collections.namedtuple('EntryPointBase', 'name value group')):
+    """An entry point as defined by Python packaging conventions.
+
+    See `the packaging docs on entry points
+    <https://packaging.python.org/specifications/entry-points/>`_
+    for more information.
+
+    >>> ep = EntryPoint(
+    ...     name=None, group=None, value='package.module:attr [extra1, extra2]')
+    >>> ep.module
+    'package.module'
+    >>> ep.attr
+    'attr'
+    >>> ep.extras
+    ['extra1', 'extra2']
+    """
+
+    pattern = re.compile(
+        r'(?P<module>[\w.]+)\s*'
+        r'(:\s*(?P<attr>[\w.]+)\s*)?'
+        r'((?P<extras>\[.*\])\s*)?$'
+    )
+    """
+    A regular expression describing the syntax for an entry point,
+    which might look like:
+
+        - module
+        - package.module
+        - package.module:attribute
+        - package.module:object.attribute
+        - package.module:attr [extra1, extra2]
+
+    Other combinations are possible as well.
+
+    The expression is lenient about whitespace around the ':',
+    following the attr, and following any extras.
+    """
+
+    dist: Optional['Distribution'] = None
+
+    def load(self):
+        """Load the entry point from its definition. If only a module
+        is indicated by the value, return that module. Otherwise,
+        return the named object.
+        """
+        match = self.pattern.match(self.value)
+        module = import_module(match.group('module'))
+        attrs = filter(None, (match.group('attr') or '').split('.'))
+        return functools.reduce(getattr, attrs, module)
+
+    @property
+    def module(self):
+        match = self.pattern.match(self.value)
+        return match.group('module')
+
+    @property
+    def attr(self):
+        match = self.pattern.match(self.value)
+        return match.group('attr')
+
+    @property
+    def extras(self):
+        match = self.pattern.match(self.value)
+        return re.findall(r'\w+', match.group('extras') or '')
+
+    def _for(self, dist):
+        self.dist = dist
+        return self
+
+    def __iter__(self):
+        """
+        Supply iter so one may construct dicts of EntryPoints by name.
+        """
+        msg = (
+            "Construction of dict of EntryPoints is deprecated in "
+            "favor of EntryPoints."
+        )
+        warnings.warn(msg, DeprecationWarning)
+        return iter((self.name, self))
+
+    def __reduce__(self):
+        return (
+            self.__class__,
+            (self.name, self.value, self.group),
+        )
+
+    def matches(self, **params):
+        """
+        EntryPoint matches the given parameters.
+
+        >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]')
+        >>> ep.matches(group='foo')
+        True
+        >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]')
+        True
+        >>> ep.matches(group='foo', name='other')
+        False
+        >>> ep.matches()
+        True
+        >>> ep.matches(extras=['extra1', 'extra2'])
+        True
+        >>> ep.matches(module='bing')
+        True
+        >>> ep.matches(attr='bong')
+        True
+        """
+        attrs = (getattr(self, param) for param in params)
+        return all(map(operator.eq, params.values(), attrs))
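+
+# Load sketch: for a value like 'json:dumps' (stdlib example), load()
+# imports the module and resolves the attribute:
+#
+#     ep = EntryPoint(name='d', group='g', value='json:dumps')
+#     ep.load() is __import__('json').dumps    # True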
+
+
+class DeprecatedList(list):
+    """
+    Allow an otherwise immutable object to implement mutability
+    for compatibility.
+
+    >>> recwarn = getfixture('recwarn')
+    >>> dl = DeprecatedList(range(3))
+    >>> dl[0] = 1
+    >>> dl.append(3)
+    >>> del dl[3]
+    >>> dl.reverse()
+    >>> dl.sort()
+    >>> dl.extend([4])
+    >>> dl.pop(-1)
+    4
+    >>> dl.remove(1)
+    >>> dl += [5]
+    >>> dl + [6]
+    [1, 2, 5, 6]
+    >>> dl + (6,)
+    [1, 2, 5, 6]
+    >>> dl.insert(0, 0)
+    >>> dl
+    [0, 1, 2, 5]
+    >>> dl == [0, 1, 2, 5]
+    True
+    >>> dl == (0, 1, 2, 5)
+    True
+    >>> len(recwarn)
+    1
+    """
+
+    __slots__ = ()
+
+    _warn = functools.partial(
+        warnings.warn,
+        "EntryPoints list interface is deprecated. Cast to list if needed.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    def __setitem__(self, *args, **kwargs):
+        self._warn()
+        return super().__setitem__(*args, **kwargs)
+
+    def __delitem__(self, *args, **kwargs):
+        self._warn()
+        return super().__delitem__(*args, **kwargs)
+
+    def append(self, *args, **kwargs):
+        self._warn()
+        return super().append(*args, **kwargs)
+
+    def reverse(self, *args, **kwargs):
+        self._warn()
+        return super().reverse(*args, **kwargs)
+
+    def extend(self, *args, **kwargs):
+        self._warn()
+        return super().extend(*args, **kwargs)
+
+    def pop(self, *args, **kwargs):
+        self._warn()
+        return super().pop(*args, **kwargs)
+
+    def remove(self, *args, **kwargs):
+        self._warn()
+        return super().remove(*args, **kwargs)
+
+    def __iadd__(self, *args, **kwargs):
+        self._warn()
+        return super().__iadd__(*args, **kwargs)
+
+    def __add__(self, other):
+        if not isinstance(other, tuple):
+            self._warn()
+            other = tuple(other)
+        return self.__class__(tuple(self) + other)
+
+    def insert(self, *args, **kwargs):
+        self._warn()
+        return super().insert(*args, **kwargs)
+
+    def sort(self, *args, **kwargs):
+        self._warn()
+        return super().sort(*args, **kwargs)
+
+    def __eq__(self, other):
+        if not isinstance(other, tuple):
+            self._warn()
+            other = tuple(other)
+
+        return tuple(self).__eq__(other)
+
+
+class EntryPoints(DeprecatedList):
+    """
+    An immutable collection of selectable EntryPoint objects.
+    """
+
+    __slots__ = ()
+
+    def __getitem__(self, name):  # -> EntryPoint:
+        """
+        Get the EntryPoint in self matching name.
+        """
+        if isinstance(name, int):
+            warnings.warn(
+                "Accessing entry points by index is deprecated. "
+                "Cast to tuple if needed.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            return super().__getitem__(name)
+        try:
+            return next(iter(self.select(name=name)))
+        except StopIteration:
+            raise KeyError(name)
+
+    def select(self, **params):
+        """
+        Select entry points from self that match the
+        given parameters (typically group and/or name).
+        """
+        return EntryPoints(ep for ep in self if ep.matches(**params))
+
+    @property
+    def names(self):
+        """
+        Return the set of all names of all entry points.
+        """
+        return set(ep.name for ep in self)
+
+    @property
+    def groups(self):
+        """
+        Return the set of all groups of all entry points.
+
+        For coverage while SelectableGroups is present.
+        >>> EntryPoints().groups
+        set()
+        """
+        return set(ep.group for ep in self)
+
+    @classmethod
+    def _from_text_for(cls, text, dist):
+        return cls(ep._for(dist) for ep in cls._from_text(text))
+
+    @classmethod
+    def _from_text(cls, text):
+        return itertools.starmap(EntryPoint, cls._parse_groups(text or ''))
+
+    @staticmethod
+    def _parse_groups(text):
+        return (
+            (item.value.name, item.value.value, item.name)
+            for item in Sectioned.section_pairs(text)
+        )
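+
+# Selection sketch (group and script names hypothetical):
+#
+#     eps = entry_points(group='console_scripts')   # an EntryPoints
+#     'pip' in eps.names
+#     eps['pip'].load()                             # KeyError if absent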
+
+
+class Deprecated:
+    """
+    Compatibility add-in for mapping to indicate that
+    mapping behavior is deprecated.
+
+    >>> recwarn = getfixture('recwarn')
+    >>> class DeprecatedDict(Deprecated, dict): pass
+    >>> dd = DeprecatedDict(foo='bar')
+    >>> dd.get('baz', None)
+    >>> dd['foo']
+    'bar'
+    >>> list(dd)
+    ['foo']
+    >>> list(dd.keys())
+    ['foo']
+    >>> 'foo' in dd
+    True
+    >>> list(dd.values())
+    ['bar']
+    >>> len(recwarn)
+    1
+    """
+
+    _warn = functools.partial(
+        warnings.warn,
+        "SelectableGroups dict interface is deprecated. Use select.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    def __getitem__(self, name):
+        self._warn()
+        return super().__getitem__(name)
+
+    def get(self, name, default=None):
+        self._warn()
+        return super().get(name, default)
+
+    def __iter__(self):
+        self._warn()
+        return super().__iter__()
+
+    def __contains__(self, *args):
+        self._warn()
+        return super().__contains__(*args)
+
+    def keys(self):
+        self._warn()
+        return super().keys()
+
+    def values(self):
+        self._warn()
+        return super().values()
+
+
+class SelectableGroups(Deprecated, dict):
+    """
+    A backward- and forward-compatible result from
+    entry_points that fully implements the dict interface.
+    """
+
+    @classmethod
+    def load(cls, eps):
+        by_group = operator.attrgetter('group')
+        ordered = sorted(eps, key=by_group)
+        grouped = itertools.groupby(ordered, by_group)
+        return cls((group, EntryPoints(eps)) for group, eps in grouped)
+
+    @property
+    def _all(self):
+        """
+        Reconstruct a list of all entrypoints from the groups.
+        """
+        groups = super(Deprecated, self).values()
+        return EntryPoints(itertools.chain.from_iterable(groups))
+
+    @property
+    def groups(self):
+        return self._all.groups
+
+    @property
+    def names(self):
+        """
+        for coverage:
+        >>> SelectableGroups().names
+        set()
+        """
+        return self._all.names
+
+    def select(self, **params):
+        if not params:
+            return self
+        return self._all.select(**params)
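+
+# Compatibility sketch: legacy dict access warns, select() does not
+# (group name hypothetical):
+#
+#     groups = entry_points()                   # SelectableGroups
+#     groups['console_scripts']                 # DeprecationWarning
+#     groups.select(group='console_scripts')    # preferred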
+
+
+class PackagePath(pathlib.PurePosixPath):
+    """A reference to a path in a package"""
+
+    def read_text(self, encoding='utf-8'):
+        with self.locate().open(encoding=encoding) as stream:
+            return stream.read()
+
+    def read_binary(self):
+        with self.locate().open('rb') as stream:
+            return stream.read()
+
+    def locate(self):
+        """Return a path-like object for this path"""
+        return self.dist.locate_file(self)
+
+
+class FileHash:
+    def __init__(self, spec):
+        self.mode, _, self.value = spec.partition('=')
+
+    def __repr__(self):
+        return f'<FileHash mode: {self.mode} value: {self.value}>'
+
+
+class Distribution:
+    """A Python distribution package."""
+
+    @abc.abstractmethod
+    def read_text(self, filename):
+        """Attempt to load metadata file given by the name.
+
+        :param filename: The name of the file in the distribution info.
+        :return: The text if found, otherwise None.
+        """
+
+    @abc.abstractmethod
+    def locate_file(self, path):
+        """
+        Given a path to a file in this distribution, return a path
+        to it.
+        """
+
+    @classmethod
+    def from_name(cls, name):
+        """Return the Distribution for the given package name.
+
+        :param name: The name of the distribution package to search for.
+        :return: The Distribution instance (or subclass thereof) for the named
+            package, if found.
+        :raises PackageNotFoundError: When the named package's distribution
+            metadata cannot be found.
+        """
+        for resolver in cls._discover_resolvers():
+            dists = resolver(DistributionFinder.Context(name=name))
+            dist = next(iter(dists), None)
+            if dist is not None:
+                return dist
+        raise PackageNotFoundError(name)
+
+    @classmethod
+    def discover(cls, **kwargs):
+        """Return an iterable of Distribution objects for all packages.
+
+        Pass a ``context`` or pass keyword arguments for constructing
+        a context.
+
+        :context: A ``DistributionFinder.Context`` object.
+        :return: Iterable of Distribution objects for all packages.
+        """
+        context = kwargs.pop('context', None)
+        if context and kwargs:
+            raise ValueError("cannot accept context and kwargs")
+        context = context or DistributionFinder.Context(**kwargs)
+        return itertools.chain.from_iterable(
+            resolver(context) for resolver in cls._discover_resolvers()
+        )
+
+    @staticmethod
+    def at(path):
+        """Return a Distribution for the indicated metadata path
+
+        :param path: a string or path-like object
+        :return: a concrete Distribution instance for the path
+        """
+        return PathDistribution(pathlib.Path(path))
+
+    @staticmethod
+    def _discover_resolvers():
+        """Search the meta_path for resolvers."""
+        declared = (
+            getattr(finder, 'find_distributions', None) for finder in sys.meta_path
+        )
+        return filter(None, declared)
+
+    @classmethod
+    def _local(cls, root='.'):
+        from pep517 import build, meta
+
+        system = build.compat_system(root)
+        builder = functools.partial(
+            meta.build,
+            source_dir=root,
+            system=system,
+        )
+        return PathDistribution(zipfile.Path(meta.build_as_zip(builder)))
+
+    @property
+    def metadata(self) -> _meta.PackageMetadata:
+        """Return the parsed metadata for this Distribution.
+
+        The returned object will have keys that name the various bits of
+        metadata.  See PEP 566 for details.
+        """
+        text = (
+            self.read_text('METADATA')
+            or self.read_text('PKG-INFO')
+            # This last clause is here to support old egg-info files.  Its
+            # effect is to just end up using the PathDistribution's self._path
+            # (which points to the egg-info file) attribute unchanged.
+            or self.read_text('')
+        )
+        return _adapters.Message(email.message_from_string(text))
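+
+    # Access sketch: the result is mapping-like (see PackageMetadata), so
+    # md['Name'], md['Version'] and md.get_all('Requires-Dist') all work;
+    # the module-level metadata() helper below returns the same object.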
+
+    @property
+    def name(self):
+        """Return the 'Name' metadata for the distribution package."""
+        return self.metadata['Name']
+
+    @property
+    def _normalized_name(self):
+        """Return a normalized version of the name."""
+        return Prepared.normalize(self.name)
+
+    @property
+    def version(self):
+        """Return the 'Version' metadata for the distribution package."""
+        return self.metadata['Version']
+
+    @property
+    def entry_points(self):
+        return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
+
+    @property
+    def files(self):
+        """Files in this distribution.
+
+        :return: List of PackagePath for this distribution or None
+
+        Result is `None` if the metadata file that enumerates files
+        (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
+        missing.
+        Result may be empty if the metadata exists but is empty.
+        """
+        file_lines = self._read_files_distinfo() or self._read_files_egginfo()
+
+        def make_file(name, hash=None, size_str=None):
+            result = PackagePath(name)
+            result.hash = FileHash(hash) if hash else None
+            result.size = int(size_str) if size_str else None
+            result.dist = self
+            return result
+
+        return file_lines and list(starmap(make_file, csv.reader(file_lines)))
+
+    def _read_files_distinfo(self):
+        """
+        Read the lines of RECORD
+        """
+        text = self.read_text('RECORD')
+        return text and text.splitlines()
+
+    def _read_files_egginfo(self):
+        """
+        SOURCES.txt might contain literal commas, so wrap each line
+        in quotes.
+        """
+        text = self.read_text('SOURCES.txt')
+        return text and map('"{}"'.format, text.splitlines())
+
+    @property
+    def requires(self):
+        """Generated requirements specified for this Distribution"""
+        reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
+        return reqs and list(reqs)
+
+    def _read_dist_info_reqs(self):
+        return self.metadata.get_all('Requires-Dist')
+
+    def _read_egg_info_reqs(self):
+        source = self.read_text('requires.txt')
+        return None if source is None else self._deps_from_requires_text(source)
+
+    @classmethod
+    def _deps_from_requires_text(cls, source):
+        return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
+
+    @staticmethod
+    def _convert_egg_info_reqs_to_simple_reqs(sections):
+        """
+        Historically, setuptools would solicit and store 'extra'
+        requirements, including those with environment markers,
+        in separate sections. More modern tools expect each
+        dependency to be defined separately, with any relevant
+        extras and environment markers attached directly to that
+        requirement. This method converts the former to the
+        latter. See _test_deps_from_requires_text for an example.
+        """
+
+        def make_condition(name):
+            return name and f'extra == "{name}"'
+
+        def quoted_marker(section):
+            section = section or ''
+            extra, sep, markers = section.partition(':')
+            if extra and markers:
+                markers = f'({markers})'
+            conditions = list(filter(None, [markers, make_condition(extra)]))
+            return '; ' + ' and '.join(conditions) if conditions else ''
+
+        def url_req_space(req):
+            """
+            PEP 508 requires a space between the url_spec and the quoted_marker.
+            Ref python/importlib_metadata#357.
+            """
+            # '@' is uniquely indicative of a url_req.
+            return ' ' * ('@' in req)
+
+        for section in sections:
+            space = url_req_space(section.value)
+            yield section.value + space + quoted_marker(section.name)
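+
+    # Conversion sketch: a requires.txt section such as
+    #
+    #     [test:python_version < "3"]
+    #     pytest
+    #
+    # yields the single PEP 508 requirement
+    #
+    #     pytest; (python_version < "3") and extra == "test"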
+
+
+class DistributionFinder(MetaPathFinder):
+    """
+    A MetaPathFinder capable of discovering installed distributions.
+    """
+
+    class Context:
+        """
+        Keyword arguments presented by the caller to
+        ``distributions()`` or ``Distribution.discover()``
+        to narrow the scope of a search for distributions
+        in all DistributionFinders.
+
+        Each DistributionFinder may expect any parameters
+        and should attempt to honor the canonical
+        parameters defined below when appropriate.
+        """
+
+        name = None
+        """
+        Specific name for which a distribution finder should match.
+        A name of ``None`` matches all distributions.
+        """
+
+        def __init__(self, **kwargs):
+            vars(self).update(kwargs)
+
+        @property
+        def path(self):
+            """
+            The sequence of directory paths that a distribution finder
+            should search.
+
+            Typically refers to Python installed package paths such as
+            "site-packages" directories and defaults to ``sys.path``.
+            """
+            return vars(self).get('path', sys.path)
+
+    @abc.abstractmethod
+    def find_distributions(self, context=Context()):
+        """
+        Find distributions.
+
+        Return an iterable of all Distribution instances capable of
+        loading the metadata for packages matching the ``context``,
+        a DistributionFinder.Context instance.
+        """
+
+
+class FastPath:
+    """
+    Micro-optimized class for searching a path for
+    children.
+    """
+
+    @functools.lru_cache()  # type: ignore
+    def __new__(cls, root):
+        return super().__new__(cls)
+
+    def __init__(self, root):
+        self.root = root
+
+    def joinpath(self, child):
+        return pathlib.Path(self.root, child)
+
+    def children(self):
+        with suppress(Exception):
+            return os.listdir(self.root or '.')
+        with suppress(Exception):
+            return self.zip_children()
+        return []
+
+    def zip_children(self):
+        zip_path = zipfile.Path(self.root)
+        names = zip_path.root.namelist()
+        self.joinpath = zip_path.joinpath
+
+        return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
+
+    def search(self, name):
+        return self.lookup(self.mtime).search(name)
+
+    @property
+    def mtime(self):
+        with suppress(OSError):
+            return os.stat(self.root).st_mtime
+        self.lookup.cache_clear()
+
+    @method_cache
+    def lookup(self, mtime):
+        return Lookup(self)
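+
+# Caching sketch: lookup() is memoized per instance and keyed on the root's
+# mtime, so a directory is only rescanned after it changes; a failed stat()
+# in mtime clears the cache so stale entries are dropped.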
+
+
+class Lookup:
+    def __init__(self, path: FastPath):
+        base = os.path.basename(path.root).lower()
+        base_is_egg = base.endswith(".egg")
+        self.infos = FreezableDefaultDict(list)
+        self.eggs = FreezableDefaultDict(list)
+
+        for child in path.children():
+            low = child.lower()
+            if low.endswith((".dist-info", ".egg-info")):
+                # rpartition is faster than splitext and suitable for this purpose.
+                name = low.rpartition(".")[0].partition("-")[0]
+                normalized = Prepared.normalize(name)
+                self.infos[normalized].append(path.joinpath(child))
+            elif base_is_egg and low == "egg-info":
+                name = base.rpartition(".")[0].partition("-")[0]
+                legacy_normalized = Prepared.legacy_normalize(name)
+                self.eggs[legacy_normalized].append(path.joinpath(child))
+
+        self.infos.freeze()
+        self.eggs.freeze()
+
+    def search(self, prepared):
+        infos = (
+            self.infos[prepared.normalized]
+            if prepared
+            else itertools.chain.from_iterable(self.infos.values())
+        )
+        eggs = (
+            self.eggs[prepared.legacy_normalized]
+            if prepared
+            else itertools.chain.from_iterable(self.eggs.values())
+        )
+        return itertools.chain(infos, eggs)
+
+
+class Prepared:
+    """
+    A prepared search for metadata on a possibly-named package.
+    """
+
+    normalized = None
+    legacy_normalized = None
+
+    def __init__(self, name):
+        self.name = name
+        if name is None:
+            return
+        self.normalized = self.normalize(name)
+        self.legacy_normalized = self.legacy_normalize(name)
+
+    @staticmethod
+    def normalize(name):
+        """
+        PEP 503 normalization plus dashes as underscores.
+        """
+        return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
+
+    @staticmethod
+    def legacy_normalize(name):
+        """
+        Normalize the package name as found in the convention in
+        older packaging tools versions and specs.
+        """
+        return name.lower().replace('-', '_')
+
+    def __bool__(self):
+        return bool(self.name)
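+
+# Normalization sketch: Prepared.normalize('My-Package.Name') returns
+# 'my_package_name', while Prepared.legacy_normalize('My-Package.Name')
+# returns 'my_package.name'.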
+
+
+class MetadataPathFinder(DistributionFinder):
+    @classmethod
+    def find_distributions(cls, context=DistributionFinder.Context()):
+        """
+        Find distributions.
+
+        Return an iterable of all Distribution instances capable of
+        loading the metadata for packages matching ``context.name``
+        (or all names if ``None`` indicated) along the paths in the list
+        of directories ``context.path``.
+        """
+        found = cls._search_paths(context.name, context.path)
+        return map(PathDistribution, found)
+
+    @classmethod
+    def _search_paths(cls, name, paths):
+        """Find metadata directories in paths heuristically."""
+        prepared = Prepared(name)
+        return itertools.chain.from_iterable(
+            path.search(prepared) for path in map(FastPath, paths)
+        )
+
+    @classmethod
+    def invalidate_caches(cls):
+        FastPath.__new__.cache_clear()
+
+
+class PathDistribution(Distribution):
+    def __init__(self, path: SimplePath):
+        """Construct a distribution.
+
+        :param path: SimplePath indicating the metadata directory.
+        """
+        self._path = path
+
+    def read_text(self, filename):
+        with suppress(
+            FileNotFoundError,
+            IsADirectoryError,
+            KeyError,
+            NotADirectoryError,
+            PermissionError,
+        ):
+            return self._path.joinpath(filename).read_text(encoding='utf-8')
+
+    read_text.__doc__ = Distribution.read_text.__doc__
+
+    def locate_file(self, path):
+        return self._path.parent / path
+
+    @property
+    def _normalized_name(self):
+        """
+        Performance optimization: where possible, resolve the
+        normalized name from the file system path.
+        """
+        stem = os.path.basename(str(self._path))
+        return self._name_from_stem(stem) or super()._normalized_name
+
+    def _name_from_stem(self, stem):
+        name, ext = os.path.splitext(stem)
+        if ext not in ('.dist-info', '.egg-info'):
+            return
+        name, sep, rest = stem.partition('-')
+        return name
+
+
+def distribution(distribution_name):
+    """Get the ``Distribution`` instance for the named package.
+
+    :param distribution_name: The name of the distribution package as a string.
+    :return: A ``Distribution`` instance (or subclass thereof).
+    """
+    return Distribution.from_name(distribution_name)
+
+
+def distributions(**kwargs):
+    """Get all ``Distribution`` instances in the current environment.
+
+    :return: An iterable of ``Distribution`` instances.
+    """
+    return Distribution.discover(**kwargs)
+
+
+def metadata(distribution_name) -> _meta.PackageMetadata:
+    """Get the metadata for the named package.
+
+    :param distribution_name: The name of the distribution package to query.
+    :return: A PackageMetadata containing the parsed metadata.
+    """
+    return Distribution.from_name(distribution_name).metadata
+
+
+def version(distribution_name):
+    """Get the version string for the named package.
+
+    :param distribution_name: The name of the distribution package to query.
+    :return: The version string for the package as defined in the package's
+        "Version" metadata key.
+    """
+    return distribution(distribution_name).version
+
+
+def entry_points(**params) -> Union[EntryPoints, SelectableGroups]:
+    """Return EntryPoint objects for all installed packages.
+
+    Pass selection parameters (group or name) to filter the
+    result to entry points matching those properties (see
+    EntryPoints.select()).
+
+    For compatibility, returns ``SelectableGroups`` object unless
+    selection parameters are supplied. In the future, this function
+    will return ``EntryPoints`` instead of ``SelectableGroups``
+    even when no selection parameters are supplied.
+
+    For maximum future compatibility, pass selection parameters
+    or invoke ``.select`` with parameters on the result.
+
+    :return: EntryPoints or SelectableGroups for all installed packages.
+    """
+    norm_name = operator.attrgetter('_normalized_name')
+    unique = functools.partial(unique_everseen, key=norm_name)
+    eps = itertools.chain.from_iterable(
+        dist.entry_points for dist in unique(distributions())
+    )
+    return SelectableGroups.load(eps).select(**params)
+
+
+def files(distribution_name):
+    """Return a list of files for the named package.
+
+    :param distribution_name: The name of the distribution package to query.
+    :return: List of files composing the distribution.
+    """
+    return distribution(distribution_name).files
+
+
+def requires(distribution_name):
+    """
+    Return a list of requirements for the named package.
+
+    :return: An iterator of requirements, suitable for
+        packaging.requirement.Requirement.
+    """
+    return distribution(distribution_name).requires
+
+
+def packages_distributions() -> Mapping[str, List[str]]:
+    """
+    Return a mapping of top-level packages to their
+    distributions.
+
+    >>> import collections.abc
+    >>> pkgs = packages_distributions()
+    >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
+    True
+    """
+    pkg_to_dist = collections.defaultdict(list)
+    for dist in distributions():
+        for pkg in (dist.read_text('top_level.txt') or '').split():
+            pkg_to_dist[pkg].append(dist.metadata['Name'])
+    return dict(pkg_to_dist)
diff --git a/Lib/importlib/metadata/_adapters.py b/Lib/importlib/metadata/_adapters.py
new file mode 100644
index 0000000..aa460d3
--- /dev/null
+++ b/Lib/importlib/metadata/_adapters.py
@@ -0,0 +1,68 @@
+import re
+import textwrap
+import email.message
+
+from ._text import FoldedCase
+
+
+class Message(email.message.Message):
+    multiple_use_keys = set(
+        map(
+            FoldedCase,
+            [
+                'Classifier',
+                'Obsoletes-Dist',
+                'Platform',
+                'Project-URL',
+                'Provides-Dist',
+                'Provides-Extra',
+                'Requires-Dist',
+                'Requires-External',
+                'Supported-Platform',
+                'Dynamic',
+            ],
+        )
+    )
+    """
+    Keys that may be indicated multiple times per PEP 566.
+    """
+
+    def __new__(cls, orig: email.message.Message):
+        res = super().__new__(cls)
+        vars(res).update(vars(orig))
+        return res
+
+    def __init__(self, *args, **kwargs):
+        self._headers = self._repair_headers()
+
+    # suppress spurious error from mypy
+    def __iter__(self):
+        return super().__iter__()
+
+    def _repair_headers(self):
+        def redent(value):
+            "Correct for RFC822 indentation"
+            if not value or '\n' not in value:
+                return value
+            return textwrap.dedent(' ' * 8 + value)
+
+        headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
+        if self._payload:
+            headers.append(('Description', self.get_payload()))
+        return headers
+
+    @property
+    def json(self):
+        """
+        Convert PackageMetadata to a JSON-compatible format
+        per PEP 566.
+        """
+
+        def transform(key):
+            value = self.get_all(key) if key in self.multiple_use_keys else self[key]
+            if key == 'Keywords':
+                value = re.split(r'\s+', value)
+            tk = key.lower().replace('-', '_')
+            return tk, value
+
+        return dict(map(transform, map(FoldedCase, self)))
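+
+# JSON sketch (hypothetical values): keys fold to snake_case and
+# multiple-use keys become lists, e.g.
+#
+#     {'metadata_version': '2.1', 'name': 'pkg',
+#      'requires_dist': ['requests', 'pytest; extra == "test"']}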
diff --git a/Lib/importlib/metadata/_collections.py b/Lib/importlib/metadata/_collections.py
new file mode 100644
index 0000000..cf0954e
--- /dev/null
+++ b/Lib/importlib/metadata/_collections.py
@@ -0,0 +1,30 @@
+import collections
+
+
+# from jaraco.collections 3.3
+class FreezableDefaultDict(collections.defaultdict):
+    """
+    Often it is desirable to prevent the mutation of
+    a default dict after its initial construction, such
+    as to prevent mutation during iteration.
+
+    >>> dd = FreezableDefaultDict(list)
+    >>> dd[0].append('1')
+    >>> dd.freeze()
+    >>> dd[1]
+    []
+    >>> len(dd)
+    1
+    """
+
+    def __missing__(self, key):
+        return getattr(self, '_frozen', super().__missing__)(key)
+
+    def freeze(self):
+        self._frozen = lambda key: self.default_factory()
+
+
+class Pair(collections.namedtuple('Pair', 'name value')):
+    @classmethod
+    def parse(cls, text):
+        return cls(*map(str.strip, text.split("=", 1)))
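+
+# Parsing sketch: Pair.parse('a = 1') == Pair(name='a', value='1').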
diff --git a/Lib/importlib/metadata/_functools.py b/Lib/importlib/metadata/_functools.py
new file mode 100644
index 0000000..73f50d0
--- /dev/null
+++ b/Lib/importlib/metadata/_functools.py
@@ -0,0 +1,85 @@
+import types
+import functools
+
+
+# from jaraco.functools 3.3
+def method_cache(method, cache_wrapper=None):
+    """
+    Wrap lru_cache to support storing the cache data in the object instances.
+
+    Abstracts the common paradigm where the method explicitly saves an
+    underscore-prefixed protected property on first call and returns that
+    subsequently.
+
+    >>> class MyClass:
+    ...     calls = 0
+    ...
+    ...     @method_cache
+    ...     def method(self, value):
+    ...         self.calls += 1
+    ...         return value
+
+    >>> a = MyClass()
+    >>> a.method(3)
+    3
+    >>> for x in range(75):
+    ...     res = a.method(x)
+    >>> a.calls
+    75
+
+    Note that the apparent behavior will be exactly like that of lru_cache
+    except that the cache is stored on each instance, so values in one
+    instance will not flush values from another, and when an instance is
+    deleted, so are the cached values for that instance.
+
+    >>> b = MyClass()
+    >>> for x in range(35):
+    ...     res = b.method(x)
+    >>> b.calls
+    35
+    >>> a.method(0)
+    0
+    >>> a.calls
+    75
+
+    Note that if method had been decorated with ``functools.lru_cache()``,
+    a.calls would have been 76 (due to the cached value of 0 having been
+    flushed by the 'b' instance).
+
+    Clear the cache with ``.cache_clear()``
+
+    >>> a.method.cache_clear()
+
+    Same for a method that hasn't yet been called.
+
+    >>> c = MyClass()
+    >>> c.method.cache_clear()
+
+    Another cache wrapper may be supplied:
+
+    >>> cache = functools.lru_cache(maxsize=2)
+    >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
+    >>> a = MyClass()
+    >>> a.method2()
+    3
+
+    Caution - do not subsequently wrap the method with another decorator, such
+    as ``@property``, which changes the semantics of the function.
+
+    See also
+    http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
+    for another implementation and additional justification.
+    """
+    cache_wrapper = cache_wrapper or functools.lru_cache()
+
+    def wrapper(self, *args, **kwargs):
+        # it's the first call, replace the method with a cached, bound method
+        bound_method = types.MethodType(method, self)
+        cached_method = cache_wrapper(bound_method)
+        setattr(self, method.__name__, cached_method)
+        return cached_method(*args, **kwargs)
+
+    # Support cache clear even before cache has been created.
+    wrapper.cache_clear = lambda: None
+
+    return wrapper
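
Beyond the doctests above, a small sketch of the self-replacing mechanism in wrapper(): the first call stores a cached bound method on the instance, which then shadows the class attribute on later lookups:

    >>> class Demo:
    ...     @method_cache
    ...     def value(self):
    ...         return object()
    >>> d = Demo()
    >>> first = d.value()
    >>> 'value' in vars(d)
    True
    >>> d.value() is first
    True
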
diff --git a/Lib/importlib/metadata/_itertools.py b/Lib/importlib/metadata/_itertools.py
new file mode 100644
index 0000000..dd45f2f
--- /dev/null
+++ b/Lib/importlib/metadata/_itertools.py
@@ -0,0 +1,19 @@
+from itertools import filterfalse
+
+
+def unique_everseen(iterable, key=None):
+    "List unique elements, preserving order. Remember all elements ever seen."
+    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+    # unique_everseen('ABBCcAD', str.lower) --> A B C D
+    seen = set()
+    seen_add = seen.add
+    if key is None:
+        for element in filterfalse(seen.__contains__, iterable):
+            seen_add(element)
+            yield element
+    else:
+        for element in iterable:
+            k = key(element)
+            if k not in seen:
+                seen_add(k)
+                yield element
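
The two comments above, as runnable doctests:

    >>> list(unique_everseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'D']
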
diff --git a/Lib/importlib/metadata/_meta.py b/Lib/importlib/metadata/_meta.py
new file mode 100644
index 0000000..1a6edbf
--- /dev/null
+++ b/Lib/importlib/metadata/_meta.py
@@ -0,0 +1,47 @@
+from typing import Any, Dict, Iterator, List, Protocol, TypeVar, Union
+
+
+_T = TypeVar("_T")
+
+
+class PackageMetadata(Protocol):
+    def __len__(self) -> int:
+        ...  # pragma: no cover
+
+    def __contains__(self, item: str) -> bool:
+        ...  # pragma: no cover
+
+    def __getitem__(self, key: str) -> str:
+        ...  # pragma: no cover
+
+    def __iter__(self) -> Iterator[str]:
+        ...  # pragma: no cover
+
+    def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
+        """
+        Return all values associated with a possibly multi-valued key.
+        """
+
+    @property
+    def json(self) -> Dict[str, Union[str, List[str]]]:
+        """
+        A JSON-compatible form of the metadata.
+        """
+
+
+class SimplePath(Protocol):
+    """
+    A minimal subset of pathlib.Path required by PathDistribution.
+    """
+
+    def joinpath(self) -> 'SimplePath':
+        ...  # pragma: no cover
+
+    def __truediv__(self) -> 'SimplePath':
+        ...  # pragma: no cover
+
+    def parent(self) -> 'SimplePath':
+        ...  # pragma: no cover
+
+    def read_text(self) -> str:
+        ...  # pragma: no cover
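
SimplePath is a structural (PEP 544) protocol, so no registration is needed; pathlib.Path already provides every member it names. A quick hedged check:

    >>> import pathlib
    >>> p = pathlib.Path('.')
    >>> all(hasattr(p, name)
    ...     for name in ('joinpath', '__truediv__', 'parent', 'read_text'))
    True
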
diff --git a/Lib/importlib/metadata/_text.py b/Lib/importlib/metadata/_text.py
new file mode 100644
index 0000000..766979d
--- /dev/null
+++ b/Lib/importlib/metadata/_text.py
@@ -0,0 +1,99 @@
+import re
+
+from ._functools import method_cache
+
+
+# from jaraco.text 3.5
+class FoldedCase(str):
+    """
+    A case insensitive string class; behaves just like str
+    except compares equal when the only variation is case.
+
+    >>> s = FoldedCase('hello world')
+
+    >>> s == 'Hello World'
+    True
+
+    >>> 'Hello World' == s
+    True
+
+    >>> s != 'Hello World'
+    False
+
+    >>> s.index('O')
+    4
+
+    >>> s.split('O')
+    ['hell', ' w', 'rld']
+
+    >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
+    ['alpha', 'Beta', 'GAMMA']
+
+    Sequence membership is straightforward.
+
+    >>> "Hello World" in [s]
+    True
+    >>> s in ["Hello World"]
+    True
+
+    You may test for set inclusion, but candidate and elements
+    must both be folded.
+
+    >>> FoldedCase("Hello World") in {s}
+    True
+    >>> s in {FoldedCase("Hello World")}
+    True
+
+    String inclusion works as long as the FoldedCase object
+    is on the right.
+
+    >>> "hello" in FoldedCase("Hello World")
+    True
+
+    But not if the FoldedCase object is on the left:
+
+    >>> FoldedCase('hello') in 'Hello World'
+    False
+
+    In that case, use in_:
+
+    >>> FoldedCase('hello').in_('Hello World')
+    True
+
+    >>> FoldedCase('hello') > FoldedCase('Hello')
+    False
+    """
+
+    def __lt__(self, other):
+        return self.lower() < other.lower()
+
+    def __gt__(self, other):
+        return self.lower() > other.lower()
+
+    def __eq__(self, other):
+        return self.lower() == other.lower()
+
+    def __ne__(self, other):
+        return self.lower() != other.lower()
+
+    def __hash__(self):
+        return hash(self.lower())
+
+    def __contains__(self, other):
+        return super(FoldedCase, self).lower().__contains__(other.lower())
+
+    def in_(self, other):
+        "Does self appear in other?"
+        return self in FoldedCase(other)
+
+    # cache lower since it's likely to be called frequently.
+    @method_cache
+    def lower(self):
+        return super(FoldedCase, self).lower()
+
+    def index(self, sub):
+        return self.lower().index(sub.lower())
+
+    def split(self, splitter=' ', maxsplit=0):
+        pattern = re.compile(re.escape(splitter), re.I)
+        return pattern.split(self, maxsplit)
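
Why this matters for the metadata code above: folded keys let field names compare and hash case-insensitively, for example:

    >>> FoldedCase('Author-email') == 'author-email'
    True
    >>> hash(FoldedCase('AUTHOR')) == hash(FoldedCase('author'))
    True
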
diff --git a/Lib/importlib/readers.py b/Lib/importlib/readers.py
new file mode 100644
index 0000000..41089c0
--- /dev/null
+++ b/Lib/importlib/readers.py
@@ -0,0 +1,123 @@
+import collections
+import zipfile
+import pathlib
+from . import abc
+
+
+def remove_duplicates(items):
+    return iter(collections.OrderedDict.fromkeys(items))
+
+
+class FileReader(abc.TraversableResources):
+    def __init__(self, loader):
+        self.path = pathlib.Path(loader.path).parent
+
+    def resource_path(self, resource):
+        """
+        Return the file system path to prevent
+        `resources.path()` from creating a temporary
+        copy.
+        """
+        return str(self.path.joinpath(resource))
+
+    def files(self):
+        return self.path
+
+
+class ZipReader(abc.TraversableResources):
+    def __init__(self, loader, module):
+        _, _, name = module.rpartition('.')
+        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
+        self.archive = loader.archive
+
+    def open_resource(self, resource):
+        try:
+            return super().open_resource(resource)
+        except KeyError as exc:
+            raise FileNotFoundError(exc.args[0])
+
+    def is_resource(self, path):
+        # workaround for `zipfile.Path.is_file` returning true
+        # for non-existent paths.
+        target = self.files().joinpath(path)
+        return target.is_file() and target.exists()
+
+    def files(self):
+        return zipfile.Path(self.archive, self.prefix)
+
+
+class MultiplexedPath(abc.Traversable):
+    """
+    Given a series of Traversable objects, implement a merged
+    version of the interface across all objects. Useful for
+    namespace packages which may be multihomed at a single
+    name.
+    """
+
+    def __init__(self, *paths):
+        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
+        if not self._paths:
+            message = 'MultiplexedPath must contain at least one path'
+            raise FileNotFoundError(message)
+        if not all(path.is_dir() for path in self._paths):
+            raise NotADirectoryError('MultiplexedPath only supports directories')
+
+    def iterdir(self):
+        visited = []
+        for path in self._paths:
+            for file in path.iterdir():
+                if file.name in visited:
+                    continue
+                visited.append(file.name)
+                yield file
+
+    def read_bytes(self):
+        raise FileNotFoundError(f'{self} is not a file')
+
+    def read_text(self, *args, **kwargs):
+        raise FileNotFoundError(f'{self} is not a file')
+
+    def is_dir(self):
+        return True
+
+    def is_file(self):
+        return False
+
+    def joinpath(self, child):
+        # first try to find child in current paths
+        for file in self.iterdir():
+            if file.name == child:
+                return file
+        # if it does not exist, construct it with the first path
+        return self._paths[0] / child
+
+    __truediv__ = joinpath
+
+    def open(self, *args, **kwargs):
+        raise FileNotFoundError(f'{self} is not a file')
+
+    @property
+    def name(self):
+        return self._paths[0].name
+
+    def __repr__(self):
+        paths = ', '.join(f"'{path}'" for path in self._paths)
+        return f'MultiplexedPath({paths})'
+
+
+class NamespaceReader(abc.TraversableResources):
+    def __init__(self, namespace_path):
+        if 'NamespacePath' not in str(namespace_path):
+            raise ValueError('Invalid path')
+        self.path = MultiplexedPath(*list(namespace_path))
+
+    def resource_path(self, resource):
+        """
+        Return the file system path to prevent
+        `resources.path()` from creating a temporary
+        copy.
+        """
+        return str(self.path.joinpath(resource))
+
+    def files(self):
+        return self.path
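
A minimal sketch of MultiplexedPath over two hypothetical directories contributing to one namespace package; iterdir() merges the listings, with the first occurrence of a name winning:

    >>> import pathlib, tempfile
    >>> a = pathlib.Path(tempfile.mkdtemp())
    >>> b = pathlib.Path(tempfile.mkdtemp())
    >>> _ = (a / 'one.txt').write_text('1')
    >>> _ = (b / 'two.txt').write_text('2')
    >>> mp = MultiplexedPath(a, b)
    >>> sorted(entry.name for entry in mp.iterdir())
    ['one.txt', 'two.txt']
    >>> mp.is_file()
    False
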
diff --git a/Lib/importlib/resources.py b/Lib/importlib/resources.py
index b803a01..8a98663 100644
--- a/Lib/importlib/resources.py
+++ b/Lib/importlib/resources.py
@@ -1,22 +1,26 @@
 import os
+import io
 
-from . import abc as resources_abc
 from . import _common
-from ._common import as_file
-from contextlib import contextmanager, suppress
-from importlib import import_module
+from ._common import as_file, files
+from .abc import ResourceReader
+from contextlib import suppress
 from importlib.abc import ResourceLoader
+from importlib.machinery import ModuleSpec
 from io import BytesIO, TextIOWrapper
 from pathlib import Path
 from types import ModuleType
-from typing import ContextManager, Iterable, Optional, Union
+from typing import ContextManager, Iterable, Union
 from typing import cast
 from typing.io import BinaryIO, TextIO
+from collections.abc import Sequence
+from functools import singledispatch
 
 
 __all__ = [
     'Package',
     'Resource',
+    'ResourceReader',
     'as_file',
     'contents',
     'files',
@@ -26,99 +30,57 @@
     'path',
     'read_binary',
     'read_text',
-    ]
+]
 
 
 Package = Union[str, ModuleType]
 Resource = Union[str, os.PathLike]
 
 
-def _resolve(name) -> ModuleType:
-    """If name is a string, resolve to a module."""
-    if hasattr(name, '__spec__'):
-        return name
-    return import_module(name)
-
-
-def _get_package(package) -> ModuleType:
-    """Take a package name or module object and return the module.
-
-    If a name, the module is imported.  If the resolved module
-    object is not a package, raise an exception.
-    """
-    module = _resolve(package)
-    if module.__spec__.submodule_search_locations is None:
-        raise TypeError('{!r} is not a package'.format(package))
-    return module
-
-
-def _normalize_path(path) -> str:
-    """Normalize a path by ensuring it is a string.
-
-    If the resulting string contains path separators, an exception is raised.
-    """
-    parent, file_name = os.path.split(path)
-    if parent:
-        raise ValueError('{!r} must be only a file name'.format(path))
-    return file_name
-
-
-def _get_resource_reader(
-        package: ModuleType) -> Optional[resources_abc.ResourceReader]:
-    # Return the package's loader if it's a ResourceReader.  We can't use
-    # a issubclass() check here because apparently abc.'s __subclasscheck__()
-    # hook wants to create a weak reference to the object, but
-    # zipimport.zipimporter does not support weak references, resulting in a
-    # TypeError.  That seems terrible.
-    spec = package.__spec__
-    if hasattr(spec.loader, 'get_resource_reader'):
-        return cast(resources_abc.ResourceReader,
-                    spec.loader.get_resource_reader(spec.name))
-    return None
-
-
-def _check_location(package):
-    if package.__spec__.origin is None or not package.__spec__.has_location:
-        raise FileNotFoundError(f'Package has no location {package!r}')
-
-
 def open_binary(package: Package, resource: Resource) -> BinaryIO:
     """Return a file-like object opened for binary reading of the resource."""
-    resource = _normalize_path(resource)
-    package = _get_package(package)
-    reader = _get_resource_reader(package)
+    resource = _common.normalize_path(resource)
+    package = _common.get_package(package)
+    reader = _common.get_resource_reader(package)
     if reader is not None:
         return reader.open_resource(resource)
-    absolute_package_path = os.path.abspath(
-        package.__spec__.origin or 'non-existent file')
-    package_path = os.path.dirname(absolute_package_path)
-    full_path = os.path.join(package_path, resource)
-    try:
-        return open(full_path, mode='rb')
-    except OSError:
-        # Just assume the loader is a resource loader; all the relevant
-        # importlib.machinery loaders are and an AttributeError for
-        # get_data() will make it clear what is needed from the loader.
-        loader = cast(ResourceLoader, package.__spec__.loader)
-        data = None
-        if hasattr(package.__spec__.loader, 'get_data'):
-            with suppress(OSError):
-                data = loader.get_data(full_path)
-        if data is None:
-            package_name = package.__spec__.name
-            message = '{!r} resource not found in {!r}'.format(
-                resource, package_name)
-            raise FileNotFoundError(message)
-        return BytesIO(data)
+    spec = cast(ModuleSpec, package.__spec__)
+    # Using pathlib doesn't work well here due to the lack of 'strict'
+    # argument for pathlib.Path.resolve() prior to Python 3.6.
+    if spec.submodule_search_locations is not None:
+        paths = spec.submodule_search_locations
+    elif spec.origin is not None:
+        paths = [os.path.dirname(os.path.abspath(spec.origin))]
+
+    for package_path in paths:
+        full_path = os.path.join(package_path, resource)
+        try:
+            return open(full_path, mode='rb')
+        except OSError:
+            # Just assume the loader is a resource loader; all the relevant
+            # importlib.machinery loaders are and an AttributeError for
+            # get_data() will make it clear what is needed from the loader.
+            loader = cast(ResourceLoader, spec.loader)
+            data = None
+            if hasattr(spec.loader, 'get_data'):
+                with suppress(OSError):
+                    data = loader.get_data(full_path)
+            if data is not None:
+                return BytesIO(data)
+
+    raise FileNotFoundError(f'{resource!r} resource not found in {spec.name!r}')
 
 
-def open_text(package: Package,
-              resource: Resource,
-              encoding: str = 'utf-8',
-              errors: str = 'strict') -> TextIO:
+def open_text(
+    package: Package,
+    resource: Resource,
+    encoding: str = 'utf-8',
+    errors: str = 'strict',
+) -> TextIO:
     """Return a file-like object opened for text reading of the resource."""
     return TextIOWrapper(
-        open_binary(package, resource), encoding=encoding, errors=errors)
+        open_binary(package, resource), encoding=encoding, errors=errors
+    )
 
 
 def read_binary(package: Package, resource: Resource) -> bytes:
@@ -127,10 +89,12 @@
         return fp.read()
 
 
-def read_text(package: Package,
-              resource: Resource,
-              encoding: str = 'utf-8',
-              errors: str = 'strict') -> str:
+def read_text(
+    package: Package,
+    resource: Resource,
+    encoding: str = 'utf-8',
+    errors: str = 'strict',
+) -> str:
     """Return the decoded string of the resource.
 
     The decoding-related arguments have the same semantics as those of
@@ -140,16 +104,10 @@
         return fp.read()
 
 
-def files(package: Package) -> resources_abc.Traversable:
-    """
-    Get a Traversable resource from a package
-    """
-    return _common.from_package(_get_package(package))
-
-
 def path(
-        package: Package, resource: Resource,
-        ) -> 'ContextManager[Path]':
+    package: Package,
+    resource: Resource,
+) -> 'ContextManager[Path]':
     """A context manager providing a file path object to the resource.
 
     If the resource does not already exist on its own on the file system,
@@ -158,23 +116,30 @@
     raised if the file was deleted prior to the context manager
     exiting).
     """
-    reader = _get_resource_reader(_get_package(package))
+    reader = _common.get_resource_reader(_common.get_package(package))
     return (
-        _path_from_reader(reader, resource)
-        if reader else
-        _common.as_file(files(package).joinpath(_normalize_path(resource)))
+        _path_from_reader(reader, _common.normalize_path(resource))
+        if reader
+        else _common.as_file(
+            _common.files(package).joinpath(_common.normalize_path(resource))
         )
+    )
 
 
-@contextmanager
 def _path_from_reader(reader, resource):
-    norm_resource = _normalize_path(resource)
+    return _path_from_resource_path(reader, resource) or _path_from_open_resource(
+        reader, resource
+    )
+
+
+def _path_from_resource_path(reader, resource):
     with suppress(FileNotFoundError):
-        yield Path(reader.resource_path(norm_resource))
-        return
-    opener_reader = reader.open_resource(norm_resource)
-    with _common._tempfile(opener_reader.read, suffix=norm_resource) as res:
-        yield res
+        return Path(reader.resource_path(resource))
+
+
+def _path_from_open_resource(reader, resource):
+    saved = io.BytesIO(reader.open_resource(resource).read())
+    return _common._tempfile(saved.read, suffix=resource)
 
 
 def is_resource(package: Package, name: str) -> bool:
@@ -182,9 +147,9 @@
 
     Directories are *not* resources.
     """
-    package = _get_package(package)
-    _normalize_path(name)
-    reader = _get_resource_reader(package)
+    package = _common.get_package(package)
+    _common.normalize_path(name)
+    reader = _common.get_resource_reader(package)
     if reader is not None:
         return reader.is_resource(name)
     package_contents = set(contents(package))
@@ -200,16 +165,21 @@
     not considered resources.  Use `is_resource()` on each entry returned here
     to check if it is a resource or not.
     """
-    package = _get_package(package)
-    reader = _get_resource_reader(package)
+    package = _common.get_package(package)
+    reader = _common.get_resource_reader(package)
     if reader is not None:
-        return reader.contents()
-    # Is the package a namespace package?  By definition, namespace packages
-    # cannot have resources.
-    namespace = (
-        package.__spec__.origin is None or
-        package.__spec__.origin == 'namespace'
-        )
-    if namespace or not package.__spec__.has_location:
-        return ()
-    return list(item.name for item in _common.from_package(package).iterdir())
+        return _ensure_sequence(reader.contents())
+    traversable = _common.from_package(package)
+    if traversable.is_dir():
+        return list(item.name for item in traversable.iterdir())
+    return []
+
+
+@singledispatch
+def _ensure_sequence(iterable):
+    return list(iterable)
+
+
+@_ensure_sequence.register(Sequence)
+def _(iterable):
+    return iterable
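
The singledispatch pair above normalizes whatever a reader's contents() returns: iterators are materialized into a list, while anything already registered as a collections.abc.Sequence passes through untouched. A sketch:

    >>> _ensure_sequence(iter(['a.txt', 'b.txt']))
    ['a.txt', 'b.txt']
    >>> items = ['a.txt', 'b.txt']
    >>> _ensure_sequence(items) is items
    True
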
diff --git a/Lib/importlib/util.py b/Lib/importlib/util.py
index 269a6fa..8623c89 100644
--- a/Lib/importlib/util.py
+++ b/Lib/importlib/util.py
@@ -1,5 +1,5 @@
 """Utility code for constructing importers, etc."""
-from . import abc
+from ._abc import Loader
 from ._bootstrap import module_from_spec
 from ._bootstrap import _resolve_name
 from ._bootstrap import spec_from_loader
@@ -149,7 +149,8 @@
     """
     @functools.wraps(fxn)
     def set_package_wrapper(*args, **kwargs):
-        warnings.warn('The import system now takes care of this automatically.',
+        warnings.warn('The import system now takes care of this automatically; '
+                      'this decorator is slated for removal in Python 3.12',
                       DeprecationWarning, stacklevel=2)
         module = fxn(*args, **kwargs)
         if getattr(module, '__package__', None) is None:
@@ -168,7 +169,8 @@
     """
     @functools.wraps(fxn)
     def set_loader_wrapper(self, *args, **kwargs):
-        warnings.warn('The import system now takes care of this automatically.',
+        warnings.warn('The import system now takes care of this automatically; '
+                      'this decorator is slated for removal in Python 3.12',
                       DeprecationWarning, stacklevel=2)
         module = fxn(self, *args, **kwargs)
         if getattr(module, '__loader__', None) is None:
@@ -195,7 +197,8 @@
     the second argument.
 
     """
-    warnings.warn('The import system now takes care of this automatically.',
+    warnings.warn('The import system now takes care of this automatically; '
+                  'this decorator is slated for removal in Python 3.12',
                   DeprecationWarning, stacklevel=2)
     @functools.wraps(fxn)
     def module_for_loader_wrapper(self, fullname, *args, **kwargs):
@@ -232,7 +235,6 @@
         # Figure out exactly what attributes were mutated between the creation
         # of the module and now.
         attrs_then = self.__spec__.loader_state['__dict__']
-        original_type = self.__spec__.loader_state['__class__']
         attrs_now = self.__dict__
         attrs_updated = {}
         for key, value in attrs_now.items():
@@ -263,7 +265,7 @@
         delattr(self, attr)
 
 
-class LazyLoader(abc.Loader):
+class LazyLoader(Loader):
 
     """A loader that creates a module which defers loading until attribute access."""
 
diff --git a/Lib/inspect.py b/Lib/inspect.py
index 18bed90..c5881cc 100644
--- a/Lib/inspect.py
+++ b/Lib/inspect.py
@@ -24,6 +24,8 @@
     stack(), trace() - get info about frames on the stack or in a traceback
 
     signature() - get a Signature object for the callable
+
+    get_annotations() - safely compute an object's annotations
 """
 
 # This module is in the public domain.  No warranties.
@@ -60,6 +62,122 @@
 # See Include/object.h
 TPFLAGS_IS_ABSTRACT = 1 << 20
 
+
+def get_annotations(obj, *, globals=None, locals=None, eval_str=False):
+    """Compute the annotations dict for an object.
+
+    obj may be a callable, class, or module.
+    Passing in an object of any other type raises TypeError.
+
+    Returns a dict.  get_annotations() returns a new dict every time
+    it's called; calling it twice on the same object will return two
+    different but equivalent dicts.
+
+    This function handles several details for you:
+
+      * If eval_str is true, values of type str will
+        be un-stringized using eval().  This is intended
+        for use with stringized annotations
+        ("from __future__ import annotations").
+      * If obj doesn't have an annotations dict, returns an
+        empty dict.  (Functions and methods always have an
+        annotations dict; classes, modules, and other types of
+        callables may not.)
+      * Ignores inherited annotations on classes.  If a class
+        doesn't have its own annotations dict, returns an empty dict.
+      * All accesses to object members and dict values are done
+        using getattr() and dict.get() for safety.
+      * Always, always, always returns a freshly-created dict.
+
+    eval_str controls whether or not values of type str are replaced
+    with the result of calling eval() on those values:
+
+      * If eval_str is true, eval() is called on values of type str.
+      * If eval_str is false (the default), values of type str are unchanged.
+
+    globals and locals are passed in to eval(); see the documentation
+    for eval() for more information.  If either globals or locals is
+    None, this function may replace that value with a context-specific
+    default, contingent on type(obj):
+
+      * If obj is a module, globals defaults to obj.__dict__.
+      * If obj is a class, globals defaults to
+        sys.modules[obj.__module__].__dict__ and locals
+        defaults to the obj class namespace.
+      * If obj is a callable, globals defaults to obj.__globals__,
+        although if obj is a wrapped function (using
+        functools.update_wrapper()) it is first unwrapped.
+    """
+    if isinstance(obj, type):
+        # class
+        obj_dict = getattr(obj, '__dict__', None)
+        if obj_dict and hasattr(obj_dict, 'get'):
+            ann = obj_dict.get('__annotations__', None)
+            if isinstance(ann, types.GetSetDescriptorType):
+                ann = None
+        else:
+            ann = None
+
+        obj_globals = None
+        module_name = getattr(obj, '__module__', None)
+        if module_name:
+            module = sys.modules.get(module_name, None)
+            if module:
+                obj_globals = getattr(module, '__dict__', None)
+        obj_locals = dict(vars(obj))
+        unwrap = obj
+    elif isinstance(obj, types.ModuleType):
+        # module
+        ann = getattr(obj, '__annotations__', None)
+        obj_globals = getattr(obj, '__dict__')
+        obj_locals = None
+        unwrap = None
+    elif callable(obj):
+        # this includes types.FunctionType, types.BuiltinFunctionType,
+        # types.BuiltinMethodType, functools.partial, functools.singledispatch,
+        # "class funclike" from Lib/test/test_inspect... on and on it goes.
+        ann = getattr(obj, '__annotations__', None)
+        obj_globals = getattr(obj, '__globals__', None)
+        obj_locals = None
+        unwrap = obj
+    else:
+        raise TypeError(f"{obj!r} is not a module, class, or callable.")
+
+    if ann is None:
+        return {}
+
+    if not isinstance(ann, dict):
+        raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")
+
+    if not ann:
+        return {}
+
+    if not eval_str:
+        return dict(ann)
+
+    if unwrap is not None:
+        while True:
+            if hasattr(unwrap, '__wrapped__'):
+                unwrap = unwrap.__wrapped__
+                continue
+            if isinstance(unwrap, functools.partial):
+                unwrap = unwrap.func
+                continue
+            break
+        if hasattr(unwrap, "__globals__"):
+            obj_globals = unwrap.__globals__
+
+    if globals is None:
+        globals = obj_globals
+    if locals is None:
+        locals = obj_locals
+
+    return_value = {key: value if not isinstance(value, str)
+                    else eval(value, globals, locals)
+                    for key, value in ann.items()}
+    return return_value
+
+
 # ----------------------------------------------------------- type-checking
 def ismodule(object):
     """Return true if the object is a module.
@@ -277,7 +395,7 @@
         co_kwonlyargcount   number of keyword only arguments (not including ** arg)
         co_lnotab           encoded mapping of line numbers to bytecode indices
         co_name             name with which this code object was defined
-        co_names            tuple of names of local variables
+        co_names            tuple of names other than arguments and function locals
         co_nlocals          number of local variables
         co_stacksize        virtual machine stack space required
         co_varnames         tuple of names of arguments and local variables"""
@@ -407,7 +525,7 @@
     # attribute with the same name as a DynamicClassAttribute exists.
     for base in mro:
         for k, v in base.__dict__.items():
-            if isinstance(v, types.DynamicClassAttribute):
+            if isinstance(v, types.DynamicClassAttribute) and v.fget is not None:
                 names.append(k)
     result = []
     processed = set()
@@ -663,6 +781,8 @@
             module = sys.modules.get(object.__module__)
             if getattr(module, '__file__', None):
                 return module.__file__
+            if object.__module__ == '__main__':
+                raise OSError('source code not available')
         raise TypeError('{!r} is a built-in class'.format(object))
     if ismethod(object):
         object = object.__func__
@@ -706,10 +826,13 @@
     if os.path.exists(filename):
         return filename
     # only return a non-existent filename if the module has a PEP 302 loader
-    if getattr(getmodule(object, filename), '__loader__', None) is not None:
+    module = getmodule(object, filename)
+    if getattr(module, '__loader__', None) is not None:
+        return filename
+    elif getattr(getattr(module, "__spec__", None), "loader", None) is not None:
         return filename
     # or it is in the linecache
-    if filename in linecache.cache:
+    elif filename in linecache.cache:
         return filename
 
 def getabsfile(object, _filename=None):
@@ -736,7 +859,7 @@
     # Try the cache again with the absolute file name
     try:
         file = getabsfile(object, _filename)
-    except TypeError:
+    except (TypeError, FileNotFoundError):
         return None
     if file in modulesbyfile:
         return sys.modules.get(modulesbyfile[file])
@@ -1162,7 +1285,8 @@
         sig = _signature_from_callable(func,
                                        follow_wrapper_chains=False,
                                        skip_bound_arg=False,
-                                       sigcls=Signature)
+                                       sigcls=Signature,
+                                       eval_str=False)
     except Exception as ex:
         # Most of the times 'signature' will raise ValueError.
         # But, it can also raise AttributeError, and, maybe something
@@ -1233,6 +1357,8 @@
 def formatannotation(annotation, base_module=None):
     if getattr(annotation, '__module__', None) == 'typing':
         return repr(annotation).replace('typing.', '')
+    if isinstance(annotation, types.GenericAlias):
+        return str(annotation)
     if isinstance(annotation, type):
         if annotation.__module__ in ('builtins', base_module):
             return annotation.__qualname__
@@ -1895,7 +2021,7 @@
             isinstance(name, str) and
             (defaults is None or isinstance(defaults, tuple)) and
             (kwdefaults is None or isinstance(kwdefaults, dict)) and
-            isinstance(annotations, dict))
+            (isinstance(annotations, dict) or annotations is None))
 
 
 def _signature_get_bound_param(spec):
@@ -2147,7 +2273,8 @@
     return _signature_fromstr(cls, func, s, skip_bound_arg)
 
 
-def _signature_from_function(cls, func, skip_bound_arg=True):
+def _signature_from_function(cls, func, skip_bound_arg=True,
+                             globals=None, locals=None, eval_str=False):
     """Private helper: constructs Signature for the given python function."""
 
     is_duck_function = False
@@ -2173,7 +2300,7 @@
     positional = arg_names[:pos_count]
     keyword_only_count = func_code.co_kwonlyargcount
     keyword_only = arg_names[pos_count:pos_count + keyword_only_count]
-    annotations = func.__annotations__
+    annotations = get_annotations(func, globals=globals, locals=locals, eval_str=eval_str)
     defaults = func.__defaults__
     kwdefaults = func.__kwdefaults__
 
@@ -2244,23 +2371,30 @@
 def _signature_from_callable(obj, *,
                              follow_wrapper_chains=True,
                              skip_bound_arg=True,
+                             globals=None,
+                             locals=None,
+                             eval_str=False,
                              sigcls):
 
     """Private helper function to get signature for arbitrary
     callable objects.
     """
 
+    _get_signature_of = functools.partial(_signature_from_callable,
+                                follow_wrapper_chains=follow_wrapper_chains,
+                                skip_bound_arg=skip_bound_arg,
+                                globals=globals,
+                                locals=locals,
+                                sigcls=sigcls,
+                                eval_str=eval_str)
+
     if not callable(obj):
         raise TypeError('{!r} is not a callable object'.format(obj))
 
     if isinstance(obj, types.MethodType):
         # In this case we skip the first parameter of the underlying
         # function (usually `self` or `cls`).
-        sig = _signature_from_callable(
-            obj.__func__,
-            follow_wrapper_chains=follow_wrapper_chains,
-            skip_bound_arg=skip_bound_arg,
-            sigcls=sigcls)
+        sig = _get_signature_of(obj.__func__)
 
         if skip_bound_arg:
             return _signature_bound_method(sig)
@@ -2274,11 +2408,7 @@
             # If the unwrapped object is a *method*, we might want to
             # skip its first parameter (self).
             # See test_signature_wrapped_bound_method for details.
-            return _signature_from_callable(
-                obj,
-                follow_wrapper_chains=follow_wrapper_chains,
-                skip_bound_arg=skip_bound_arg,
-                sigcls=sigcls)
+            return _get_signature_of(obj)
 
     try:
         sig = obj.__signature__
@@ -2305,11 +2435,7 @@
             # (usually `self`, or `cls`) will not be passed
             # automatically (as for boundmethods)
 
-            wrapped_sig = _signature_from_callable(
-                partialmethod.func,
-                follow_wrapper_chains=follow_wrapper_chains,
-                skip_bound_arg=skip_bound_arg,
-                sigcls=sigcls)
+            wrapped_sig = _get_signature_of(partialmethod.func)
 
             sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
             first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
@@ -2328,18 +2454,15 @@
         # If it's a pure Python function, or an object that is duck type
         # of a Python function (Cython functions, for instance), then:
         return _signature_from_function(sigcls, obj,
-                                        skip_bound_arg=skip_bound_arg)
+                                        skip_bound_arg=skip_bound_arg,
+                                        globals=globals, locals=locals, eval_str=eval_str)
 
     if _signature_is_builtin(obj):
         return _signature_from_builtin(sigcls, obj,
                                        skip_bound_arg=skip_bound_arg)
 
     if isinstance(obj, functools.partial):
-        wrapped_sig = _signature_from_callable(
-            obj.func,
-            follow_wrapper_chains=follow_wrapper_chains,
-            skip_bound_arg=skip_bound_arg,
-            sigcls=sigcls)
+        wrapped_sig = _get_signature_of(obj.func)
         return _signature_get_partial(wrapped_sig, obj)
 
     sig = None
@@ -2350,29 +2473,25 @@
         # in its metaclass
         call = _signature_get_user_defined_method(type(obj), '__call__')
         if call is not None:
-            sig = _signature_from_callable(
-                call,
-                follow_wrapper_chains=follow_wrapper_chains,
-                skip_bound_arg=skip_bound_arg,
-                sigcls=sigcls)
+            sig = _get_signature_of(call)
         else:
-            # Now we check if the 'obj' class has a '__new__' method
+            factory_method = None
             new = _signature_get_user_defined_method(obj, '__new__')
-            if new is not None:
-                sig = _signature_from_callable(
-                    new,
-                    follow_wrapper_chains=follow_wrapper_chains,
-                    skip_bound_arg=skip_bound_arg,
-                    sigcls=sigcls)
-            else:
-                # Finally, we should have at least __init__ implemented
-                init = _signature_get_user_defined_method(obj, '__init__')
-                if init is not None:
-                    sig = _signature_from_callable(
-                        init,
-                        follow_wrapper_chains=follow_wrapper_chains,
-                        skip_bound_arg=skip_bound_arg,
-                        sigcls=sigcls)
+            init = _signature_get_user_defined_method(obj, '__init__')
+            # Now we check if the 'obj' class has an own '__new__' method
+            if '__new__' in obj.__dict__:
+                factory_method = new
+            # or an own '__init__' method
+            elif '__init__' in obj.__dict__:
+                factory_method = init
+            # If not, we take inherited '__new__' or '__init__', if present
+            elif new is not None:
+                factory_method = new
+            elif init is not None:
+                factory_method = init
+
+            if factory_method is not None:
+                sig = _get_signature_of(factory_method)
 
         if sig is None:
             # At this point we know, that `obj` is a class, with no user-
@@ -2392,9 +2511,9 @@
                     pass
                 else:
                     if text_sig:
-                        # If 'obj' class has a __text_signature__ attribute:
+                        # If 'base' class has a __text_signature__ attribute:
                         # return a signature based on it
-                        return _signature_fromstr(sigcls, obj, text_sig)
+                        return _signature_fromstr(sigcls, base, text_sig)
 
             # No '__text_signature__' was found for the 'obj' class.
             # Last option is to check if its '__init__' is
@@ -2418,11 +2537,7 @@
         call = _signature_get_user_defined_method(type(obj), '__call__')
         if call is not None:
             try:
-                sig = _signature_from_callable(
-                    call,
-                    follow_wrapper_chains=follow_wrapper_chains,
-                    skip_bound_arg=skip_bound_arg,
-                    sigcls=sigcls)
+                sig = _get_signature_of(call)
             except ValueError as ex:
                 msg = 'no signature found for {!r}'.format(obj)
                 raise ValueError(msg) from ex
@@ -2874,10 +2989,12 @@
         return _signature_from_builtin(cls, func)
 
     @classmethod
-    def from_callable(cls, obj, *, follow_wrapped=True):
+    def from_callable(cls, obj, *,
+                      follow_wrapped=True, globals=None, locals=None, eval_str=False):
         """Constructs Signature for the given callable object."""
         return _signature_from_callable(obj, sigcls=cls,
-                                        follow_wrapper_chains=follow_wrapped)
+                                        follow_wrapper_chains=follow_wrapped,
+                                        globals=globals, locals=locals, eval_str=eval_str)
 
     @property
     def parameters(self):
@@ -3125,9 +3242,10 @@
         return rendered
 
 
-def signature(obj, *, follow_wrapped=True):
+def signature(obj, *, follow_wrapped=True, globals=None, locals=None, eval_str=False):
     """Get a signature object for the passed callable."""
-    return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
+    return Signature.from_callable(obj, follow_wrapped=follow_wrapped,
+                                   globals=globals, locals=locals, eval_str=eval_str)
 
 
 def _main():
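
The eval_str plumbing threaded through the signature machinery above exists chiefly for stringized annotations ("from __future__ import annotations"). A hedged sketch on a Python 3.10 interpreter:

    >>> import inspect
    >>> def f(x: 'int', y: 'list[str]') -> 'bool': ...
    >>> inspect.get_annotations(f)
    {'x': 'int', 'y': 'list[str]', 'return': 'bool'}
    >>> inspect.get_annotations(f, eval_str=True)
    {'x': <class 'int'>, 'y': list[str], 'return': <class 'bool'>}
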
diff --git a/Lib/io.py b/Lib/io.py
index fbce6ef..2a6140c 100644
--- a/Lib/io.py
+++ b/Lib/io.py
@@ -54,9 +54,24 @@
 from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
                  open, open_code, FileIO, BytesIO, StringIO, BufferedReader,
                  BufferedWriter, BufferedRWPair, BufferedRandom,
-                 IncrementalNewlineDecoder, TextIOWrapper)
+                 IncrementalNewlineDecoder, text_encoding, TextIOWrapper)
 
-OpenWrapper = _io.open # for compatibility with _pyio
+
+def __getattr__(name):
+    if name == "OpenWrapper":
+        # bpo-43680: Until Python 3.9, _pyio.open was not a static method and
+        # builtins.open was set to OpenWrapper to not become a bound method
+        # when set to a class variable. _io.open is a built-in function whereas
+        # _pyio.open is a Python function. In Python 3.10, _pyio.open() is now
+        # a static method, and builtins.open() is now io.open().
+        import warnings
+        warnings.warn('OpenWrapper is deprecated, use open instead',
+                      DeprecationWarning, stacklevel=2)
+        global OpenWrapper
+        OpenWrapper = open
+        return OpenWrapper
+    raise AttributeError(name)
+
 
 # Pretend this exception was created here.
 UnsupportedOperation.__module__ = "io"
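
The hunk above uses the module-level __getattr__ hook from PEP 562 to keep OpenWrapper importable while deprecating it lazily. The generic shape of that pattern, with placeholder names (OLD_NAME and new_name are hypothetical):

    # mymod.py -- sketch only
    import warnings

    new_name = object()

    def __getattr__(name):  # invoked only for attributes not found normally
        if name == "OLD_NAME":
            warnings.warn("OLD_NAME is deprecated, use new_name instead",
                          DeprecationWarning, stacklevel=2)
            return new_name
        raise AttributeError(name)
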
diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py
index bc662c4..4a6496a 100644
--- a/Lib/ipaddress.py
+++ b/Lib/ipaddress.py
@@ -16,6 +16,7 @@
 IPV4LENGTH = 32
 IPV6LENGTH = 128
 
+
 class AddressValueError(ValueError):
     """A Value Error related to the address."""
 
@@ -1214,7 +1215,7 @@
         """
         if not octet_str:
             raise ValueError("Empty octet not permitted")
-        # Whitelist the characters, since int() allows a lot of bizarre stuff.
+        # Reject non-ASCII digits.
         if not (octet_str.isascii() and octet_str.isdigit()):
             msg = "Only decimal digits permitted in %r"
             raise ValueError(msg % octet_str)
@@ -1223,6 +1224,11 @@
         if len(octet_str) > 3:
             msg = "At most 3 characters permitted in %r"
             raise ValueError(msg % octet_str)
+        # Handle leading zeros as strictly as glibc's inet_pton()
+        # See security bug bpo-36384
+        if octet_str != '0' and octet_str[0] == '0':
+            msg = "Leading zeros are not permitted in %r"
+            raise ValueError(msg % octet_str)
         # Convert to integer (we know digits are legal)
         octet_int = int(octet_str, 10)
         if octet_int > 255:
@@ -1719,7 +1725,7 @@
               [0..FFFF].
 
         """
-        # Whitelist the characters, since int() allows a lot of bizarre stuff.
+        # Reject non-ASCII digits.
         if not cls._HEX_DIGITS.issuperset(hextet_str):
             raise ValueError("Only hex digits permitted in %r" % hextet_str)
         # We do the length check second, since the invalid character error
@@ -1997,9 +2003,13 @@
 
         Returns:
             A boolean, True if the address is reserved per
-            iana-ipv6-special-registry.
+            iana-ipv6-special-registry, or is ipv4_mapped and is
+            reserved in the iana-ipv4-special-registry.
 
         """
+        ipv4_mapped = self.ipv4_mapped
+        if ipv4_mapped is not None:
+            return ipv4_mapped.is_private
         return any(self in net for net in self._constants._private_networks)
 
     @property
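
The stricter octet parsing above means dotted quads with leading zeros are now rejected outright (they are ambiguous: some resolvers interpret them as octal). For instance:

    >>> import ipaddress
    >>> ipaddress.ip_address('192.168.0.1')
    IPv4Address('192.168.0.1')
    >>> ipaddress.ip_address('192.168.01.1')
    Traceback (most recent call last):
      ...
    ValueError: '192.168.01.1' does not appear to be an IPv4 or IPv6 address
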
diff --git a/Lib/json/__init__.py b/Lib/json/__init__.py
index 2c52bde..e4c21da 100644
--- a/Lib/json/__init__.py
+++ b/Lib/json/__init__.py
@@ -133,7 +133,7 @@
 
     If ``check_circular`` is false, then the circular reference check
     for container types will be skipped and a circular reference will
-    result in an ``OverflowError`` (or worse).
+    result in a ``RecursionError`` (or worse).
 
     If ``allow_nan`` is false, then it will be a ``ValueError`` to
     serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
@@ -195,7 +195,7 @@
 
     If ``check_circular`` is false, then the circular reference check
     for container types will be skipped and a circular reference will
-    result in an ``OverflowError`` (or worse).
+    result in a ``RecursionError`` (or worse).
 
     If ``allow_nan`` is false, then it will be a ``ValueError`` to
     serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
diff --git a/Lib/json/encoder.py b/Lib/json/encoder.py
index c8c78b9..21bff2c 100644
--- a/Lib/json/encoder.py
+++ b/Lib/json/encoder.py
@@ -116,7 +116,7 @@
 
         If check_circular is true, then lists, dicts, and custom encoded
         objects will be checked for circular references during encoding to
-        prevent an infinite recursion (which would cause an OverflowError).
+        prevent an infinite recursion (which would cause a RecursionError).
         Otherwise, no such check takes place.
 
         If allow_nan is true, then NaN, Infinity, and -Infinity will be
diff --git a/Lib/json/tool.py b/Lib/json/tool.py
index 5dee0a7..0490b8c 100644
--- a/Lib/json/tool.py
+++ b/Lib/json/tool.py
@@ -13,6 +13,7 @@
 import argparse
 import json
 import sys
+from pathlib import Path
 
 
 def main():
@@ -25,9 +26,9 @@
                         help='a JSON file to be validated or pretty-printed',
                         default=sys.stdin)
     parser.add_argument('outfile', nargs='?',
-                        type=argparse.FileType('w', encoding="utf-8"),
+                        type=Path,
                         help='write the output of infile to outfile',
-                        default=sys.stdout)
+                        default=None)
     parser.add_argument('--sort-keys', action='store_true', default=False,
                         help='sort the output of dictionaries alphabetically by key')
     parser.add_argument('--no-ensure-ascii', dest='ensure_ascii', action='store_false',
@@ -58,15 +59,21 @@
         dump_args['indent'] = None
         dump_args['separators'] = ',', ':'
 
-    with options.infile as infile, options.outfile as outfile:
+    with options.infile as infile:
         try:
             if options.json_lines:
                 objs = (json.loads(line) for line in infile)
             else:
-                objs = (json.load(infile), )
-            for obj in objs:
-                json.dump(obj, outfile, **dump_args)
-                outfile.write('\n')
+                objs = (json.load(infile),)
+
+            if options.outfile is None:
+                out = sys.stdout
+            else:
+                out = options.outfile.open('w', encoding='utf-8')
+            with out as outfile:
+                for obj in objs:
+                    json.dump(obj, outfile, **dump_args)
+                    outfile.write('\n')
         except ValueError as e:
             raise SystemExit(e)
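
Switching outfile from argparse.FileType to Path means the output file is opened only after the input has been read and parsed, apparently so that infile and outfile may name the same file without the write truncating the input first. The core of the new write path, as a sketch:

    >>> import json, pathlib, tempfile
    >>> out = pathlib.Path(tempfile.mkdtemp()) / 'out.json'
    >>> with out.open('w', encoding='utf-8') as fp:
    ...     json.dump({'b': 1, 'a': 2}, fp, sort_keys=True, indent=4)
    >>> print(out.read_text())
    {
        "a": 2,
        "b": 1
    }
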
 
diff --git a/Lib/keyword.py b/Lib/keyword.py
index 59fcfb0..cc2b46b 100644
--- a/Lib/keyword.py
+++ b/Lib/keyword.py
@@ -6,7 +6,7 @@
 the python source tree and run:
 
     PYTHONPATH=Tools/peg_generator python3 -m pegen.keywordgen \
-        Grammar/Grammar \
+        Grammar/python.gram \
         Grammar/Tokens \
         Lib/keyword.py
 
@@ -19,7 +19,6 @@
     'False',
     'None',
     'True',
-    '__peg_parser__',
     'and',
     'as',
     'assert',
@@ -55,7 +54,9 @@
 ]
 
 softkwlist = [
-
+    '_',
+    'case',
+    'match'
 ]
 
 iskeyword = frozenset(kwlist).__contains__
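
With PEP 634, match, case, and _ arrive as soft keywords: they are contextual only, so they remain legal identifiers. A quick check (keyword.issoftkeyword sits just below this hunk in the file):

    >>> import keyword
    >>> keyword.iskeyword('match')
    False
    >>> keyword.issoftkeyword('match')
    True
    >>> match = 1  # still a valid variable name
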
diff --git a/Lib/lib2to3/Grammar.txt b/Lib/lib2to3/Grammar.txt
index e007dc1..fa7b150 100644
--- a/Lib/lib2to3/Grammar.txt
+++ b/Lib/lib2to3/Grammar.txt
@@ -18,15 +18,55 @@
 async_funcdef: ASYNC funcdef
 funcdef: 'def' NAME parameters ['->' test] ':' suite
 parameters: '(' [typedargslist] ')'
-typedargslist: ((tfpdef ['=' test] ',')*
-                ('*' [tname] (',' tname ['=' test])* [',' ['**' tname [',']]] | '**' tname [','])
-                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+
+# The following definition for typedargslist is equivalent to this set of rules:
+#
+#     arguments = argument (',' argument)*
+#     argument = tfpdef ['=' test]
+#     kwargs = '**' tname [',']
+#     args = '*' [tname]
+#     kwonly_kwargs = (',' argument)* [',' [kwargs]]
+#     args_kwonly_kwargs = args kwonly_kwargs | kwargs
+#     poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]]
+#     typedargslist_no_posonly  = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs
+#     typedargslist = (arguments ',' '/' [',' [typedargslist_no_posonly]]) | typedargslist_no_posonly
+#
+# It needs to be fully expanded to allow our LL(1) parser to work on it.
+
+typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [
+                     ',' [((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])*
+                            [',' ['**' tname [',']]] | '**' tname [','])
+                     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])]
+                ] | ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])*
+                     [',' ['**' tname [',']]] | '**' tname [','])
+                     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+
 tname: NAME [':' test]
 tfpdef: tname | '(' tfplist ')'
 tfplist: tfpdef (',' tfpdef)* [',']
-varargslist: ((vfpdef ['=' test] ',')*
-              ('*' [vname] (',' vname ['=' test])*  [',' ['**' vname [',']]] | '**' vname [','])
-              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+
+# The following definition for varargslist is equivalent to this set of rules:
+#
+#     arguments = argument (',' argument )*
+#     argument = vfpdef ['=' test]
+#     kwargs = '**' vname [',']
+#     args = '*' [vname]
+#     kwonly_kwargs = (',' argument )* [',' [kwargs]]
+#     args_kwonly_kwargs = args kwonly_kwargs | kwargs
+#     poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]]
+#     vararglist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs
+#     varargslist = (arguments ',' '/' [',' [vararglist_no_posonly]]) | vararglist_no_posonly
+#
+# It needs to be fully expanded to allow our LL(1) parser to work on it.
+
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [
+                     ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])*
+                            [',' ['**' vname [',']]] | '**' vname [','])
+                            | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+                     ]] | ((vfpdef ['=' test] ',')*
+                     ('*' [vname] (',' vname ['=' test])*  [',' ['**' vname [',']]]| '**' vname [','])
+                     | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+
 vname: NAME
 vfpdef: vname | '(' vfplist ')'
 vfplist: vfpdef (',' vfpdef)* [',']
diff --git a/Lib/lib2to3/fixes/fix_metaclass.py b/Lib/lib2to3/fixes/fix_metaclass.py
index d1cd10d..fe547b2 100644
--- a/Lib/lib2to3/fixes/fix_metaclass.py
+++ b/Lib/lib2to3/fixes/fix_metaclass.py
@@ -51,7 +51,7 @@
             # already in the preferred format, do nothing
             return
 
-    # !%@#! oneliners have no suite node, we have to fake one up
+    # !%@#! one-liners have no suite node, we have to fake one up
     for i, node in enumerate(cls_node.children):
         if node.type == token.COLON:
             break
diff --git a/Lib/lib2to3/fixes/fix_paren.py b/Lib/lib2to3/fixes/fix_paren.py
index b205aa7..df3da5f 100644
--- a/Lib/lib2to3/fixes/fix_paren.py
+++ b/Lib/lib2to3/fixes/fix_paren.py
@@ -1,4 +1,4 @@
-"""Fixer that addes parentheses where they are required
+"""Fixer that adds parentheses where they are required
 
 This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
 
diff --git a/Lib/lib2to3/pgen2/pgen.py b/Lib/lib2to3/pgen2/pgen.py
index b0cbd16..7abd5ce 100644
--- a/Lib/lib2to3/pgen2/pgen.py
+++ b/Lib/lib2to3/pgen2/pgen.py
@@ -12,7 +12,7 @@
     def __init__(self, filename, stream=None):
         close_stream = None
         if stream is None:
-            stream = open(filename)
+            stream = open(filename, encoding="utf-8")
             close_stream = stream.close
         self.filename = filename
         self.stream = stream
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 0e2685d..099dfa7 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -512,13 +512,14 @@
                         stashed = tok
                         continue
 
-                    if token == 'def':
+                    if token in ('def', 'for'):
                         if (stashed
                                 and stashed[0] == NAME
                                 and stashed[1] == 'async'):
 
-                            async_def = True
-                            async_def_indent = indents[-1]
+                            if token == 'def':
+                                async_def = True
+                                async_def_indent = indents[-1]
 
                             yield (ASYNC, stashed[1],
                                    stashed[2], stashed[3],
diff --git a/Lib/lib2to3/pytree.py b/Lib/lib2to3/pytree.py
index 2a6ef2e..729023d 100644
--- a/Lib/lib2to3/pytree.py
+++ b/Lib/lib2to3/pytree.py
@@ -720,8 +720,8 @@
                         r[self.name] = nodes[:count]
                     yield count, r
             except RuntimeError:
-                # We fall back to the iterative pattern matching scheme if the recursive
-                # scheme hits the recursion limit.
+                # Fall back to the iterative pattern matching scheme if the
+                # recursive scheme hits the recursion limit (RecursionError).
                 for count, r in self._iterative_matches(nodes):
                     if self.name:
                         r[self.name] = nodes[:count]
diff --git a/Lib/linecache.py b/Lib/linecache.py
index fa5dbd0..23191d6 100644
--- a/Lib/linecache.py
+++ b/Lib/linecache.py
@@ -154,7 +154,7 @@
 
     :return: True if a lazy load is registered in the cache,
         otherwise False. To register such a load a module loader with a
-        get_source method must be found, the filename must be a cachable
+        get_source method must be found, the filename must be a cacheable
         filename, and the filename must not be already cached.
     """
     if filename in cache:
@@ -165,9 +165,14 @@
     if not filename or (filename.startswith('<') and filename.endswith('>')):
         return False
     # Try for a __loader__, if available
-    if module_globals and '__loader__' in module_globals:
-        name = module_globals.get('__name__')
-        loader = module_globals['__loader__']
+    if module_globals and '__name__' in module_globals:
+        name = module_globals['__name__']
+        if (loader := module_globals.get('__loader__')) is None:
+            if spec := module_globals.get('__spec__'):
+                try:
+                    loader = spec.loader
+                except AttributeError:
+                    pass
         get_source = getattr(loader, 'get_source', None)
 
         if name and get_source:
diff --git a/Lib/locale.py b/Lib/locale.py
index 1a4e9f6..6d4f519 100644
--- a/Lib/locale.py
+++ b/Lib/locale.py
@@ -185,8 +185,14 @@
         formatted = percent % ((value,) + additional)
     else:
         formatted = percent % value
+    if percent[-1] in 'eEfFgGdiu':
+        formatted = _localize(formatted, grouping, monetary)
+    return formatted
+
+# Transform a formatted number string according to the locale settings.
+def _localize(formatted, grouping=False, monetary=False):
     # floats and decimal ints need special action!
-    if percent[-1] in 'eEfFgG':
+    if '.' in formatted:
         seps = 0
         parts = formatted.split('.')
         if grouping:
@@ -196,7 +202,7 @@
         formatted = decimal_point.join(parts)
         if seps:
             formatted = _strip_padding(formatted, seps)
-    elif percent[-1] in 'diu':
+    else:
         seps = 0
         if grouping:
             formatted, seps = _group(formatted, monetary=monetary)
@@ -267,7 +273,7 @@
         raise ValueError("Currency formatting is not possible using "
                          "the 'C' locale.")
 
-    s = _format('%%.%if' % digits, abs(val), grouping, monetary=True)
+    s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True)
     # '<' and '>' are markers if the sign must be inserted between symbol and value
     s = '<' + s + '>'
 
@@ -323,6 +329,10 @@
         string = string.replace(dd, '.')
     return string
 
+def localize(string, grouping=False, monetary=False):
+    """Parses a string as locale number according to the locale settings."""
+    return _localize(string, grouping, monetary)
+
 def atof(string, func=float):
     "Parses a string as a float according to the locale settings."
     return func(delocalize(string))
@@ -619,53 +629,49 @@
     """
     _setlocale(category, _build_localename(getdefaultlocale()))
 
-if sys.platform.startswith("win"):
-    # On Win32, this will return the ANSI code page
-    def getpreferredencoding(do_setlocale = True):
-        """Return the charset that the user is likely using."""
-        if sys.flags.utf8_mode:
-            return 'UTF-8'
-        import _bootlocale
-        return _bootlocale.getpreferredencoding(False)
-else:
-    # On Unix, if CODESET is available, use that.
-    try:
-        CODESET
-    except NameError:
+
+try:
+    from _locale import _get_locale_encoding
+except ImportError:
+    def _get_locale_encoding():
         if hasattr(sys, 'getandroidapilevel'):
             # On Android langinfo.h and CODESET are missing, and UTF-8 is
             # always used in mbstowcs() and wcstombs().
-            def getpreferredencoding(do_setlocale = True):
-                return 'UTF-8'
-        else:
-            # Fall back to parsing environment variables :-(
-            def getpreferredencoding(do_setlocale = True):
-                """Return the charset that the user is likely using,
-                by looking at environment variables."""
-                if sys.flags.utf8_mode:
-                    return 'UTF-8'
-                res = getdefaultlocale()[1]
-                if res is None:
-                    # LANG not set, default conservatively to ASCII
-                    res = 'ascii'
-                return res
-    else:
-        def getpreferredencoding(do_setlocale = True):
-            """Return the charset that the user is likely using,
-            according to the system configuration."""
-            if sys.flags.utf8_mode:
-                return 'UTF-8'
-            import _bootlocale
-            if do_setlocale:
-                oldloc = setlocale(LC_CTYPE)
-                try:
-                    setlocale(LC_CTYPE, "")
-                except Error:
-                    pass
-            result = _bootlocale.getpreferredencoding(False)
-            if do_setlocale:
-                setlocale(LC_CTYPE, oldloc)
-            return result
+            return 'UTF-8'
+        if sys.flags.utf8_mode:
+            return 'UTF-8'
+        encoding = getdefaultlocale()[1]
+        if encoding is None:
+            # LANG not set, default conservatively to ASCII
+            encoding = 'ascii'
+        return encoding
+
+try:
+    CODESET
+except NameError:
+    def getpreferredencoding(do_setlocale=True):
+        """Return the charset that the user is likely using."""
+        return _get_locale_encoding()
+else:
+    # On Unix, if CODESET is available, use that.
+    def getpreferredencoding(do_setlocale=True):
+        """Return the charset that the user is likely using,
+        according to the system configuration."""
+        if sys.flags.utf8_mode:
+            return 'UTF-8'
+
+        if not do_setlocale:
+            return _get_locale_encoding()
+
+        old_loc = setlocale(LC_CTYPE)
+        try:
+            try:
+                setlocale(LC_CTYPE, "")
+            except Error:
+                pass
+            return _get_locale_encoding()
+        finally:
+            setlocale(LC_CTYPE, old_loc)
 
 
 ### Database
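
Note: the locale.py refactor above extracts the grouping/decimal-point logic
into _localize() and exposes it as the new public localize(); currency() now
formats with an f-string before localizing. A short sketch, assuming the
en_US.UTF-8 locale is installed:

import locale

locale.setlocale(locale.LC_ALL, "en_US.UTF-8")        # assumption: locale exists
print(locale.localize("1234567.89", grouping=True))   # '1,234,567.89'
print(locale.currency(1234.5, grouping=True))         # '$1,234.50'
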
diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py
index 7b169a1..19bd2bc 100644
--- a/Lib/logging/__init__.py
+++ b/Lib/logging/__init__.py
@@ -118,7 +118,7 @@
 
 def getLevelName(level):
     """
-    Return the textual representation of logging level 'level'.
+    Return the textual or numeric representation of logging level 'level'.
 
     If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
     INFO, DEBUG) then you get the corresponding string. If you have
@@ -128,7 +128,11 @@
     If a numeric value corresponding to one of the defined levels is passed
     in, the corresponding string representation is returned.
 
-    Otherwise, the string "Level %s" % level is returned.
+    If a string representation of the level is passed in, the corresponding
+    numeric value is returned.
+
+    If no matching numeric or string value is passed in, the string
+    'Level %s' % level is returned.
     """
     # See Issues #22386, #27937 and #29220 for why it's this way
     result = _levelToName.get(level)
@@ -194,7 +198,8 @@
             raise ValueError("Unknown level: %r" % level)
         rv = _nameToLevel[level]
     else:
-        raise TypeError("Level not an integer or a valid string: %r" % level)
+        raise TypeError("Level not an integer or a valid string: %r"
+                        % (level,))
     return rv
 
 #---------------------------------------------------------------------------
@@ -411,8 +416,9 @@
     asctime_search = '%(asctime)'
     validation_pattern = re.compile(r'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]', re.I)
 
-    def __init__(self, fmt):
+    def __init__(self, fmt, *, defaults=None):
         self._fmt = fmt or self.default_format
+        self._defaults = defaults
 
     def usesTime(self):
         return self._fmt.find(self.asctime_search) >= 0
@@ -423,7 +429,11 @@
             raise ValueError("Invalid format '%s' for '%s' style" % (self._fmt, self.default_format[0]))
 
     def _format(self, record):
-        return self._fmt % record.__dict__
+        if defaults := self._defaults:
+            values = defaults | record.__dict__
+        else:
+            values = record.__dict__
+        return self._fmt % values
 
     def format(self, record):
         try:
@@ -441,7 +451,11 @@
     field_spec = re.compile(r'^(\d+|\w+)(\.\w+|\[[^]]+\])*$')
 
     def _format(self, record):
-        return self._fmt.format(**record.__dict__)
+        if defaults := self._defaults:
+            values = defaults | record.__dict__
+        else:
+            values = record.__dict__
+        return self._fmt.format(**values)
 
     def validate(self):
         """Validate the input format, ensure it is the correct string formatting style"""
@@ -467,8 +481,8 @@
     asctime_format = '${asctime}'
     asctime_search = '${asctime}'
 
-    def __init__(self, fmt):
-        self._fmt = fmt or self.default_format
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
         self._tpl = Template(self._fmt)
 
     def usesTime(self):
@@ -490,7 +504,11 @@
             raise ValueError('invalid format: no fields')
 
     def _format(self, record):
-        return self._tpl.substitute(**record.__dict__)
+        if defaults := self._defaults:
+            values = defaults | record.__dict__
+        else:
+            values = record.__dict__
+        return self._tpl.substitute(**values)
 
 
 BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
@@ -546,7 +564,8 @@
 
     converter = time.localtime
 
-    def __init__(self, fmt=None, datefmt=None, style='%', validate=True):
+    def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
+                 defaults=None):
         """
         Initialize the formatter with specified format strings.
 
@@ -565,7 +584,7 @@
         if style not in _STYLES:
             raise ValueError('Style must be one of: %s' % ','.join(
                              _STYLES.keys()))
-        self._style = _STYLES[style][0](fmt)
+        self._style = _STYLES[style][0](fmt, defaults=defaults)
         if validate:
             self._style.validate()
 
@@ -859,6 +878,7 @@
         self._name = None
         self.level = _checkLevel(level)
         self.formatter = None
+        self._closed = False
         # Add the handler to the global _handlerList (for cleanup on shutdown)
         _addHandlerRef(self)
         self.createLock()
@@ -977,6 +997,7 @@
         #get the module data lock, as we're updating a shared structure.
         _acquireLock()
         try:    #unlikely to raise an exception, but you never know...
+            self._closed = True
             if self._name and self._name in _handlers:
                 del _handlers[self._name]
         finally:
@@ -1131,8 +1152,14 @@
         self.baseFilename = os.path.abspath(filename)
         self.mode = mode
         self.encoding = encoding
+        if "b" not in mode:
+            self.encoding = io.text_encoding(encoding)
         self.errors = errors
         self.delay = delay
+        # bpo-26789: FileHandler keeps a reference to the builtin open()
+        # function to be able to open or reopen the file during Python
+        # finalization.
+        self._builtin_open = open
         if delay:
             #We don't open the stream, but we still need to call the
             #Handler constructor to set level, formatter, lock etc.
@@ -1159,6 +1186,8 @@
             finally:
                 # Issue #19523: call unconditionally to
                 # prevent a handler leak when delay is set
+                # Also see Issue #42378: we also rely on
+                # self._closed being set to True there
                 StreamHandler.close(self)
         finally:
             self.release()
@@ -1168,8 +1197,9 @@
         Open the current base file with the (original) mode and encoding.
         Return the resulting stream.
         """
-        return open(self.baseFilename, self.mode, encoding=self.encoding,
-                    errors=self.errors)
+        open_func = self._builtin_open
+        return open_func(self.baseFilename, self.mode,
+                         encoding=self.encoding, errors=self.errors)
 
     def emit(self, record):
         """
@@ -1177,10 +1207,15 @@
 
         If the stream was not opened because 'delay' was specified in the
         constructor, open it before calling the superclass's emit.
+
+        If stream is not open, current mode is 'w' and `_closed=True`, record
+        will not be emitted (see Issue #42378).
         """
         if self.stream is None:
-            self.stream = self._open()
-        StreamHandler.emit(self, record)
+            if self.mode != 'w' or not self._closed:
+                self.stream = self._open()
+        if self.stream:
+            StreamHandler.emit(self, record)
 
     def __repr__(self):
         level = getLevelName(self.level)
@@ -1269,6 +1304,14 @@
         self.loggerClass = None
         self.logRecordFactory = None
 
+    @property
+    def disable(self):
+        return self._disable
+
+    @disable.setter
+    def disable(self, value):
+        self._disable = _checkLevel(value)
+
     def getLogger(self, name):
         """
         Get a logger with the specified name (channel name), creating it
@@ -1480,7 +1523,11 @@
         if self.isEnabledFor(CRITICAL):
             self._log(CRITICAL, msg, args, **kwargs)
 
-    fatal = critical
+    def fatal(self, msg, *args, **kwargs):
+        """
+        Don't use this method, use critical() instead.
+        """
+        self.critical(msg, *args, **kwargs)
 
     def log(self, level, msg, *args, **kwargs):
         """
@@ -1751,7 +1798,7 @@
     information in logging output.
     """
 
-    def __init__(self, logger, extra):
+    def __init__(self, logger, extra=None):
         """
         Initialize the adapter with a logger and a dict-like object which
         provides contextual information. This constructor signature allows
@@ -1986,8 +2033,10 @@
                 filename = kwargs.pop("filename", None)
                 mode = kwargs.pop("filemode", 'a')
                 if filename:
-                    if 'b'in mode:
+                    if 'b' in mode:
                         errors = None
+                    else:
+                        encoding = io.text_encoding(encoding)
                     h = FileHandler(filename, mode,
                                     encoding=encoding, errors=errors)
                 else:
@@ -2039,7 +2088,11 @@
         basicConfig()
     root.critical(msg, *args, **kwargs)
 
-fatal = critical
+def fatal(msg, *args, **kwargs):
+    """
+    Don't use this function, use critical() instead.
+    """
+    critical(msg, *args, **kwargs)
 
 def error(msg, *args, **kwargs):
     """
diff --git a/Lib/logging/config.py b/Lib/logging/config.py
index fd3aded..3bc63b7 100644
--- a/Lib/logging/config.py
+++ b/Lib/logging/config.py
@@ -48,7 +48,7 @@
 #   _listener holds the server object doing the listening
 _listener = None
 
-def fileConfig(fname, defaults=None, disable_existing_loggers=True):
+def fileConfig(fname, defaults=None, disable_existing_loggers=True, encoding=None):
     """
     Read the logging configuration from a ConfigParser-format file.
 
@@ -66,7 +66,8 @@
         if hasattr(fname, 'readline'):
             cp.read_file(fname)
         else:
-            cp.read(fname)
+            encoding = io.text_encoding(encoding)
+            cp.read(fname, encoding=encoding)
 
     formatters = _create_formatters(cp)
 
diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py
index 867ef4e..61a3995 100644
--- a/Lib/logging/handlers.py
+++ b/Lib/logging/handlers.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
 #
 # Permission to use, copy, modify, and distribute this software and its
 # documentation for any purpose and without fee is hereby granted,
@@ -18,12 +18,12 @@
 Additional handlers for the logging package for Python. The core package is
 based on PEP 282 and comments thereto in comp.lang.python.
 
-Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
 
 To use, simply 'import logging.handlers' and log away!
 """
 
-import logging, socket, os, pickle, struct, time, re
+import io, logging, socket, os, pickle, struct, time, re
 from stat import ST_DEV, ST_INO, ST_MTIME
 import queue
 import threading
@@ -150,6 +150,8 @@
         # on each run.
         if maxBytes > 0:
             mode = 'a'
+        if "b" not in mode:
+            encoding = io.text_encoding(encoding)
         BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                      delay=delay, errors=errors)
         self.maxBytes = maxBytes
@@ -185,14 +187,17 @@
         Basically, see if the supplied record would cause the file to exceed
         the size limit we have.
         """
+        # See bpo-45401: Never rollover anything other than regular files
+        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
+            return False
         if self.stream is None:                 # delay was set...
             self.stream = self._open()
         if self.maxBytes > 0:                   # are we rolling over?
             msg = "%s\n" % self.format(record)
             self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
             if self.stream.tell() + len(msg) >= self.maxBytes:
-                return 1
-        return 0
+                return True
+        return False
 
 class TimedRotatingFileHandler(BaseRotatingHandler):
     """
@@ -205,6 +210,7 @@
     def __init__(self, filename, when='h', interval=1, backupCount=0,
                  encoding=None, delay=False, utc=False, atTime=None,
                  errors=None):
+        encoding = io.text_encoding(encoding)
         BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                      delay=delay, errors=errors)
         self.when = when.upper()
@@ -342,10 +348,13 @@
         record is not used, as we are just comparing times, but it is needed so
         the method signatures are the same
         """
+        # See bpo-45401: Never rollover anything other than regular files
+        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
+            return False
         t = int(time.time())
         if t >= self.rolloverAt:
-            return 1
-        return 0
+            return True
+        return False
 
     def getFilesToDelete(self):
         """
@@ -356,13 +365,32 @@
         dirName, baseName = os.path.split(self.baseFilename)
         fileNames = os.listdir(dirName)
         result = []
-        prefix = baseName + "."
+        # See bpo-44753: Don't use the extension when computing the prefix.
+        n, e = os.path.splitext(baseName)
+        prefix = n + '.'
         plen = len(prefix)
         for fileName in fileNames:
+            if self.namer is None:
+                # Our files will always start with baseName
+                if not fileName.startswith(baseName):
+                    continue
+            else:
+                # Our files could be just about anything after custom naming, but
+                # likely candidates are of the form
+                # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log
+                if (not fileName.startswith(baseName) and fileName.endswith(e) and
+                    len(fileName) > (plen + 1) and not fileName[plen+1].isdigit()):
+                    continue
+
             if fileName[:plen] == prefix:
                 suffix = fileName[plen:]
-                if self.extMatch.match(suffix):
-                    result.append(os.path.join(dirName, fileName))
+                # See bpo-45628: The date/time suffix could be anywhere in the
+                # filename
+                parts = suffix.split('.')
+                for part in parts:
+                    if self.extMatch.match(part):
+                        result.append(os.path.join(dirName, fileName))
+                        break
         if len(result) < self.backupCount:
             result = []
         else:
@@ -442,6 +470,8 @@
     """
     def __init__(self, filename, mode='a', encoding=None, delay=False,
                  errors=None):
+        if "b" not in mode:
+            encoding = io.text_encoding(encoding)
         logging.FileHandler.__init__(self, filename, mode=mode,
                                      encoding=encoding, delay=delay,
                                      errors=errors)
@@ -1142,7 +1172,7 @@
 
 class HTTPHandler(logging.Handler):
     """
-    A class which sends records to a Web server, using either GET or
+    A class which sends records to a web server, using either GET or
     POST semantics.
     """
     def __init__(self, host, url, method="GET", secure=False, credentials=None,
@@ -1191,7 +1221,7 @@
         """
         Emit a record.
 
-        Send the record to the Web server as a percent-encoded dictionary
+        Send the record to the web server as a percent-encoded dictionary
         """
         try:
             import urllib.parse
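
Note: the two shouldRollover() guards above (bpo-45401) make rotation a no-op
for anything that is not a regular file, so a handler pointed at a device can
never rename it away. A sketch, assuming a POSIX /dev/null:

import logging
import logging.handlers

h = logging.handlers.RotatingFileHandler("/dev/null", maxBytes=1, backupCount=2)
rec = logging.LogRecord("demo", logging.INFO, __file__, 1, "x", None, None)
print(h.shouldRollover(rec))   # False: path exists but is not a regular file
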
diff --git a/Lib/lzma.py b/Lib/lzma.py
index 0817b87..800f521 100644
--- a/Lib/lzma.py
+++ b/Lib/lzma.py
@@ -225,14 +225,22 @@
         """Write a bytes object to the file.
 
         Returns the number of uncompressed bytes written, which is
-        always len(data). Note that due to buffering, the file on disk
-        may not reflect the data written until close() is called.
+        always the length of data in bytes. Note that due to buffering,
+        the file on disk may not reflect the data written until close()
+        is called.
         """
         self._check_can_write()
+        if isinstance(data, (bytes, bytearray)):
+            length = len(data)
+        else:
+            # accept any data that supports the buffer protocol
+            data = memoryview(data)
+            length = data.nbytes
+
         compressed = self._compressor.compress(data)
         self._fp.write(compressed)
-        self._pos += len(data)
-        return len(data)
+        self._pos += length
+        return length
 
     def seek(self, offset, whence=io.SEEK_SET):
         """Change the file position.
@@ -302,6 +310,7 @@
                            preset=preset, filters=filters)
 
     if "t" in mode:
+        encoding = io.text_encoding(encoding)
         return io.TextIOWrapper(binary_file, encoding, errors, newline)
     else:
         return binary_file
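
Note: LZMAFile.write() now accepts any object supporting the buffer protocol
and reports the written length via memoryview.nbytes. A sketch with a
hypothetical output path:

import lzma

with lzma.open("demo.xz", "wb") as f:
    n = f.write(memoryview(b"hello")[1:])   # not bytes/bytearray, still accepted
    print(n)                                # 4
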
diff --git a/Lib/mimetypes.py b/Lib/mimetypes.py
index 92c2a47..b72ce08 100644
--- a/Lib/mimetypes.py
+++ b/Lib/mimetypes.py
@@ -27,6 +27,12 @@
 import sys
 import posixpath
 import urllib.parse
+
+try:
+    from _winapi import _mimetypes_read_windows_registry
+except ImportError:
+    _mimetypes_read_windows_registry = None
+
 try:
     import winreg as _winreg
 except ImportError:
@@ -135,25 +141,23 @@
                 type = 'text/plain'
             return type, None           # never compressed, so encoding is None
         base, ext = posixpath.splitext(url)
-        while ext in self.suffix_map:
-            base, ext = posixpath.splitext(base + self.suffix_map[ext])
+        while (ext_lower := ext.lower()) in self.suffix_map:
+            base, ext = posixpath.splitext(base + self.suffix_map[ext_lower])
+        # encodings_map is case sensitive
         if ext in self.encodings_map:
             encoding = self.encodings_map[ext]
             base, ext = posixpath.splitext(base)
         else:
             encoding = None
+        ext = ext.lower()
         types_map = self.types_map[True]
         if ext in types_map:
             return types_map[ext], encoding
-        elif ext.lower() in types_map:
-            return types_map[ext.lower()], encoding
         elif strict:
             return None, encoding
         types_map = self.types_map[False]
         if ext in types_map:
             return types_map[ext], encoding
-        elif ext.lower() in types_map:
-            return types_map[ext.lower()], encoding
         else:
             return None, encoding
 
@@ -169,7 +173,7 @@
         but non-standard types.
         """
         type = type.lower()
-        extensions = self.types_map_inv[True].get(type, [])
+        extensions = list(self.types_map_inv[True].get(type, []))
         if not strict:
             for ext in self.types_map_inv[False].get(type, []):
                 if ext not in extensions:
@@ -237,10 +241,21 @@
         types.
         """
 
-        # Windows only
-        if not _winreg:
+        if not _mimetypes_read_windows_registry and not _winreg:
             return
 
+        add_type = self.add_type
+        if strict:
+            add_type = lambda type, ext: self.add_type(type, ext, True)
+
+        # Accelerated function if it is available
+        if _mimetypes_read_windows_registry:
+            _mimetypes_read_windows_registry(add_type)
+        elif _winreg:
+            self._read_windows_registry(add_type)
+
+    @classmethod
+    def _read_windows_registry(cls, add_type):
         def enum_types(mimedb):
             i = 0
             while True:
@@ -265,7 +280,7 @@
                             subkey, 'Content Type')
                         if datatype != _winreg.REG_SZ:
                             continue
-                        self.add_type(mimetype, subkeyname, strict)
+                        add_type(mimetype, subkeyname)
                 except OSError:
                     continue
 
@@ -349,8 +364,8 @@
 
     if files is None or _db is None:
         db = MimeTypes()
-        if _winreg:
-            db.read_windows_registry()
+        # Quick return if not supported
+        db.read_windows_registry()
 
         if files is None:
             files = knownfiles
@@ -448,6 +463,7 @@
         '.dvi'    : 'application/x-dvi',
         '.gtar'   : 'application/x-gtar',
         '.hdf'    : 'application/x-hdf',
+        '.h5'     : 'application/x-hdf5',
         '.latex'  : 'application/x-latex',
         '.mif'    : 'application/x-mif',
         '.cdf'    : 'application/x-netcdf',
@@ -480,10 +496,19 @@
         '.wsdl'   : 'application/xml',
         '.xpdl'   : 'application/xml',
         '.zip'    : 'application/zip',
+        '.3gp'    : 'audio/3gpp',
+        '.3gpp'   : 'audio/3gpp',
+        '.3g2'    : 'audio/3gpp2',
+        '.3gpp2'  : 'audio/3gpp2',
+        '.aac'    : 'audio/aac',
+        '.adts'   : 'audio/aac',
+        '.loas'   : 'audio/aac',
+        '.ass'    : 'audio/aac',
         '.au'     : 'audio/basic',
         '.snd'    : 'audio/basic',
         '.mp3'    : 'audio/mpeg',
         '.mp2'    : 'audio/mpeg',
+        '.opus'   : 'audio/opus',
         '.aif'    : 'audio/x-aiff',
         '.aifc'   : 'audio/x-aiff',
         '.aiff'   : 'audio/x-aiff',
@@ -495,6 +520,8 @@
         '.jpg'    : 'image/jpeg',
         '.jpe'    : 'image/jpeg',
         '.jpeg'   : 'image/jpeg',
+        '.heic'   : 'image/heic',
+        '.heif'   : 'image/heif',
         '.png'    : 'image/png',
         '.svg'    : 'image/svg+xml',
         '.tiff'   : 'image/tiff',
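
Note: guess_type() now lower-cases the extension once up front (while keeping
encodings_map case-sensitive, so '.Z' and '.z' stay distinct), the table gains
aac/opus/heif/3gpp entries, and read_windows_registry() prefers the accelerated
_winapi helper when available. For example:

import mimetypes

print(mimetypes.guess_type("notes.TXT"))   # ('text/plain', None)
print(mimetypes.guess_type("clip.opus"))   # ('audio/opus', None)
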
diff --git a/Lib/msilib/schema.py b/Lib/msilib/schema.py
index eeb3ecd..9f5745c 100644
--- a/Lib/msilib/schema.py
+++ b/Lib/msilib/schema.py
@@ -664,7 +664,7 @@
 ('Class','Component_','N',None, None, 'Component',1,'Identifier',None, 'Required foreign key into the Component Table, specifying the component for which to return a path when called through LocateComponent.',),
 ('Class','Context','N',None, None, None, None, 'Identifier',None, 'The numeric server context for this server. CLSCTX_xxxx',),
 ('Class','DefInprocHandler','Y',None, None, None, None, 'Filename','1;2;3','Optional default inproc handler.  Only optionally provided if Context=CLSCTX_LOCAL_SERVER.  Typically "ole32.dll" or "mapi32.dll"',),
-('Class','FileTypeMask','Y',None, None, None, None, 'Text',None, 'Optional string containing information for the HKCRthis CLSID) key. If multiple patterns exist, they must be delimited by a semicolon, and numeric subkeys will be generated: 0,1,2...',),
+('Class','FileTypeMask','Y',None, None, None, None, 'Text',None, 'Optional string containing information for the HKCRthis CLSID key. If multiple patterns exist, they must be delimited by a semicolon, and numeric subkeys will be generated: 0,1,2...',),
 ('Class','Icon_','Y',None, None, 'Icon',1,'Identifier',None, 'Optional foreign key into the Icon Table, specifying the icon file associated with this CLSID. Will be written under the DefaultIcon key.',),
 ('Class','IconIndex','Y',-32767,32767,None, None, None, None, 'Optional icon index.',),
 ('Class','ProgId_Default','Y',None, None, 'ProgId',1,'Text',None, 'Optional ProgId associated with this CLSID.',),
diff --git a/Lib/multiprocessing/managers.py b/Lib/multiprocessing/managers.py
index 0eb16c6..b6b4cdd 100644
--- a/Lib/multiprocessing/managers.py
+++ b/Lib/multiprocessing/managers.py
@@ -8,8 +8,7 @@
 # Licensed to PSF under a Contributor Agreement.
 #
 
-__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token',
-            'SharedMemoryManager' ]
+__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
 
 #
 # Imports
@@ -35,9 +34,11 @@
 from . import get_context
 try:
     from . import shared_memory
-    HAS_SHMEM = True
 except ImportError:
     HAS_SHMEM = False
+else:
+    HAS_SHMEM = True
+    __all__.append('SharedMemoryManager')
 
 #
 # Register some things for pickling
@@ -192,11 +193,8 @@
             t.daemon = True
             t.start()
 
-    def handle_request(self, c):
-        '''
-        Handle a new connection
-        '''
-        funcname = result = request = None
+    def _handle_request(self, c):
+        request = None
         try:
             connection.deliver_challenge(c, self.authkey)
             connection.answer_challenge(c, self.authkey)
@@ -213,6 +211,7 @@
                 msg = ('#TRACEBACK', format_exc())
             else:
                 msg = ('#RETURN', result)
+
         try:
             c.send(msg)
         except Exception as e:
@@ -224,7 +223,17 @@
             util.info(' ... request was %r', request)
             util.info(' ... exception was %r', e)
 
-        c.close()
+    def handle_request(self, conn):
+        '''
+        Handle a new connection
+        '''
+        try:
+            self._handle_request(conn)
+        except SystemExit:
+            # Server.serve_client() calls sys.exit(0) on EOF
+            pass
+        finally:
+            conn.close()
 
     def serve_client(self, conn):
         '''
@@ -959,7 +968,7 @@
 
 
 def AutoProxy(token, serializer, manager=None, authkey=None,
-              exposed=None, incref=True):
+              exposed=None, incref=True, manager_owned=False):
     '''
     Return an auto-proxy for `token`
     '''
@@ -979,7 +988,7 @@
 
     ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
     proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
-                      incref=incref)
+                      incref=incref, manager_owned=manager_owned)
     proxy._isauto = True
     return proxy
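
Note: SharedMemoryManager is now exported only when multiprocessing.shared_memory
actually imports, and handle_request() closes the connection in a finally block
while swallowing the SystemExit that serve_client() raises on EOF. Feature
detection therefore looks like:

try:
    from multiprocessing.managers import SharedMemoryManager
except ImportError:
    SharedMemoryManager = None   # platform lacks shared memory support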
 
diff --git a/Lib/multiprocessing/resource_tracker.py b/Lib/multiprocessing/resource_tracker.py
index c9bfa9b..cc42dbd 100644
--- a/Lib/multiprocessing/resource_tracker.py
+++ b/Lib/multiprocessing/resource_tracker.py
@@ -37,8 +37,16 @@
     import _multiprocessing
     import _posixshmem
 
+    # Use sem_unlink() to clean up named semaphores.
+    #
+    # sem_unlink() may be missing if the Python build process detected the
+    # absence of POSIX named semaphores. In that case, no named semaphores were
+    # ever opened, so no cleanup would be necessary.
+    if hasattr(_multiprocessing, 'sem_unlink'):
+        _CLEANUP_FUNCS.update({
+            'semaphore': _multiprocessing.sem_unlink,
+        })
     _CLEANUP_FUNCS.update({
-        'semaphore': _multiprocessing.sem_unlink,
         'shared_memory': _posixshmem.shm_unlink,
     })
 
diff --git a/Lib/multiprocessing/util.py b/Lib/multiprocessing/util.py
index 21f2a7e..a468333 100644
--- a/Lib/multiprocessing/util.py
+++ b/Lib/multiprocessing/util.py
@@ -419,7 +419,7 @@
     try:
         fd = os.open(os.devnull, os.O_RDONLY)
         try:
-            sys.stdin = open(fd, closefd=False)
+            sys.stdin = open(fd, encoding="utf-8", closefd=False)
         except:
             os.close(fd)
             raise
diff --git a/Lib/netrc.py b/Lib/netrc.py
index f0ae48c..734d94c 100644
--- a/Lib/netrc.py
+++ b/Lib/netrc.py
@@ -26,8 +26,12 @@
             file = os.path.join(os.path.expanduser("~"), ".netrc")
         self.hosts = {}
         self.macros = {}
-        with open(file) as fp:
-            self._parse(file, fp, default_netrc)
+        try:
+            with open(file, encoding="utf-8") as fp:
+                self._parse(file, fp, default_netrc)
+        except UnicodeDecodeError:
+            with open(file, encoding="locale") as fp:
+                self._parse(file, fp, default_netrc)
 
     def _parse(self, file, fp, default_netrc):
         lexer = shlex.shlex(fp)
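
Note: netrc files are now read as UTF-8 first, falling back to the locale
encoding only on UnicodeDecodeError. A sketch, assuming a ~/.netrc exists and
the host name is hypothetical:

import netrc

auth = netrc.netrc()                        # reads ~/.netrc as UTF-8 first
print(auth.authenticators("example.com"))   # (login, account, password) or None
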
diff --git a/Lib/ntpath.py b/Lib/ntpath.py
index 6f77177..527c7ae 100644
--- a/Lib/ntpath.py
+++ b/Lib/ntpath.py
@@ -312,12 +312,25 @@
             drive = ''
         userhome = join(drive, os.environ['HOMEPATH'])
 
+    if i != 1: #~user
+        target_user = path[1:i]
+        if isinstance(target_user, bytes):
+            target_user = os.fsdecode(target_user)
+        current_user = os.environ.get('USERNAME')
+
+        if target_user != current_user:
+            # Try to guess user home directory.  By default all user
+            # profile directories are located in the same place and are
+            # named by corresponding usernames.  If userhome isn't a
+            # normal profile directory, this guess is likely wrong,
+            # so we bail out.
+            if current_user != basename(userhome):
+                return path
+            userhome = join(dirname(userhome), target_user)
+
     if isinstance(path, bytes):
         userhome = os.fsencode(userhome)
 
-    if i != 1: #~user
-        userhome = join(dirname(userhome), path[1:i])
-
     return userhome + path[i:]
 
 
@@ -622,7 +635,7 @@
                 tail = join(name, tail) if tail else name
         return tail
 
-    def realpath(path):
+    def realpath(path, *, strict=False):
         path = normpath(path)
         if isinstance(path, bytes):
             prefix = b'\\\\?\\'
@@ -647,6 +660,8 @@
             path = _getfinalpathname(path)
             initial_winerror = 0
         except OSError as ex:
+            if strict:
+                raise
             initial_winerror = ex.winerror
             path = _getfinalpathname_nonstrict(path)
         # The path returned by _getfinalpathname will always start with \\?\ -
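
Note: expanduser('~user') now substitutes the username into the profile path
only when the current user's home follows the usual <profiles>\<username>
layout, returning the path untouched otherwise; realpath() gains a strict flag
that re-raises resolution errors instead of falling back. A sketch (Windows,
usernames hypothetical):

import ntpath

print(ntpath.expanduser("~"))        # e.g. C:\Users\me
print(ntpath.expanduser("~alice"))   # C:\Users\alice, or '~alice' unchanged
                                     # when the profile layout can't be trusted
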
diff --git a/Lib/nturl2path.py b/Lib/nturl2path.py
index 853e660..61852af 100644
--- a/Lib/nturl2path.py
+++ b/Lib/nturl2path.py
@@ -50,6 +50,14 @@
     # becomes
     #   ///C:/foo/bar/spam.foo
     import urllib.parse
+    # First, clean up some special forms. We are going to sacrifice
+    # the additional information anyway
+    if p[:4] == '\\\\?\\':
+        p = p[4:]
+        if p[:4].upper() == 'UNC\\':
+            p = '\\' + p[4:]
+        elif p[1:2] != ':':
+            raise OSError('Bad path: ' + p)
     if not ':' in p:
         # No drive specifier, just convert slashes and quote the name
         if p[:2] == '\\\\':
@@ -59,7 +67,7 @@
             p = '\\\\' + p
         components = p.split('\\')
         return urllib.parse.quote('/'.join(components))
-    comp = p.split(':')
+    comp = p.split(':', maxsplit=2)
     if len(comp) != 2 or len(comp[0]) > 1:
         error = 'Bad path: ' + p
         raise OSError(error)
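
Note: pathname2url() now strips the \\?\ extended-length prefix (and its UNC
form) before converting, and rejects extended paths that are neither UNC nor
drive-rooted. A sketch of the drive-letter case:

import nturl2path

print(nturl2path.pathname2url(r"\\?\C:\dir\file.txt"))  # '///C:/dir/file.txt'
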
diff --git a/Lib/numbers.py b/Lib/numbers.py
index ed815ef..5b98e64 100644
--- a/Lib/numbers.py
+++ b/Lib/numbers.py
@@ -33,7 +33,7 @@
     """Complex defines the operations that work on the builtin complex type.
 
     In short, those are: a conversion to complex, .real, .imag, +, -,
-    *, /, abs(), .conjugate, ==, and !=.
+    *, /, **, abs(), .conjugate, ==, and !=.
 
     If it is given heterogeneous arguments, and doesn't have special
     knowledge about them, it should fall back to the builtin complex
@@ -292,7 +292,11 @@
 
 
 class Integral(Rational):
-    """Integral adds a conversion to int and the bit-string operations."""
+    """Integral adds methods that work on integral numbers.
+
+    In short, these are conversion to int, pow with modulus, and the
+    bit-string operations.
+    """
 
     __slots__ = ()
 
diff --git a/Lib/opcode.py b/Lib/opcode.py
index ac1aa53..37e88e9 100644
--- a/Lib/opcode.py
+++ b/Lib/opcode.py
@@ -67,7 +67,6 @@
 def_op('UNARY_NOT', 12)
 
 def_op('UNARY_INVERT', 15)
-
 def_op('BINARY_MATRIX_MULTIPLY', 16)
 def_op('INPLACE_MATRIX_MULTIPLY', 17)
 
@@ -82,8 +81,12 @@
 def_op('BINARY_TRUE_DIVIDE', 27)
 def_op('INPLACE_FLOOR_DIVIDE', 28)
 def_op('INPLACE_TRUE_DIVIDE', 29)
+def_op('GET_LEN', 30)
+def_op('MATCH_MAPPING', 31)
+def_op('MATCH_SEQUENCE', 32)
+def_op('MATCH_KEYS', 33)
+def_op('COPY_DICT_WITHOUT_KEYS', 34)
 
-def_op('RERAISE', 48)
 def_op('WITH_EXCEPT_START', 49)
 def_op('GET_AITER', 50)
 def_op('GET_ANEXT', 51)
@@ -105,7 +108,6 @@
 def_op('INPLACE_POWER', 67)
 def_op('GET_ITER', 68)
 def_op('GET_YIELD_FROM_ITER', 69)
-
 def_op('PRINT_EXPR', 70)
 def_op('LOAD_BUILD_CLASS', 71)
 def_op('YIELD_FROM', 72)
@@ -137,6 +139,7 @@
 name_op('DELETE_ATTR', 96)      # ""
 name_op('STORE_GLOBAL', 97)     # ""
 name_op('DELETE_GLOBAL', 98)    # ""
+def_op('ROT_N', 99)
 def_op('LOAD_CONST', 100)       # Index in const list
 hasconst.append(100)
 name_op('LOAD_NAME', 101)       # Index in name list
@@ -149,18 +152,16 @@
 hascompare.append(107)
 name_op('IMPORT_NAME', 108)     # Index in name list
 name_op('IMPORT_FROM', 109)     # Index in name list
-
 jrel_op('JUMP_FORWARD', 110)    # Number of bytes to skip
 jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
 jabs_op('JUMP_IF_TRUE_OR_POP', 112)  # ""
 jabs_op('JUMP_ABSOLUTE', 113)        # ""
 jabs_op('POP_JUMP_IF_FALSE', 114)    # ""
 jabs_op('POP_JUMP_IF_TRUE', 115)     # ""
-
 name_op('LOAD_GLOBAL', 116)     # Index in name list
-
 def_op('IS_OP', 117)
 def_op('CONTAINS_OP', 118)
+def_op('RERAISE', 119)
 
 jabs_op('JUMP_IF_NOT_EXC_MATCH', 121)
 jrel_op('SETUP_FINALLY', 122)   # Distance to target address
@@ -172,10 +173,12 @@
 def_op('DELETE_FAST', 126)      # Local variable number
 haslocal.append(126)
 
+def_op('GEN_START', 129)        # Kind of generator/coroutine
 def_op('RAISE_VARARGS', 130)    # Number of raise arguments (1, 2, or 3)
 def_op('CALL_FUNCTION', 131)    # #args
 def_op('MAKE_FUNCTION', 132)    # Flags
 def_op('BUILD_SLICE', 133)      # Number of items
+
 def_op('LOAD_CLOSURE', 135)
 hasfree.append(135)
 def_op('LOAD_DEREF', 136)
@@ -187,28 +190,24 @@
 
 def_op('CALL_FUNCTION_KW', 141)  # #args + #kwargs
 def_op('CALL_FUNCTION_EX', 142)  # Flags
-
 jrel_op('SETUP_WITH', 143)
-
+def_op('EXTENDED_ARG', 144)
+EXTENDED_ARG = 144
 def_op('LIST_APPEND', 145)
 def_op('SET_ADD', 146)
 def_op('MAP_ADD', 147)
-
 def_op('LOAD_CLASSDEREF', 148)
 hasfree.append(148)
 
-def_op('EXTENDED_ARG', 144)
-EXTENDED_ARG = 144
+def_op('MATCH_CLASS', 152)
 
 jrel_op('SETUP_ASYNC_WITH', 154)
-
 def_op('FORMAT_VALUE', 155)
 def_op('BUILD_CONST_KEY_MAP', 156)
 def_op('BUILD_STRING', 157)
 
 name_op('LOAD_METHOD', 160)
 def_op('CALL_METHOD', 161)
-
 def_op('LIST_EXTEND', 162)
 def_op('SET_UPDATE', 163)
 def_op('DICT_MERGE', 164)
diff --git a/Lib/operator.py b/Lib/operator.py
index fb58851..241fdbb 100644
--- a/Lib/operator.py
+++ b/Lib/operator.py
@@ -155,10 +155,10 @@
     return b in a
 
 def countOf(a, b):
-    "Return the number of times b occurs in a."
+    "Return the number of items in a which are, or which equal, b."
     count = 0
     for i in a:
-        if i == b:
+        if i is b or i == b:
             count += 1
     return count
 
@@ -173,7 +173,7 @@
 def indexOf(a, b):
     "Return the first index of b in a."
     for i, j in enumerate(a):
-        if j == b:
+        if j is b or j == b:
             return i
     else:
         raise ValueError('sequence.index(x): x not in sequence')
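
Note: countOf() and indexOf() now test identity before equality, matching
list.index()/list.count() semantics for non-reflexive values such as NaN:

import operator

nan = float("nan")
data = [1.0, nan, 2.0]
print(operator.indexOf(data, nan))   # 1, even though nan != nan
print(operator.countOf(data, nan))   # 1
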
diff --git a/Lib/os.py b/Lib/os.py
index b794159..d26cfc9 100644
--- a/Lib/os.py
+++ b/Lib/os.py
@@ -36,7 +36,7 @@
 __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
            "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
            "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
-           "popen", "extsep"]
+           "extsep"]
 
 def _exists(name):
     return name in globals()
@@ -969,58 +969,64 @@
 
     __all__.extend(["spawnlp", "spawnlpe"])
 
-
-# Supply os.popen()
-def popen(cmd, mode="r", buffering=-1):
-    if not isinstance(cmd, str):
-        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
-    if mode not in ("r", "w"):
-        raise ValueError("invalid mode %r" % mode)
-    if buffering == 0 or buffering is None:
-        raise ValueError("popen() does not support unbuffered streams")
-    import subprocess, io
-    if mode == "r":
-        proc = subprocess.Popen(cmd,
-                                shell=True,
-                                stdout=subprocess.PIPE,
-                                bufsize=buffering)
-        return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
-    else:
-        proc = subprocess.Popen(cmd,
-                                shell=True,
-                                stdin=subprocess.PIPE,
-                                bufsize=buffering)
-        return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
-
-# Helper for popen() -- a proxy for a file whose close waits for the process
-class _wrap_close:
-    def __init__(self, stream, proc):
-        self._stream = stream
-        self._proc = proc
-    def close(self):
-        self._stream.close()
-        returncode = self._proc.wait()
-        if returncode == 0:
-            return None
-        if name == 'nt':
-            return returncode
+# VxWorks has no user space shell provided. As a result, running
+# command in a shell can't be supported.
+if sys.platform != 'vxworks':
+    # Supply os.popen()
+    def popen(cmd, mode="r", buffering=-1):
+        if not isinstance(cmd, str):
+            raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
+        if mode not in ("r", "w"):
+            raise ValueError("invalid mode %r" % mode)
+        if buffering == 0 or buffering is None:
+            raise ValueError("popen() does not support unbuffered streams")
+        import subprocess, io
+        if mode == "r":
+            proc = subprocess.Popen(cmd,
+                                    shell=True, text=True,
+                                    stdout=subprocess.PIPE,
+                                    bufsize=buffering)
+            return _wrap_close(proc.stdout, proc)
         else:
-            return returncode << 8  # Shift left to match old behavior
-    def __enter__(self):
-        return self
-    def __exit__(self, *args):
-        self.close()
-    def __getattr__(self, name):
-        return getattr(self._stream, name)
-    def __iter__(self):
-        return iter(self._stream)
+            proc = subprocess.Popen(cmd,
+                                    shell=True, text=True,
+                                    stdin=subprocess.PIPE,
+                                    bufsize=buffering)
+            return _wrap_close(proc.stdin, proc)
+
+    # Helper for popen() -- a proxy for a file whose close waits for the process
+    class _wrap_close:
+        def __init__(self, stream, proc):
+            self._stream = stream
+            self._proc = proc
+        def close(self):
+            self._stream.close()
+            returncode = self._proc.wait()
+            if returncode == 0:
+                return None
+            if name == 'nt':
+                return returncode
+            else:
+                return returncode << 8  # Shift left to match old behavior
+        def __enter__(self):
+            return self
+        def __exit__(self, *args):
+            self.close()
+        def __getattr__(self, name):
+            return getattr(self._stream, name)
+        def __iter__(self):
+            return iter(self._stream)
+
+    __all__.append("popen")
 
 # Supply os.fdopen()
-def fdopen(fd, *args, **kwargs):
+def fdopen(fd, mode="r", buffering=-1, encoding=None, *args, **kwargs):
     if not isinstance(fd, int):
         raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
     import io
-    return io.open(fd, *args, **kwargs)
+    if "b" not in mode:
+        encoding = io.text_encoding(encoding)
+    return io.open(fd, mode, buffering, encoding, *args, **kwargs)
 
 
 # For testing purposes, make sure the function is available when the C
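
Note: os.popen() is now omitted on VxWorks (which has no user-space shell) and
builds text-mode subprocess pipes via text=True instead of wrapping them by
hand; os.fdopen() resolves its default text encoding through io.text_encoding().
A sketch of an fdopen round-trip over a pipe:

import os

r, w = os.pipe()
with os.fdopen(w, "w") as f:   # encoding chosen via io.text_encoding(None)
    f.write("hello\n")
with os.fdopen(r) as f:
    print(f.read(), end="")    # hello
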
diff --git a/Lib/pathlib.py b/Lib/pathlib.py
index 147be2f..621fba0 100644
--- a/Lib/pathlib.py
+++ b/Lib/pathlib.py
@@ -6,6 +6,7 @@
 import posixpath
 import re
 import sys
+import warnings
 from _collections_abc import Sequence
 from errno import EINVAL, ENOENT, ENOTDIR, EBADF, ELOOP
 from operator import attrgetter
@@ -13,18 +14,6 @@
 from urllib.parse import quote_from_bytes as urlquote_from_bytes
 
 
-supports_symlinks = True
-if os.name == 'nt':
-    import nt
-    if sys.getwindowsversion()[:2] >= (6, 0):
-        from nt import _getfinalpathname
-    else:
-        supports_symlinks = False
-        _getfinalpathname = None
-else:
-    nt = None
-
-
 __all__ = [
     "PurePath", "PurePosixPath", "PureWindowsPath",
     "Path", "PosixPath", "WindowsPath",
@@ -34,13 +23,17 @@
 # Internals
 #
 
+_WINERROR_NOT_READY = 21  # drive exists but is not accessible
+_WINERROR_INVALID_NAME = 123  # fix for bpo-35306
+_WINERROR_CANT_RESOLVE_FILENAME = 1921  # broken symlink pointing to itself
+
 # EBADF - guard against macOS `stat` throwing EBADF
 _IGNORED_ERROS = (ENOENT, ENOTDIR, EBADF, ELOOP)
 
 _IGNORED_WINERRORS = (
-    21,  # ERROR_NOT_READY - drive exists but is not accessible
-    1921,  # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
-)
+    _WINERROR_NOT_READY,
+    _WINERROR_INVALID_NAME,
+    _WINERROR_CANT_RESOLVE_FILENAME)
 
 def _ignore_error(exception):
     return (getattr(exception, 'errno', None) in _IGNORED_ERROS or
@@ -131,16 +124,25 @@
     ext_namespace_prefix = '\\\\?\\'
 
     reserved_names = (
-        {'CON', 'PRN', 'AUX', 'NUL'} |
-        {'COM%d' % i for i in range(1, 10)} |
-        {'LPT%d' % i for i in range(1, 10)}
+        {'CON', 'PRN', 'AUX', 'NUL', 'CONIN$', 'CONOUT$'} |
+        {'COM%s' % c for c in '123456789\xb9\xb2\xb3'} |
+        {'LPT%s' % c for c in '123456789\xb9\xb2\xb3'}
         )
 
     # Interesting findings about extended paths:
-    # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
-    #   but '\\?\c:/a' is not
-    # - extended paths are always absolute; "relative" extended paths will
-    #   fail.
+    # * '\\?\c:\a' is an extended path, which bypasses normal Windows API
+    #   path processing. Thus relative paths are not resolved and slash is not
+    #   translated to backslash. It has the native NT path limit of 32767
+    #   characters, but a bit less after resolving device symbolic links,
+    #   such as '\??\C:' => '\Device\HarddiskVolume2'.
+    # * '\\?\c:/a' looks for a device named 'C:/a' because slash is a
+    #   regular name character in the object namespace.
+    # * '\\?\c:\foo/bar' is invalid because '/' is illegal in NT filesystems.
+    #   The only path separator at the filesystem level is backslash.
+    # * '//?/c:\a' and '//?/c:/a' are effectively equivalent to '\\.\c:\a' and
+    #   thus limited to MAX_PATH.
+    # * Prior to Windows 8, ANSI API bytes paths are limited to MAX_PATH,
+    #   even with the '\\?\' prefix.
 
     def splitroot(self, part, sep=sep):
         first = part[0:1]
@@ -190,30 +192,6 @@
     def compile_pattern(self, pattern):
         return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch
 
-    def resolve(self, path, strict=False):
-        s = str(path)
-        if not s:
-            return os.getcwd()
-        previous_s = None
-        if _getfinalpathname is not None:
-            if strict:
-                return self._ext_to_normal(_getfinalpathname(s))
-            else:
-                tail_parts = []  # End of the path after the first one not found
-                while True:
-                    try:
-                        s = self._ext_to_normal(_getfinalpathname(s))
-                    except FileNotFoundError:
-                        previous_s = s
-                        s, tail = os.path.split(s)
-                        tail_parts.append(tail)
-                        if previous_s == s:
-                            return path
-                    else:
-                        return os.path.join(s, *reversed(tail_parts))
-        # Means fallback on absolute
-        return None
-
     def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
         prefix = ''
         if s.startswith(ext_prefix):
@@ -224,21 +202,18 @@
                 s = '\\' + s[3:]
         return prefix, s
 
-    def _ext_to_normal(self, s):
-        # Turn back an extended path into a normal DOS-like path
-        return self._split_extended_path(s)[1]
-
     def is_reserved(self, parts):
         # NOTE: the rules for reserved names seem somewhat complicated
-        # (e.g. r"..\NUL" is reserved but not r"foo\NUL").
-        # We err on the side of caution and return True for paths which are
-        # not considered reserved by Windows.
+        # (e.g. r"..\NUL" is reserved but not r"foo\NUL" if "foo" does not
+        # exist). We err on the side of caution and return True for paths
+        # which are not considered reserved by Windows.
         if not parts:
             return False
         if parts[0].startswith('\\\\'):
             # UNC paths are never reserved
             return False
-        return parts[-1].partition('.')[0].upper() in self.reserved_names
+        name = parts[-1].partition('.')[0].partition(':')[0].rstrip(' ')
+        return name.upper() in self.reserved_names
 
     def make_uri(self, path):
         # Under Windows, file URIs use the UTF-8 encoding.
@@ -252,34 +227,6 @@
             # It's a path on a network drive => 'file://host/share/a/b'
             return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
 
-    def gethomedir(self, username):
-        if 'USERPROFILE' in os.environ:
-            userhome = os.environ['USERPROFILE']
-        elif 'HOMEPATH' in os.environ:
-            try:
-                drv = os.environ['HOMEDRIVE']
-            except KeyError:
-                drv = ''
-            userhome = drv + os.environ['HOMEPATH']
-        else:
-            raise RuntimeError("Can't determine home directory")
-
-        if username:
-            # Try to guess user home directory.  By default all users
-            # directories are located in the same place and are named by
-            # corresponding usernames.  If current user home directory points
-            # to nonstandard place, this guess is likely wrong.
-            if os.environ['USERNAME'] != username:
-                drv, root, parts = self.parse_parts((userhome,))
-                if parts[-1] != os.environ['USERNAME']:
-                    raise RuntimeError("Can't determine home directory "
-                                       "for %r" % username)
-                parts[-1] = username
-                if drv or root:
-                    userhome = drv + root + self.join(parts[1:])
-                else:
-                    userhome = self.join(parts)
-        return userhome
 
 class _PosixFlavour(_Flavour):
     sep = '/'
@@ -313,54 +260,6 @@
     def compile_pattern(self, pattern):
         return re.compile(fnmatch.translate(pattern)).fullmatch
 
-    def resolve(self, path, strict=False):
-        sep = self.sep
-        accessor = path._accessor
-        seen = {}
-        def _resolve(path, rest):
-            if rest.startswith(sep):
-                path = ''
-
-            for name in rest.split(sep):
-                if not name or name == '.':
-                    # current dir
-                    continue
-                if name == '..':
-                    # parent dir
-                    path, _, _ = path.rpartition(sep)
-                    continue
-                if path.endswith(sep):
-                    newpath = path + name
-                else:
-                    newpath = path + sep + name
-                if newpath in seen:
-                    # Already seen this path
-                    path = seen[newpath]
-                    if path is not None:
-                        # use cached value
-                        continue
-                    # The symlink is not resolved, so we must have a symlink loop.
-                    raise RuntimeError("Symlink loop from %r" % newpath)
-                # Resolve the symbolic link
-                try:
-                    target = accessor.readlink(newpath)
-                except OSError as e:
-                    if e.errno != EINVAL and strict:
-                        raise
-                    # Not a symlink, or non-strict mode. We just leave the path
-                    # untouched.
-                    path = newpath
-                else:
-                    seen[newpath] = None # not resolved symlink
-                    path = _resolve(path, target)
-                    seen[newpath] = path # resolved symlink
-
-            return path
-        # NOTE: according to POSIX, getcwd() cannot contain path components
-        # which are symlinks.
-        base = '' if path.is_absolute() else os.getcwd()
-        return _resolve(base, str(path)) or sep
-
     def is_reserved(self, parts):
         return False
 
@@ -370,21 +269,6 @@
         bpath = bytes(path)
         return 'file://' + urlquote_from_bytes(bpath)
 
-    def gethomedir(self, username):
-        if not username:
-            try:
-                return os.environ['HOME']
-            except KeyError:
-                import pwd
-                return pwd.getpwuid(os.getuid()).pw_dir
-        else:
-            import pwd
-            try:
-                return pwd.getpwnam(username).pw_dir
-            except KeyError:
-                raise RuntimeError("Can't determine home directory "
-                                   "for %r" % username)
-
 
 _windows_flavour = _WindowsFlavour()
 _posix_flavour = _PosixFlavour()
@@ -399,9 +283,7 @@
 
     stat = os.stat
 
-    lstat = os.lstat
-
-    open = os.open
+    open = io.open
 
     listdir = os.listdir
 
@@ -409,21 +291,14 @@
 
     chmod = os.chmod
 
-    if hasattr(os, "lchmod"):
-        lchmod = os.lchmod
-    else:
-        def lchmod(self, pathobj, mode):
-            raise NotImplementedError("lchmod() not available on this system")
-
     mkdir = os.mkdir
 
     unlink = os.unlink
 
     if hasattr(os, "link"):
-        link_to = os.link
+        link = os.link
     else:
-        @staticmethod
-        def link_to(self, target):
+        def link(self, src, dst):
             raise NotImplementedError("os.link() not available on this system")
 
     rmdir = os.rmdir
@@ -432,23 +307,35 @@
 
     replace = os.replace
 
-    if nt:
-        if supports_symlinks:
-            symlink = os.symlink
-        else:
-            def symlink(a, b, target_is_directory):
-                raise NotImplementedError("symlink() not available on this system")
+    if hasattr(os, "symlink"):
+        symlink = os.symlink
     else:
-        # Under POSIX, os.symlink() takes two args
-        @staticmethod
-        def symlink(a, b, target_is_directory):
-            return os.symlink(a, b)
+        def symlink(self, src, dst, target_is_directory=False):
+            raise NotImplementedError("os.symlink() not available on this system")
 
-    utime = os.utime
+    def touch(self, path, mode=0o666, exist_ok=True):
+        if exist_ok:
+            # First try to bump modification time
+            # Implementation note: GNU touch uses the UTIME_NOW option of
+            # the utimensat() / futimens() functions.
+            try:
+                os.utime(path, None)
+            except OSError:
+                # Avoid exception chaining
+                pass
+            else:
+                return
+        flags = os.O_CREAT | os.O_WRONLY
+        if not exist_ok:
+            flags |= os.O_EXCL
+        fd = os.open(path, flags, mode)
+        os.close(fd)
 
-    # Helper for resolve()
-    def readlink(self, path):
-        return os.readlink(path)
+    if hasattr(os, "readlink"):
+        readlink = os.readlink
+    else:
+        def readlink(self, path):
+            raise NotImplementedError("os.readlink() not available on this system")
 
     def owner(self, path):
         try:
@@ -464,6 +351,12 @@
         except ImportError:
             raise NotImplementedError("Path.group() is unsupported on this system")
 
+    getcwd = os.getcwd
+
+    expanduser = staticmethod(os.path.expanduser)
+
+    realpath = staticmethod(os.path.realpath)
+
 
 _normal_accessor = _NormalAccessor()
 
@@ -630,7 +523,10 @@
             return len(self._parts)
 
     def __getitem__(self, idx):
-        if idx < 0 or idx >= len(self):
+        if isinstance(idx, slice):
+            return tuple(self[i] for i in range(*idx.indices(len(self))))
+
+        if idx >= len(self) or idx < -len(self):
             raise IndexError(idx)
         return self._pathcls._from_parsed_parts(self._drv, self._root,
                                                 self._parts[:-idx - 1])
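
Note: with the __getitem__ change above, PurePath.parents accepts slices and
negative indexes:

from pathlib import PurePosixPath

p = PurePosixPath("/a/b/c/d")
print(p.parents[0])     # /a/b/c
print(p.parents[-1])    # /
print(p.parents[1:3])   # (PurePosixPath('/a/b'), PurePosixPath('/a'))
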
@@ -689,7 +585,7 @@
         return cls._flavour.parse_parts(parts)
 
     @classmethod
-    def _from_parts(cls, args, init=True):
+    def _from_parts(cls, args):
         # We need to call _parse_args on the instance, so as to get the
         # right flavour.
         self = object.__new__(cls)
@@ -697,18 +593,14 @@
         self._drv = drv
         self._root = root
         self._parts = parts
-        if init:
-            self._init()
         return self
 
     @classmethod
-    def _from_parsed_parts(cls, drv, root, parts, init=True):
+    def _from_parsed_parts(cls, drv, root, parts):
         self = object.__new__(cls)
         self._drv = drv
         self._root = root
         self._parts = parts
-        if init:
-            self._init()
         return self
 
     @classmethod
@@ -718,10 +610,6 @@
         else:
             return cls._flavour.join(parts)
 
-    def _init(self):
-        # Overridden in concrete Path
-        pass
-
     def _make_child(self, args):
         drv, root, parts = self._parse_args(args)
         drv, root, parts = self._flavour.join_parsed_parts(
@@ -1061,29 +949,18 @@
     object. You can also instantiate a PosixPath or WindowsPath directly,
     but cannot instantiate a WindowsPath on a POSIX system or vice versa.
     """
-    __slots__ = (
-        '_accessor',
-    )
+    _accessor = _normal_accessor
+    __slots__ = ()
 
     def __new__(cls, *args, **kwargs):
         if cls is Path:
             cls = WindowsPath if os.name == 'nt' else PosixPath
-        self = cls._from_parts(args, init=False)
+        self = cls._from_parts(args)
         if not self._flavour.is_supported:
             raise NotImplementedError("cannot instantiate %r on your system"
                                       % (cls.__name__,))
-        self._init()
         return self
 
-    def _init(self,
-              # Private non-constructor arguments
-              template=None,
-              ):
-        if template is not None:
-            self._accessor = template._accessor
-        else:
-            self._accessor = _normal_accessor
-
     def _make_child_relpath(self, part):
         # This is an optimization used for dir walking.  `part` must be
         # a single part relative to this path.
@@ -1104,17 +981,6 @@
         # removed in the future.
         pass
 
-    def _opener(self, name, flags, mode=0o666):
-        # A stub for the opener argument to built-in open()
-        return self._accessor.open(self, flags, mode)
-
-    def _raw_open(self, flags, mode=0o777):
-        """
-        Open the file pointed by this path and return a file descriptor,
-        as os.open() does.
-        """
-        return self._accessor.open(self, flags, mode)
-
     # Public API
 
     @classmethod
@@ -1122,14 +988,14 @@
         """Return a new path pointing to the current working directory
         (as returned by os.getcwd()).
         """
-        return cls(os.getcwd())
+        return cls(cls._accessor.getcwd())
 
     @classmethod
     def home(cls):
         """Return a new path pointing to the user's home directory (as
         returned by os.path.expanduser('~')).
         """
-        return cls(cls()._flavour.gethomedir(None))
+        return cls("~").expanduser()
 
     def samefile(self, other_path):
         """Return whether other_path is the same or not as this file
@@ -1191,9 +1057,7 @@
             return self
         # FIXME this must defer to the specific flavour (and, under Windows,
         # use nt._getfullpathname())
-        obj = self._from_parts([os.getcwd()] + self._parts, init=False)
-        obj._init(template=self)
-        return obj
+        return self._from_parts([self._accessor.getcwd()] + self._parts)
 
     def resolve(self, strict=False):
         """
@@ -1201,24 +1065,34 @@
         normalizing it (for example turning slashes into backslashes under
         Windows).
         """
-        s = self._flavour.resolve(self, strict=strict)
-        if s is None:
-            # No symlink resolution => for consistency, raise an error if
-            # the path doesn't exist or is forbidden
-            self.stat()
-            s = str(self.absolute())
-        # Now we have no symlinks in the path, it's safe to normalize it.
-        normed = self._flavour.pathmod.normpath(s)
-        obj = self._from_parts((normed,), init=False)
-        obj._init(template=self)
-        return obj
 
-    def stat(self):
+        def check_eloop(e):
+            winerror = getattr(e, 'winerror', 0)
+            if e.errno == ELOOP or winerror == _WINERROR_CANT_RESOLVE_FILENAME:
+                raise RuntimeError("Symlink loop from %r" % e.filename)
+
+        try:
+            s = self._accessor.realpath(self, strict=strict)
+        except OSError as e:
+            check_eloop(e)
+            raise
+        p = self._from_parts((s,))
+
+        # In non-strict mode, realpath() doesn't raise on symlink loops.
+        # Ensure we get an exception by calling stat()
+        if not strict:
+            try:
+                p.stat()
+            except OSError as e:
+                check_eloop(e)
+        return p
+
+    def stat(self, *, follow_symlinks=True):
         """
         Return the result of the stat() system call on this path, like
         os.stat() does.
         """
-        return self._accessor.stat(self)
+        return self._accessor.stat(self, follow_symlinks=follow_symlinks)
 
     def owner(self):
         """
@@ -1238,8 +1112,10 @@
         Open the file pointed by this path and return a file object, as
         the built-in open() function does.
         """
-        return io.open(self, mode, buffering, encoding, errors, newline,
-                       opener=self._opener)
+        if "b" not in mode:
+            encoding = io.text_encoding(encoding)
+        return self._accessor.open(self, mode, buffering, encoding, errors,
+                                   newline)
 
     def read_bytes(self):
         """
@@ -1252,6 +1128,7 @@
         """
         Open the file in text mode, read it, and close the file.
         """
+        encoding = io.text_encoding(encoding)
         with self.open(mode='r', encoding=encoding, errors=errors) as f:
             return f.read()
 
@@ -1264,14 +1141,15 @@
         with self.open(mode='wb') as f:
             return f.write(view)
 
-    def write_text(self, data, encoding=None, errors=None):
+    def write_text(self, data, encoding=None, errors=None, newline=None):
         """
         Open the file in text mode, write to it, and close the file.
         """
         if not isinstance(data, str):
             raise TypeError('data must be str, not %s' %
                             data.__class__.__name__)
-        with self.open(mode='w', encoding=encoding, errors=errors) as f:
+        encoding = io.text_encoding(encoding)
+        with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
             return f.write(data)
 
     def readlink(self):
@@ -1279,30 +1157,13 @@
         Return the path to which the symbolic link points.
         """
         path = self._accessor.readlink(self)
-        obj = self._from_parts((path,), init=False)
-        obj._init(template=self)
-        return obj
+        return self._from_parts((path,))
 
     def touch(self, mode=0o666, exist_ok=True):
         """
         Create this file with the given access mode, if it doesn't exist.
         """
-        if exist_ok:
-            # First try to bump modification time
-            # Implementation note: GNU touch uses the UTIME_NOW option of
-            # the utimensat() / futimens() functions.
-            try:
-                self._accessor.utime(self, None)
-            except OSError:
-                # Avoid exception chaining
-                pass
-            else:
-                return
-        flags = os.O_CREAT | os.O_WRONLY
-        if not exist_ok:
-            flags |= os.O_EXCL
-        fd = self._raw_open(flags, mode)
-        os.close(fd)
+        self._accessor.touch(self, mode, exist_ok)
 
     def mkdir(self, mode=0o777, parents=False, exist_ok=False):
         """
@@ -1321,18 +1182,18 @@
             if not exist_ok or not self.is_dir():
                 raise
 
-    def chmod(self, mode):
+    def chmod(self, mode, *, follow_symlinks=True):
         """
         Change the permissions of the path, like os.chmod().
         """
-        self._accessor.chmod(self, mode)
+        self._accessor.chmod(self, mode, follow_symlinks=follow_symlinks)
 
     def lchmod(self, mode):
         """
         Like chmod(), except if the path points to a symlink, the symlink's
         permissions are changed, rather than its target's.
         """
-        self._accessor.lchmod(self, mode)
+        self.chmod(mode, follow_symlinks=False)
 
     def unlink(self, missing_ok=False):
         """
@@ -1356,13 +1217,7 @@
         Like stat(), except if the path points to a symlink, the symlink's
         status information is returned, rather than its target's.
         """
-        return self._accessor.lstat(self)
-
-    def link_to(self, target):
-        """
-        Create a hard link pointing to a path named target.
-        """
-        self._accessor.link_to(self, target)
+        return self.stat(follow_symlinks=False)
 
     def rename(self, target):
         """
@@ -1392,11 +1247,37 @@
 
     def symlink_to(self, target, target_is_directory=False):
         """
-        Make this path a symlink pointing to the given path.
-        Note the order of arguments (self, target) is the reverse of os.symlink's.
+        Make this path a symlink pointing to the target path.
+        Note the order of arguments (link, target) is the reverse of os.symlink.
         """
         self._accessor.symlink(target, self, target_is_directory)
 
+    def hardlink_to(self, target):
+        """
+        Make this path a hard link pointing to the same file as *target*.
+
+        Note the order of arguments (self, target) is the reverse of os.link's.
+        """
+        self._accessor.link(target, self)
+
+    def link_to(self, target):
+        """
+        Make the target path a hard link pointing to this path.
+
+        Note this function does not make this path a hard link to *target*,
+        despite the implication of the function and argument names. The order
+        of arguments (target, link) is the reverse of Path.symlink_to, but
+        matches that of os.link.
+
+        Deprecated since Python 3.10 and scheduled for removal in Python 3.12.
+        Use `hardlink_to()` instead.
+        """
+        warnings.warn("pathlib.Path.link_to() is deprecated and is scheduled "
+                      "for removal in Python 3.12. "
+                      "Use pathlib.Path.hardlink_to() instead.",
+                      DeprecationWarning, stacklevel=2)
+        self._accessor.link(self, target)
+
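
The hunk above splits hard-link creation into the new hardlink_to() (argument
order matching os.link) while keeping link_to() as a deprecated alias with the
old reversed semantics. A minimal sketch, using hypothetical file names:

from pathlib import Path

target = Path("notes.txt")        # hypothetical file
target.touch()
link = Path("notes-link.txt")
link.hardlink_to(target)          # new in 3.10; argument order matches os.link()
print(link.stat().st_nlink)      # 2: both names refer to the same file
# The deprecated link_to() has the reverse argument order and now warns.
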
     # Convenience functions for querying the stat results
 
     def exists(self):
@@ -1424,7 +1305,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
             return False
         except ValueError:
             # Non-encodable path
@@ -1441,7 +1322,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
             return False
         except ValueError:
             # Non-encodable path
@@ -1492,7 +1373,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
             return False
         except ValueError:
             # Non-encodable path
@@ -1508,7 +1389,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
             return False
         except ValueError:
             # Non-encodable path
@@ -1524,7 +1405,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
             return False
         except ValueError:
             # Non-encodable path
@@ -1540,7 +1421,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
             return False
         except ValueError:
             # Non-encodable path
@@ -1552,7 +1433,9 @@
         """
         if (not (self._drv or self._root) and
             self._parts and self._parts[0][:1] == '~'):
-            homedir = self._flavour.gethomedir(self._parts[0][1:])
+            homedir = self._accessor.expanduser(self._parts[0])
+            if homedir[:1] == "~":
+                raise RuntimeError("Could not determine home directory.")
             return self._from_parts([homedir] + self._parts[1:])
 
         return self
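
Taken together, the pathlib hunks drop the per-instance accessor in favour of a
single class-level one and expose a few new keywords. A short sketch of the
user-visible additions, with a hypothetical file name:

from pathlib import Path

p = Path("example.cfg")                       # hypothetical file
p.write_text("key=value\n", newline="\n")     # write_text() now accepts newline=
print(p.stat(follow_symlinks=False).st_size)  # stat() gained os.stat()'s keyword
print(Path("~").expanduser())                 # home() is now built on expanduser()
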
diff --git a/Lib/pdb.py b/Lib/pdb.py
index d7d9571..7ab50b4 100644
--- a/Lib/pdb.py
+++ b/Lib/pdb.py
@@ -384,8 +384,7 @@
                 sys.stdin = save_stdin
                 sys.displayhook = save_displayhook
         except:
-            exc_info = sys.exc_info()[:2]
-            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
+            self._error_exc()
 
     def precmd(self, line):
         """Handle alias expansion and ';;' separator."""
@@ -752,7 +751,8 @@
         """
         # this method should be callable before starting debugging, so default
         # to "no globals" if there is no current frame
-        globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
+        frame = getattr(self, 'curframe', None)
+        globs = frame.f_globals if frame else None
         line = linecache.getline(filename, lineno, globs)
         if not line:
             self.message('End of file')
@@ -893,7 +893,7 @@
             except ValueError:
                 err = "Invalid line number (%s)" % arg
             else:
-                bplist = self.get_breaks(filename, lineno)
+                bplist = self.get_breaks(filename, lineno)[:]
                 err = self.clear_break(filename, lineno)
             if err:
                 self.error(err)
@@ -1026,7 +1026,11 @@
         if arg:
             import shlex
             argv0 = sys.argv[0:1]
-            sys.argv = shlex.split(arg)
+            try:
+                sys.argv = shlex.split(arg)
+            except ValueError as e:
+                self.error('Cannot run %s: %s' % (arg, e))
+                return
             sys.argv[:0] = argv0
         # this is caught in the main debugger loop
         raise Restart
@@ -1103,8 +1107,7 @@
         try:
             sys.call_tracing(p.run, (arg, globals, locals))
         except Exception:
-            exc_info = sys.exc_info()[:2]
-            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
+            self._error_exc()
         self.message("LEAVING RECURSIVE DEBUGGER")
         sys.settrace(self.trace_dispatch)
         self.lastcmd = p.lastcmd
@@ -1162,8 +1165,7 @@
         try:
             return eval(arg, self.curframe.f_globals, self.curframe_locals)
         except:
-            exc_info = sys.exc_info()[:2]
-            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
+            self._error_exc()
             raise
 
     def _getval_except(self, arg, frame=None):
@@ -1177,23 +1179,31 @@
             err = traceback.format_exception_only(*exc_info)[-1].strip()
             return _rstr('** raised %s **' % err)
 
+    def _error_exc(self):
+        exc_info = sys.exc_info()[:2]
+        self.error(traceback.format_exception_only(*exc_info)[-1].strip())
+
+    def _msg_val_func(self, arg, func):
+        try:
+            val = self._getval(arg)
+        except:
+            return  # _getval() has displayed the error
+        try:
+            self.message(func(val))
+        except:
+            self._error_exc()
+
     def do_p(self, arg):
         """p expression
         Print the value of the expression.
         """
-        try:
-            self.message(repr(self._getval(arg)))
-        except:
-            pass
+        self._msg_val_func(arg, repr)
 
     def do_pp(self, arg):
         """pp expression
         Pretty-print the value of the expression.
         """
-        try:
-            self.message(pprint.pformat(self._getval(arg)))
-        except:
-            pass
+        self._msg_val_func(arg, pprint.pformat)
 
     complete_print = _complete_expression
     complete_p = _complete_expression
@@ -1483,6 +1493,9 @@
                 self.error('No help for %r; please do not run Python with -OO '
                            'if you need command help' % arg)
                 return
+            if command.__doc__ is None:
+                self.error('No help for %r; __doc__ string missing' % arg)
+                return
             self.message(command.__doc__.rstrip())
 
     do_h = do_help
@@ -1684,10 +1697,19 @@
         print('Error:', mainpyfile, 'does not exist')
         sys.exit(1)
 
+    if run_as_module:
+        import runpy
+        try:
+            runpy._get_module_details(mainpyfile)
+        except Exception:
+            traceback.print_exc()
+            sys.exit(1)
+
     sys.argv[:] = args      # Hide "pdb.py" and pdb options from argument list
 
-    # Replace pdb's dir with script's dir in front of module search path.
     if not run_as_module:
+        mainpyfile = os.path.realpath(mainpyfile)
+        # Replace pdb's dir with script's dir in front of module search path.
         sys.path[0] = os.path.dirname(mainpyfile)
 
     # Note on saving/restoring sys.argv: it's a good idea when sys.argv was
@@ -1707,7 +1729,7 @@
             print("The program finished and will be restarted")
         except Restart:
             print("Restarting", mainpyfile, "with arguments:")
-            print("\t" + " ".join(args))
+            print("\t" + " ".join(sys.argv[1:]))
         except SystemExit:
             # In most cases SystemExit does not warrant a post-mortem session.
             print("The program exited via sys.exit(). Exit status:", end=' ')
diff --git a/Lib/pickle.py b/Lib/pickle.py
index e63a8b6..e7f30f2 100644
--- a/Lib/pickle.py
+++ b/Lib/pickle.py
@@ -818,6 +818,7 @@
             self._write_large_bytes(BYTEARRAY8 + pack("<Q", n), obj)
         else:
             self.write(BYTEARRAY8 + pack("<Q", n) + obj)
+        self.memoize(obj)
     dispatch[bytearray] = save_bytearray
 
     if _HAVE_PICKLE_BUFFER:
@@ -1172,7 +1173,7 @@
         used in Python 3.  The *encoding* and *errors* tell pickle how
         to decode 8-bit string instances pickled by Python 2; these
         default to 'ASCII' and 'strict', respectively. *encoding* can be
-        'bytes' to read theses 8-bit string instances as bytes objects.
+        'bytes' to read these 8-bit string instances as bytes objects.
         """
         self._buffers = iter(buffers) if buffers is not None else None
         self._file_readline = file.readline
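
The pickle change memoizes bytearray objects (protocol 5's BYTEARRAY8 opcode),
so repeated references serialize as memo lookups and identity survives a round
trip. A minimal check:

import pickle

ba = bytearray(b"payload")
a, b = pickle.loads(pickle.dumps([ba, ba], protocol=5))
print(a is b)   # True with the memoize() call; two separate copies without it
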
diff --git a/Lib/pipes.py b/Lib/pipes.py
index f1a16f6..8cc74b0 100644
--- a/Lib/pipes.py
+++ b/Lib/pipes.py
@@ -109,7 +109,7 @@
 
     def append(self, cmd, kind):
         """t.append(cmd, kind) adds a new step at the end."""
-        if type(cmd) is not type(''):
+        if not isinstance(cmd, str):
             raise TypeError('Template.append: cmd must be a string')
         if kind not in stepkinds:
             raise ValueError('Template.append: bad kind %r' % (kind,))
@@ -125,7 +125,7 @@
 
     def prepend(self, cmd, kind):
         """t.prepend(cmd, kind) adds a new step at the front."""
-        if type(cmd) is not type(''):
+        if not isinstance(cmd, str):
             raise TypeError('Template.prepend: cmd must be a string')
         if kind not in stepkinds:
             raise ValueError('Template.prepend: bad kind %r' % (kind,))
diff --git a/Lib/pkgutil.py b/Lib/pkgutil.py
index 4c18467..8e010c7 100644
--- a/Lib/pkgutil.py
+++ b/Lib/pkgutil.py
@@ -7,7 +7,6 @@
 import importlib.machinery
 import os
 import os.path
-import re
 import sys
 from types import ModuleType
 import warnings
@@ -205,7 +204,8 @@
 
     def __init__(self, path=None):
         global imp
-        warnings.warn("This emulation is deprecated, use 'importlib' instead",
+        warnings.warn("This emulation is deprecated and slated for removal "
+                      "in Python 3.12; use 'importlib' instead",
              DeprecationWarning)
         _import_imp()
         self.path = path
@@ -272,7 +272,8 @@
     code = source = None
 
     def __init__(self, fullname, file, filename, etc):
-        warnings.warn("This emulation is deprecated, use 'importlib' instead",
+        warnings.warn("This emulation is deprecated and slated for removal in "
+                      "Python 3.12; use 'importlib' instead",
                       DeprecationWarning)
         _import_imp()
         self.file = file
@@ -638,9 +639,7 @@
     return loader.get_data(resource_name)
 
 
-_DOTTED_WORDS = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
-_NAME_PATTERN = re.compile(f'^(?P<pkg>{_DOTTED_WORDS})(?P<cln>:(?P<obj>{_DOTTED_WORDS})?)?$', re.U)
-del _DOTTED_WORDS
+_NAME_PATTERN = None
 
 def resolve_name(name):
     """
@@ -672,8 +671,17 @@
     ValueError - if `name` isn't in a recognised format
     ImportError - if an import failed when it shouldn't have
     AttributeError - if a failure occurred when traversing the object hierarchy
-                     within the imported package to get to the desired object)
+                     within the imported package to get to the desired object.
     """
+    global _NAME_PATTERN
+    if _NAME_PATTERN is None:
+        # Lazy import to speed up Python startup time
+        import re
+        dotted_words = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
+        _NAME_PATTERN = re.compile(f'^(?P<pkg>{dotted_words})'
+                                   f'(?P<cln>:(?P<obj>{dotted_words})?)?$',
+                                   re.UNICODE)
+
     m = _NAME_PATTERN.match(name)
     if not m:
         raise ValueError(f'invalid format: {name!r}')
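
Deferring the regex build keeps "import pkgutil" from importing re at startup;
resolve_name() accepts the same two spellings as before. For example:

import pkgutil

# The pattern is compiled on the first call only.
print(pkgutil.resolve_name("os.path.join"))       # <function join at ...>
print(pkgutil.resolve_name("logging:getLogger"))  # <function getLogger at ...>
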
diff --git a/Lib/platform.py b/Lib/platform.py
index e9f50ab..e32f9c1 100644
--- a/Lib/platform.py
+++ b/Lib/platform.py
@@ -174,7 +174,7 @@
         The file is read and scanned in chunks of chunksize bytes.
 
     """
-    if executable is None:
+    if not executable:
         try:
             ver = os.confstr('CS_GNU_LIBC_VERSION')
             # parse 'glibc 2.28' as ('glibc', '2.28')
@@ -239,11 +239,9 @@
     if build:
         l.append(build)
     try:
-        ints = map(int, l)
+        strings = list(map(str, map(int, l)))
     except ValueError:
         strings = l
-    else:
-        strings = list(map(str, ints))
     version = '.'.join(strings[:3])
     return version
 
@@ -282,6 +280,7 @@
     for cmd in ('ver', 'command /c ver', 'cmd /c ver'):
         try:
             info = subprocess.check_output(cmd,
+                                           stdin=subprocess.DEVNULL,
                                            stderr=subprocess.DEVNULL,
                                            text=True,
                                            shell=True)
@@ -365,17 +364,20 @@
         return release, version, csd, ptype
 
     winver = getwindowsversion()
-    maj, min, build = winver.platform_version or winver[:3]
-    version = '{0}.{1}.{2}'.format(maj, min, build)
+    try:
+        major, minor, build = map(int, _syscmd_ver()[2].split('.'))
+    except ValueError:
+        major, minor, build = winver.platform_version or winver[:3]
+    version = '{0}.{1}.{2}'.format(major, minor, build)
 
-    release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or
-               _WIN32_CLIENT_RELEASES.get((maj, None)) or
+    release = (_WIN32_CLIENT_RELEASES.get((major, minor)) or
+               _WIN32_CLIENT_RELEASES.get((major, None)) or
                release)
 
     # getwindowsversion() reflect the compatibility mode Python is
     # running under, and so the service pack value is only going to be
     # valid if the versions match.
-    if winver[:2] == (maj, min):
+    if winver[:2] == (major, minor):
         try:
             csd = 'SP{}'.format(winver.service_pack_major)
         except AttributeError:
@@ -384,8 +386,8 @@
 
     # VER_NT_SERVER = 3
     if getattr(winver, 'product_type', None) == 3:
-        release = (_WIN32_SERVER_RELEASES.get((maj, min)) or
-                   _WIN32_SERVER_RELEASES.get((maj, None)) or
+        release = (_WIN32_SERVER_RELEASES.get((major, minor)) or
+                   _WIN32_SERVER_RELEASES.get((major, None)) or
                    release)
 
     try:
@@ -524,16 +526,6 @@
             # XXX Whatever the new SunOS marketing name is...
             system = 'Solaris'
 
-    elif system == 'IRIX64':
-        # IRIX reports IRIX64 on platforms with 64-bit support; yet it
-        # is really a version and not a different platform, since 32-bit
-        # apps are also supported..
-        system = 'IRIX'
-        if version:
-            version = version + ' (64bit)'
-        else:
-            version = '64bit'
-
     elif system in ('win32', 'win16'):
         # In case one of the other tricks
         system = 'Windows'
@@ -698,9 +690,6 @@
     # Bits
     if '32-bit' in fileout:
         bits = '32bit'
-    elif 'N32' in fileout:
-        # On Irix only
-        bits = 'n32bit'
     elif '64-bit' in fileout:
         bits = '64bit'
 
@@ -782,7 +771,7 @@
         ):
     """
     A uname_result that's largely compatible with a
-    simple namedtuple except that 'platform' is
+    simple namedtuple except that 'processor' is
     resolved late and cached to avoid calling "uname"
     except when needed.
     """
@@ -797,12 +786,25 @@
             (self.processor,)
         )
 
+    @classmethod
+    def _make(cls, iterable):
+        # override factory to affect length check
+        num_fields = len(cls._fields)
+        result = cls.__new__(cls, *iterable)
+        if len(result) != num_fields + 1:
+            msg = f'Expected {num_fields} arguments, got {len(result)}'
+            raise TypeError(msg)
+        return result
+
     def __getitem__(self, key):
-        return tuple(iter(self))[key]
+        return tuple(self)[key]
 
     def __len__(self):
         return len(tuple(iter(self)))
 
+    def __reduce__(self):
+        return uname_result, tuple(self)[:len(self._fields)]
+
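
The _make() override and __reduce__ make uname_result copyable and picklable
even though 'processor' is resolved lazily. A behaviour sketch:

import pickle
import platform

u = platform.uname()
v = pickle.loads(pickle.dumps(u))   # __reduce__ drops the cached 'processor'
print(v == u)                       # True; 'processor' is recomputed on demand
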
 
 _uname_cache = None
 
@@ -1243,6 +1245,63 @@
     _platform_cache[(aliased, terse)] = platform
     return platform
 
+### freedesktop.org os-release standard
+# https://www.freedesktop.org/software/systemd/man/os-release.html
+
+# NAME=value with optional quotes (' or "). The regular expression is less
+# strict than shell lexer, but that's ok.
+_os_release_line = re.compile(
+    "^(?P<name>[a-zA-Z0-9_]+)=(?P<quote>[\"\']?)(?P<value>.*)(?P=quote)$"
+)
+# unescape five special characters mentioned in the standard
+_os_release_unescape = re.compile(r"\\([\\\$\"\'`])")
+# /etc takes precedence over /usr/lib
+_os_release_candidates = ("/etc/os-release", "/usr/lib/os-release")
+_os_release_cache = None
+
+
+def _parse_os_release(lines):
+    # These fields are mandatory fields with well-known defaults
+    # in practice all Linux distributions override NAME, ID, and PRETTY_NAME.
+    info = {
+        "NAME": "Linux",
+        "ID": "linux",
+        "PRETTY_NAME": "Linux",
+    }
+
+    for line in lines:
+        mo = _os_release_line.match(line)
+        if mo is not None:
+            info[mo.group('name')] = _os_release_unescape.sub(
+                r"\1", mo.group('value')
+            )
+
+    return info
+
+
+def freedesktop_os_release():
+    """Return operation system identification from freedesktop.org os-release
+    """
+    global _os_release_cache
+
+    if _os_release_cache is None:
+        errno = None
+        for candidate in _os_release_candidates:
+            try:
+                with open(candidate, encoding="utf-8") as f:
+                    _os_release_cache = _parse_os_release(f)
+                break
+            except OSError as e:
+                errno = e.errno
+        else:
+            raise OSError(
+                errno,
+                f"Unable to read files {', '.join(_os_release_candidates)}"
+            )
+
+    return _os_release_cache.copy()
+
+
 ### Command line interface
 
 if __name__ == '__main__':
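
A sketch of the new helper; on hosts without an os-release file (including the
Windows prebuilt this change ships in) it raises OSError:

import platform

try:
    info = platform.freedesktop_os_release()
except OSError:
    print("no /etc/os-release or /usr/lib/os-release on this system")
else:
    # NAME, ID and PRETTY_NAME fall back to "Linux"/"linux"/"Linux".
    print(info["ID"], info.get("VERSION_ID", "unknown"))
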
diff --git a/Lib/posixpath.py b/Lib/posixpath.py
index ecb4e5a..1953746 100644
--- a/Lib/posixpath.py
+++ b/Lib/posixpath.py
@@ -262,6 +262,9 @@
             # password database, return the path unchanged
             return path
         userhome = pwent.pw_dir
+    # if no user home, return the path unchanged on VxWorks
+    if userhome is None and sys.platform == "vxworks":
+        return path
     if isinstance(path, bytes):
         userhome = os.fsencode(userhome)
         root = b'/'
@@ -349,6 +352,7 @@
     initial_slashes = path.startswith(sep)
     # POSIX allows one or two initial slashes, but treats three or more
     # as single slash.
+    # (see http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13)
     if (initial_slashes and
         path.startswith(sep*2) and not path.startswith(sep*3)):
         initial_slashes = 2
@@ -384,16 +388,16 @@
 # Return a canonical path (i.e. the absolute location of a file on the
 # filesystem).
 
-def realpath(filename):
+def realpath(filename, *, strict=False):
     """Return the canonical path of the specified filename, eliminating any
 symbolic links encountered in the path."""
     filename = os.fspath(filename)
-    path, ok = _joinrealpath(filename[:0], filename, {})
+    path, ok = _joinrealpath(filename[:0], filename, strict, {})
     return abspath(path)
 
 # Join two paths, normalizing and eliminating any symbolic links
 # encountered in the second path.
-def _joinrealpath(path, rest, seen):
+def _joinrealpath(path, rest, strict, seen):
     if isinstance(path, bytes):
         sep = b'/'
         curdir = b'.'
@@ -422,7 +426,15 @@
                 path = pardir
             continue
         newpath = join(path, name)
-        if not islink(newpath):
+        try:
+            st = os.lstat(newpath)
+        except OSError:
+            if strict:
+                raise
+            is_link = False
+        else:
+            is_link = stat.S_ISLNK(st.st_mode)
+        if not is_link:
             path = newpath
             continue
         # Resolve the symbolic link
@@ -433,10 +445,14 @@
                 # use cached value
                 continue
             # The symlink is not resolved, so we must have a symlink loop.
-            # Return already resolved part + rest of the path unchanged.
-            return join(newpath, rest), False
+            if strict:
+                # Raise OSError(errno.ELOOP)
+                os.stat(newpath)
+            else:
+                # Return already resolved part + rest of the path unchanged.
+                return join(newpath, rest), False
         seen[newpath] = None # not resolved symlink
-        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
+        path, ok = _joinrealpath(path, os.readlink(newpath), strict, seen)
         if not ok:
             return join(path, rest), False
         seen[newpath] = path # resolved symlink
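
The strict flag threads through _joinrealpath(): missing components and symlink
loops now raise OSError instead of being resolved textually. A POSIX sketch
using a hypothetical self-referential link in the working directory:

import errno
import os

os.symlink("loop", "loop")                   # hypothetical looping symlink
try:
    os.path.realpath("loop", strict=True)
except OSError as e:
    print(e.errno == errno.ELOOP)            # True: the loop is detected eagerly
print(os.path.realpath("loop"))              # non-strict: textual resolution
os.unlink("loop")
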
diff --git a/Lib/pprint.py b/Lib/pprint.py
index 7c1118a..d91421f 100644
--- a/Lib/pprint.py
+++ b/Lib/pprint.py
@@ -35,6 +35,7 @@
 """
 
 import collections as _collections
+import dataclasses as _dataclasses
 import re
 import sys as _sys
 import types as _types
@@ -45,18 +46,20 @@
 
 
 def pprint(object, stream=None, indent=1, width=80, depth=None, *,
-           compact=False, sort_dicts=True):
+           compact=False, sort_dicts=True, underscore_numbers=False):
     """Pretty-print a Python object to a stream [default is sys.stdout]."""
     printer = PrettyPrinter(
         stream=stream, indent=indent, width=width, depth=depth,
-        compact=compact, sort_dicts=sort_dicts)
+        compact=compact, sort_dicts=sort_dicts,
+        underscore_numbers=underscore_numbers)
     printer.pprint(object)
 
 def pformat(object, indent=1, width=80, depth=None, *,
-            compact=False, sort_dicts=True):
+            compact=False, sort_dicts=True, underscore_numbers=False):
     """Format a Python object into a pretty-printed representation."""
     return PrettyPrinter(indent=indent, width=width, depth=depth,
-                         compact=compact, sort_dicts=sort_dicts).pformat(object)
+                         compact=compact, sort_dicts=sort_dicts,
+                         underscore_numbers=underscore_numbers).pformat(object)
 
 def pp(object, *args, sort_dicts=False, **kwargs):
     """Pretty-print a Python object"""
@@ -64,15 +67,15 @@
 
 def saferepr(object):
     """Version of repr() which can handle recursive data structures."""
-    return _safe_repr(object, {}, None, 0, True)[0]
+    return PrettyPrinter()._safe_repr(object, {}, None, 0)[0]
 
 def isreadable(object):
     """Determine if saferepr(object) is readable by eval()."""
-    return _safe_repr(object, {}, None, 0, True)[1]
+    return PrettyPrinter()._safe_repr(object, {}, None, 0)[1]
 
 def isrecursive(object):
     """Determine if object requires a recursive representation."""
-    return _safe_repr(object, {}, None, 0, True)[2]
+    return PrettyPrinter()._safe_repr(object, {}, None, 0)[2]
 
 class _safe_key:
     """Helper function for key functions when sorting unorderable objects.
@@ -102,7 +105,7 @@
 
 class PrettyPrinter:
     def __init__(self, indent=1, width=80, depth=None, stream=None, *,
-                 compact=False, sort_dicts=True):
+                 compact=False, sort_dicts=True, underscore_numbers=False):
         """Handle pretty printing operations onto a stream using a set of
         configured parameters.
 
@@ -143,6 +146,7 @@
             self._stream = _sys.stdout
         self._compact = bool(compact)
         self._sort_dicts = sort_dicts
+        self._underscore_numbers = underscore_numbers
 
     def pprint(self, object):
         self._format(object, self._stream, 0, 0, {}, 0)
@@ -176,14 +180,26 @@
                 p(self, object, stream, indent, allowance, context, level + 1)
                 del context[objid]
                 return
-            elif isinstance(object, dict):
+            elif (_dataclasses.is_dataclass(object) and
+                  not isinstance(object, type) and
+                  object.__dataclass_params__.repr and
+                  # Check dataclass has generated repr method.
+                  hasattr(object.__repr__, "__wrapped__") and
+                  "__create_fn__" in object.__repr__.__wrapped__.__qualname__):
                 context[objid] = 1
-                self._pprint_dict(object, stream, indent, allowance,
-                                  context, level + 1)
+                self._pprint_dataclass(object, stream, indent, allowance, context, level + 1)
                 del context[objid]
                 return
         stream.write(rep)
 
+    def _pprint_dataclass(self, object, stream, indent, allowance, context, level):
+        cls_name = object.__class__.__name__
+        indent += len(cls_name) + 1
+        items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr]
+        stream.write(cls_name + '(')
+        self._format_namespace_items(items, stream, indent, allowance, context, level)
+        stream.write(')')
+
     _dispatch = {}
 
     def _pprint_dict(self, object, stream, indent, allowance, context, level):
@@ -350,21 +366,9 @@
         else:
             cls_name = object.__class__.__name__
         indent += len(cls_name) + 1
-        delimnl = ',\n' + ' ' * indent
         items = object.__dict__.items()
-        last_index = len(items) - 1
-
         stream.write(cls_name + '(')
-        for i, (key, ent) in enumerate(items):
-            stream.write(key)
-            stream.write('=')
-
-            last = i == last_index
-            self._format(ent, stream, indent + len(key) + 1,
-                         allowance if last else 1,
-                         context, level)
-            if not last:
-                stream.write(delimnl)
+        self._format_namespace_items(items, stream, indent, allowance, context, level)
         stream.write(')')
 
     _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
@@ -386,6 +390,25 @@
             if not last:
                 write(delimnl)
 
+    def _format_namespace_items(self, items, stream, indent, allowance, context, level):
+        write = stream.write
+        delimnl = ',\n' + ' ' * indent
+        last_index = len(items) - 1
+        for i, (key, ent) in enumerate(items):
+            last = i == last_index
+            write(key)
+            write('=')
+            if id(ent) in context:
+                # Special-case representation of recursion to match standard
+                # recursive dataclass repr.
+                write("...")
+            else:
+                self._format(ent, stream, indent + len(key) + 1,
+                             allowance if last else 1,
+                             context, level)
+            if not last:
+                write(delimnl)
+
     def _format_items(self, items, stream, indent, allowance, context, level):
         write = stream.write
         indent += self._indent_per_level
@@ -441,7 +464,7 @@
         and flags indicating whether the representation is 'readable'
         and whether the object represents a recursive construct.
         """
-        return _safe_repr(object, context, maxlevels, level, self._sort_dicts)
+        return self._safe_repr(object, context, maxlevels, level)
 
     def _pprint_default_dict(self, object, stream, indent, allowance, context, level):
         if not len(object):
@@ -524,79 +547,88 @@
 
     _dispatch[_collections.UserString.__repr__] = _pprint_user_string
 
-# Return triple (repr_string, isreadable, isrecursive).
+    def _safe_repr(self, object, context, maxlevels, level):
+        # Return triple (repr_string, isreadable, isrecursive).
+        typ = type(object)
+        if typ in _builtin_scalars:
+            return repr(object), True, False
 
-def _safe_repr(object, context, maxlevels, level, sort_dicts):
-    typ = type(object)
-    if typ in _builtin_scalars:
-        return repr(object), True, False
+        r = getattr(typ, "__repr__", None)
 
-    r = getattr(typ, "__repr__", None)
-    if issubclass(typ, dict) and r is dict.__repr__:
-        if not object:
-            return "{}", True, False
-        objid = id(object)
-        if maxlevels and level >= maxlevels:
-            return "{...}", False, objid in context
-        if objid in context:
-            return _recursion(object), False, True
-        context[objid] = 1
-        readable = True
-        recursive = False
-        components = []
-        append = components.append
-        level += 1
-        if sort_dicts:
-            items = sorted(object.items(), key=_safe_tuple)
-        else:
-            items = object.items()
-        for k, v in items:
-            krepr, kreadable, krecur = _safe_repr(k, context, maxlevels, level, sort_dicts)
-            vrepr, vreadable, vrecur = _safe_repr(v, context, maxlevels, level, sort_dicts)
-            append("%s: %s" % (krepr, vrepr))
-            readable = readable and kreadable and vreadable
-            if krecur or vrecur:
-                recursive = True
-        del context[objid]
-        return "{%s}" % ", ".join(components), readable, recursive
+        if issubclass(typ, int) and r is int.__repr__:
+            if self._underscore_numbers:
+                return f"{object:_d}", True, False
+            else:
+                return repr(object), True, False
 
-    if (issubclass(typ, list) and r is list.__repr__) or \
-       (issubclass(typ, tuple) and r is tuple.__repr__):
-        if issubclass(typ, list):
+        if issubclass(typ, dict) and r is dict.__repr__:
             if not object:
-                return "[]", True, False
-            format = "[%s]"
-        elif len(object) == 1:
-            format = "(%s,)"
-        else:
-            if not object:
-                return "()", True, False
-            format = "(%s)"
-        objid = id(object)
-        if maxlevels and level >= maxlevels:
-            return format % "...", False, objid in context
-        if objid in context:
-            return _recursion(object), False, True
-        context[objid] = 1
-        readable = True
-        recursive = False
-        components = []
-        append = components.append
-        level += 1
-        for o in object:
-            orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level, sort_dicts)
-            append(orepr)
-            if not oreadable:
-                readable = False
-            if orecur:
-                recursive = True
-        del context[objid]
-        return format % ", ".join(components), readable, recursive
+                return "{}", True, False
+            objid = id(object)
+            if maxlevels and level >= maxlevels:
+                return "{...}", False, objid in context
+            if objid in context:
+                return _recursion(object), False, True
+            context[objid] = 1
+            readable = True
+            recursive = False
+            components = []
+            append = components.append
+            level += 1
+            if self._sort_dicts:
+                items = sorted(object.items(), key=_safe_tuple)
+            else:
+                items = object.items()
+            for k, v in items:
+                krepr, kreadable, krecur = self.format(
+                    k, context, maxlevels, level)
+                vrepr, vreadable, vrecur = self.format(
+                    v, context, maxlevels, level)
+                append("%s: %s" % (krepr, vrepr))
+                readable = readable and kreadable and vreadable
+                if krecur or vrecur:
+                    recursive = True
+            del context[objid]
+            return "{%s}" % ", ".join(components), readable, recursive
 
-    rep = repr(object)
-    return rep, (rep and not rep.startswith('<')), False
+        if (issubclass(typ, list) and r is list.__repr__) or \
+           (issubclass(typ, tuple) and r is tuple.__repr__):
+            if issubclass(typ, list):
+                if not object:
+                    return "[]", True, False
+                format = "[%s]"
+            elif len(object) == 1:
+                format = "(%s,)"
+            else:
+                if not object:
+                    return "()", True, False
+                format = "(%s)"
+            objid = id(object)
+            if maxlevels and level >= maxlevels:
+                return format % "...", False, objid in context
+            if objid in context:
+                return _recursion(object), False, True
+            context[objid] = 1
+            readable = True
+            recursive = False
+            components = []
+            append = components.append
+            level += 1
+            for o in object:
+                orepr, oreadable, orecur = self.format(
+                    o, context, maxlevels, level)
+                append(orepr)
+                if not oreadable:
+                    readable = False
+                if orecur:
+                    recursive = True
+            del context[objid]
+            return format % ", ".join(components), readable, recursive
 
-_builtin_scalars = frozenset({str, bytes, bytearray, int, float, complex,
+        rep = repr(object)
+        return rep, (rep and not rep.startswith('<')), False
+
+_builtin_scalars = frozenset({str, bytes, bytearray, float, complex,
                               bool, type(None)})
 
 def _recursion(object):
@@ -610,7 +642,7 @@
         object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
     p = PrettyPrinter()
     t1 = time.perf_counter()
-    _safe_repr(object, {}, None, 0, True)
+    p._safe_repr(object, {}, None, 0)
     t2 = time.perf_counter()
     p.pformat(object)
     t3 = time.perf_counter()
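
The pprint rewrite folds _safe_repr() into PrettyPrinter, adds the
underscore_numbers option, and pretty-prints dataclasses that use the
generated __repr__. A short sketch of both features:

import dataclasses
import pprint

pprint.pprint([10 ** 9, 123456789], underscore_numbers=True)
# [1_000_000_000, 123_456_789]

@dataclasses.dataclass
class Sample:
    label: str
    values: list

# Wide instances now wrap one field per line instead of staying on the
# single line produced by the generated __repr__:
pprint.pprint(Sample("x" * 40, list(range(12))), width=40)
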
diff --git a/Lib/profile.py b/Lib/profile.py
index 5cb017e..d8599fb 100644
--- a/Lib/profile.py
+++ b/Lib/profile.py
@@ -595,7 +595,12 @@
                 '__package__': None,
                 '__cached__': None,
             }
-        runctx(code, globs, None, options.outfile, options.sort)
+        try:
+            runctx(code, globs, None, options.outfile, options.sort)
+        except BrokenPipeError as exc:
+            # Prevent "Exception ignored" during interpreter shutdown.
+            sys.stdout = None
+            sys.exit(exc.errno)
     else:
         parser.print_usage()
     return parser
diff --git a/Lib/pty.py b/Lib/pty.py
index a324320..8d8ce40 100644
--- a/Lib/pty.py
+++ b/Lib/pty.py
@@ -1,7 +1,7 @@
 """Pseudo terminal utilities."""
 
 # Bugs: No signal handling.  Doesn't set slave termios and window size.
-#       Only tested on Linux.
+#       Only tested on Linux, FreeBSD, and macOS.
 # See:  W. Richard Stevens. 1992.  Advanced Programming in the
 #       UNIX Environment.  Chapter 19.
 # Author: Steen Lumholt -- with additions by Guido.
@@ -11,7 +11,11 @@
 import sys
 import tty
 
-__all__ = ["openpty","fork","spawn"]
+# names imported directly for test mocking purposes
+from os import close, waitpid
+from tty import setraw, tcgetattr, tcsetattr
+
+__all__ = ["openpty", "fork", "spawn"]
 
 STDIN_FILENO = 0
 STDOUT_FILENO = 1
@@ -105,8 +109,8 @@
         os.dup2(slave_fd, STDIN_FILENO)
         os.dup2(slave_fd, STDOUT_FILENO)
         os.dup2(slave_fd, STDERR_FILENO)
-        if (slave_fd > STDERR_FILENO):
-            os.close (slave_fd)
+        if slave_fd > STDERR_FILENO:
+            os.close(slave_fd)
 
         # Explicitly open the tty to make it become a controlling tty.
         tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
@@ -133,14 +137,22 @@
             pty master -> standard output   (master_read)
             standard input -> pty master    (stdin_read)"""
     fds = [master_fd, STDIN_FILENO]
-    while True:
-        rfds, wfds, xfds = select(fds, [], [])
+    while fds:
+        rfds, _wfds, _xfds = select(fds, [], [])
+
         if master_fd in rfds:
-            data = master_read(master_fd)
+            # Some OSes signal EOF by returning an empty byte string,
+            # some throw OSErrors.
+            try:
+                data = master_read(master_fd)
+            except OSError:
+                data = b""
             if not data:  # Reached EOF.
-                fds.remove(master_fd)
+                return    # Assume the child process has exited and is
+                          # unreachable, so we clean up.
             else:
                 os.write(STDOUT_FILENO, data)
+
         if STDIN_FILENO in rfds:
             data = stdin_read(STDIN_FILENO)
             if not data:
@@ -153,20 +165,23 @@
     if type(argv) == type(''):
         argv = (argv,)
     sys.audit('pty.spawn', argv)
+
     pid, master_fd = fork()
     if pid == CHILD:
         os.execlp(argv[0], *argv)
+
     try:
-        mode = tty.tcgetattr(STDIN_FILENO)
-        tty.setraw(STDIN_FILENO)
-        restore = 1
+        mode = tcgetattr(STDIN_FILENO)
+        setraw(STDIN_FILENO)
+        restore = True
     except tty.error:    # This is the same as termios.error
-        restore = 0
+        restore = False
+
     try:
         _copy(master_fd, master_read, stdin_read)
-    except OSError:
+    finally:
         if restore:
-            tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
+            tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
 
-    os.close(master_fd)
-    return os.waitpid(pid, 0)[1]
+    close(master_fd)
+    return waitpid(pid, 0)[1]
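
pty.spawn() now restores the terminal mode in a finally block, and _copy()
treats an OSError from the master fd as EOF. Typical use is unchanged (POSIX
only; pty does not import on Windows):

import os
import pty

# Run a command on a fresh pseudo-terminal, mirroring its I/O onto ours;
# spawn() still returns the raw wait status from waitpid().
status = pty.spawn(["echo", "hello from a pty"])
print(os.waitstatus_to_exitcode(status))   # 0
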
diff --git a/Lib/py_compile.py b/Lib/py_compile.py
index a81f493..388614e 100644
--- a/Lib/py_compile.py
+++ b/Lib/py_compile.py
@@ -173,43 +173,40 @@
     return cfile
 
 
-def main(args=None):
-    """Compile several source files.
+def main():
+    import argparse
 
-    The files named in 'args' (or on the command line, if 'args' is
-    not specified) are compiled and the resulting bytecode is cached
-    in the normal manner.  This function does not search a directory
-    structure to locate source files; it only compiles files named
-    explicitly.  If '-' is the only parameter in args, the list of
-    files is taken from standard input.
-
-    """
-    if args is None:
-        args = sys.argv[1:]
-    rv = 0
-    if args == ['-']:
-        while True:
-            filename = sys.stdin.readline()
-            if not filename:
-                break
-            filename = filename.rstrip('\n')
-            try:
-                compile(filename, doraise=True)
-            except PyCompileError as error:
-                rv = 1
-                sys.stderr.write("%s\n" % error.msg)
-            except OSError as error:
-                rv = 1
-                sys.stderr.write("%s\n" % error)
+    description = 'A simple command-line interface for the py_compile module.'
+    parser = argparse.ArgumentParser(description=description)
+    parser.add_argument(
+        '-q', '--quiet',
+        action='store_true',
+        help='Suppress error output',
+    )
+    parser.add_argument(
+        'filenames',
+        nargs='+',
+        help='Files to compile',
+    )
+    args = parser.parse_args()
+    if args.filenames == ['-']:
+        filenames = [filename.rstrip('\n') for filename in sys.stdin.readlines()]
     else:
-        for filename in args:
-            try:
-                compile(filename, doraise=True)
-            except PyCompileError as error:
-                # return value to indicate at least one failure
-                rv = 1
-                sys.stderr.write("%s\n" % error.msg)
-    return rv
+        filenames = args.filenames
+    for filename in filenames:
+        try:
+            compile(filename, doraise=True)
+        except PyCompileError as error:
+            if args.quiet:
+                parser.exit(1)
+            else:
+                parser.exit(1, error.msg)
+        except OSError as error:
+            if args.quiet:
+                parser.exit(1)
+            else:
+                parser.exit(1, str(error))
+
 
 if __name__ == "__main__":
-    sys.exit(main())
+    main()
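
The rewritten main() is argparse-based and exits on the first failure; the
programmatic API is untouched. One iteration of the new loop, sketched with a
hypothetical file name:

import py_compile

try:
    py_compile.compile("example_module.py", doraise=True)
except py_compile.PyCompileError as error:
    print(error.msg)
except OSError as error:
    print(error)
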
diff --git a/Lib/pyclbr.py b/Lib/pyclbr.py
index 99a1734..37f8699 100644
--- a/Lib/pyclbr.py
+++ b/Lib/pyclbr.py
@@ -21,11 +21,14 @@
     name    -- name of the object;
     file    -- file in which the object is defined;
     lineno  -- line in the file where the object's definition starts;
+    end_lineno -- line in the file where the object's definition ends;
     parent  -- parent of this object, if any;
     children -- nested objects contained in this object.
 The 'children' attribute is a dictionary mapping names to objects.
 
-Instances of Function describe functions with the attributes from _Object.
+Instances of Function describe functions with the attributes from _Object,
+plus the following:
+    is_async -- whether the function is defined with an 'async' prefix
 
 Instances of Class describe classes with the attributes from _Object,
 plus the following:
@@ -38,11 +41,9 @@
 shouldn't happen often.
 """
 
-import io
+import ast
 import sys
 import importlib.util
-import tokenize
-from token import NAME, DEDENT, OP
 
 __all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
 
@@ -51,48 +52,50 @@
 
 class _Object:
     "Information about Python class or function."
-    def __init__(self, module, name, file, lineno, parent):
+    def __init__(self, module, name, file, lineno, end_lineno, parent):
         self.module = module
         self.name = name
         self.file = file
         self.lineno = lineno
+        self.end_lineno = end_lineno
         self.parent = parent
         self.children = {}
-
-    def _addchild(self, name, obj):
-        self.children[name] = obj
+        if parent is not None:
+            parent.children[name] = self
 
 
+# Odd Function and Class signatures are for back-compatibility.
 class Function(_Object):
     "Information about a Python function, including methods."
-    def __init__(self, module, name, file, lineno, parent=None):
-        _Object.__init__(self, module, name, file, lineno, parent)
+    def __init__(self, module, name, file, lineno,
+                 parent=None, is_async=False, *, end_lineno=None):
+        super().__init__(module, name, file, lineno, end_lineno, parent)
+        self.is_async = is_async
+        if isinstance(parent, Class):
+            parent.methods[name] = lineno
 
 
 class Class(_Object):
     "Information about a Python class."
-    def __init__(self, module, name, super, file, lineno, parent=None):
-        _Object.__init__(self, module, name, file, lineno, parent)
-        self.super = [] if super is None else super
+    def __init__(self, module, name, super_, file, lineno,
+                 parent=None, *, end_lineno=None):
+        super().__init__(module, name, file, lineno, end_lineno, parent)
+        self.super = super_ or []
         self.methods = {}
 
-    def _addmethod(self, name, lineno):
-        self.methods[name] = lineno
 
-
-def _nest_function(ob, func_name, lineno):
+# These two functions are used by these tests:
+# Lib/test/test_pyclbr, Lib/idlelib/idle_test/test_browser.py
+def _nest_function(ob, func_name, lineno, end_lineno, is_async=False):
     "Return a Function after nesting within ob."
-    newfunc = Function(ob.module, func_name, ob.file, lineno, ob)
-    ob._addchild(func_name, newfunc)
-    if isinstance(ob, Class):
-        ob._addmethod(func_name, lineno)
-    return newfunc
+    return Function(ob.module, func_name, ob.file, lineno,
+                    parent=ob, is_async=is_async, end_lineno=end_lineno)
 
-def _nest_class(ob, class_name, lineno, super=None):
+def _nest_class(ob, class_name, lineno, end_lineno, super=None):
     "Return a Class after nesting within ob."
-    newclass = Class(ob.module, class_name, super, ob.file, lineno, ob)
-    ob._addchild(class_name, newclass)
-    return newclass
+    return Class(ob.module, class_name, super, ob.file, lineno,
+                 parent=ob, end_lineno=end_lineno)
+
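
With the ast-based browser that replaces the tokenizer walk below, pyclbr
descriptors carry end_lineno (and is_async on functions) alongside the old
attributes. Illustrative use against a stdlib module:

import pyclbr

for name, obj in pyclbr.readmodule_ex("ast").items():
    kind = "class" if isinstance(obj, pyclbr.Class) else "function"
    print(kind, name, obj.lineno, obj.end_lineno)
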
 
 def readmodule(module, path=None):
     """Return Class objects for the top-level classes in module.
@@ -115,6 +118,7 @@
     """
     return _readmodule(module, path or [])
 
+
 def _readmodule(module, path, inpackage=None):
     """Do the hard work for readmodule[_ex].
 
@@ -179,187 +183,93 @@
     return _create_tree(fullmodule, path, fname, source, tree, inpackage)
 
 
-def _create_tree(fullmodule, path, fname, source, tree, inpackage):
-    """Return the tree for a particular module.
+class _ModuleBrowser(ast.NodeVisitor):
+    def __init__(self, module, path, file, tree, inpackage):
+        self.path = path
+        self.tree = tree
+        self.file = file
+        self.module = module
+        self.inpackage = inpackage
+        self.stack = []
 
-    fullmodule (full module name), inpackage+module, becomes o.module.
-    path is passed to recursive calls of _readmodule.
-    fname becomes o.file.
-    source is tokenized.  Imports cause recursive calls to _readmodule.
-    tree is {} or {'__path__': <submodule search locations>}.
-    inpackage, None or string, is passed to recursive calls of _readmodule.
+    def visit_ClassDef(self, node):
+        bases = []
+        for base in node.bases:
+            name = ast.unparse(base)
+            if name in self.tree:
+                # We know this super class.
+                bases.append(self.tree[name])
+            elif len(names := name.split(".")) > 1:
+                # Super class form is module.class:
+                # look in module for class.
+                *_, module, class_ = names
+                if module in _modules:
+                    bases.append(_modules[module].get(class_, name))
+            else:
+                bases.append(name)
 
-    The effect of recursive calls is mutation of global _modules.
-    """
-    f = io.StringIO(source)
+        parent = self.stack[-1] if self.stack else None
+        class_ = Class(self.module, node.name, bases, self.file, node.lineno,
+                       parent=parent, end_lineno=node.end_lineno)
+        if parent is None:
+            self.tree[node.name] = class_
+        self.stack.append(class_)
+        self.generic_visit(node)
+        self.stack.pop()
 
-    stack = [] # Initialize stack of (class, indent) pairs.
+    def visit_FunctionDef(self, node, *, is_async=False):
+        parent = self.stack[-1] if self.stack else None
+        function = Function(self.module, node.name, self.file, node.lineno,
+                            parent, is_async, end_lineno=node.end_lineno)
+        if parent is None:
+            self.tree[node.name] = function
+        self.stack.append(function)
+        self.generic_visit(node)
+        self.stack.pop()
 
-    g = tokenize.generate_tokens(f.readline)
-    try:
-        for tokentype, token, start, _end, _line in g:
-            if tokentype == DEDENT:
-                lineno, thisindent = start
-                # Close previous nested classes and defs.
-                while stack and stack[-1][1] >= thisindent:
-                    del stack[-1]
-            elif token == 'def':
-                lineno, thisindent = start
-                # Close previous nested classes and defs.
-                while stack and stack[-1][1] >= thisindent:
-                    del stack[-1]
-                tokentype, func_name, start = next(g)[0:3]
-                if tokentype != NAME:
-                    continue  # Skip def with syntax error.
-                cur_func = None
-                if stack:
-                    cur_obj = stack[-1][0]
-                    cur_func = _nest_function(cur_obj, func_name, lineno)
-                else:
-                    # It is just a function.
-                    cur_func = Function(fullmodule, func_name, fname, lineno)
-                    tree[func_name] = cur_func
-                stack.append((cur_func, thisindent))
-            elif token == 'class':
-                lineno, thisindent = start
-                # Close previous nested classes and defs.
-                while stack and stack[-1][1] >= thisindent:
-                    del stack[-1]
-                tokentype, class_name, start = next(g)[0:3]
-                if tokentype != NAME:
-                    continue # Skip class with syntax error.
-                # Parse what follows the class name.
-                tokentype, token, start = next(g)[0:3]
-                inherit = None
-                if token == '(':
-                    names = [] # Initialize list of superclasses.
-                    level = 1
-                    super = [] # Tokens making up current superclass.
-                    while True:
-                        tokentype, token, start = next(g)[0:3]
-                        if token in (')', ',') and level == 1:
-                            n = "".join(super)
-                            if n in tree:
-                                # We know this super class.
-                                n = tree[n]
-                            else:
-                                c = n.split('.')
-                                if len(c) > 1:
-                                    # Super class form is module.class:
-                                    # look in module for class.
-                                    m = c[-2]
-                                    c = c[-1]
-                                    if m in _modules:
-                                        d = _modules[m]
-                                        if c in d:
-                                            n = d[c]
-                            names.append(n)
-                            super = []
-                        if token == '(':
-                            level += 1
-                        elif token == ')':
-                            level -= 1
-                            if level == 0:
-                                break
-                        elif token == ',' and level == 1:
-                            pass
-                        # Only use NAME and OP (== dot) tokens for type name.
-                        elif tokentype in (NAME, OP) and level == 1:
-                            super.append(token)
-                        # Expressions in the base list are not supported.
-                    inherit = names
-                if stack:
-                    cur_obj = stack[-1][0]
-                    cur_class = _nest_class(
-                            cur_obj, class_name, lineno, inherit)
-                else:
-                    cur_class = Class(fullmodule, class_name, inherit,
-                                      fname, lineno)
-                    tree[class_name] = cur_class
-                stack.append((cur_class, thisindent))
-            elif token == 'import' and start[1] == 0:
-                modules = _getnamelist(g)
-                for mod, _mod2 in modules:
-                    try:
-                        # Recursively read the imported module.
-                        if inpackage is None:
-                            _readmodule(mod, path)
-                        else:
-                            try:
-                                _readmodule(mod, path, inpackage)
-                            except ImportError:
-                                _readmodule(mod, [])
-                    except:
-                        # If we can't find or parse the imported module,
-                        # too bad -- don't die here.
-                        pass
-            elif token == 'from' and start[1] == 0:
-                mod, token = _getname(g)
-                if not mod or token != "import":
-                    continue
-                names = _getnamelist(g)
+    def visit_AsyncFunctionDef(self, node):
+        self.visit_FunctionDef(node, is_async=True)
+
+    def visit_Import(self, node):
+        if node.col_offset != 0:
+            return
+
+        for module in node.names:
+            try:
                 try:
-                    # Recursively read the imported module.
-                    d = _readmodule(mod, path, inpackage)
-                except:
-                    # If we can't find or parse the imported module,
-                    # too bad -- don't die here.
-                    continue
-                # Add any classes that were defined in the imported module
-                # to our name space if they were mentioned in the list.
-                for n, n2 in names:
-                    if n in d:
-                        tree[n2 or n] = d[n]
-                    elif n == '*':
-                        # Don't add names that start with _.
-                        for n in d:
-                            if n[0] != '_':
-                                tree[n] = d[n]
-    except StopIteration:
-        pass
+                    _readmodule(module.name, self.path, self.inpackage)
+                except ImportError:
+                    _readmodule(module.name, [])
+            except (ImportError, SyntaxError):
+                # If we can't find or parse the imported module,
+                # too bad -- don't die here.
+                continue
 
-    f.close()
-    return tree
+    def visit_ImportFrom(self, node):
+        if node.col_offset != 0:
+            return
+        try:
+            module = "." * node.level
+            if node.module:
+                module += node.module
+            module = _readmodule(module, self.path, self.inpackage)
+        except (ImportError, SyntaxError):
+            return
+
+        for name in node.names:
+            if name.name in module:
+                self.tree[name.asname or name.name] = module[name.name]
+            elif name.name == "*":
+                for import_name, import_value in module.items():
+                    if import_name.startswith("_"):
+                        continue
+                    self.tree[import_name] = import_value
 
 
-def _getnamelist(g):
-    """Return list of (dotted-name, as-name or None) tuples for token source g.
-
-    An as-name is the name that follows 'as' in an as clause.
-    """
-    names = []
-    while True:
-        name, token = _getname(g)
-        if not name:
-            break
-        if token == 'as':
-            name2, token = _getname(g)
-        else:
-            name2 = None
-        names.append((name, name2))
-        while token != "," and "\n" not in token:
-            token = next(g)[1]
-        if token != ",":
-            break
-    return names
-
-
-def _getname(g):
-    "Return (dotted-name or None, next-token) tuple for token source g."
-    parts = []
-    tokentype, token = next(g)[0:2]
-    if tokentype != NAME and token != '*':
-        return (None, token)
-    parts.append(token)
-    while True:
-        tokentype, token = next(g)[0:2]
-        if token != '.':
-            break
-        tokentype, token = next(g)[0:2]
-        if tokentype != NAME:
-            break
-        parts.append(token)
-    return (".".join(parts), token)
+def _create_tree(fullmodule, path, fname, source, tree, inpackage):
+    mbrowser = _ModuleBrowser(fullmodule, path, fname, tree, inpackage)
+    mbrowser.visit(ast.parse(source))
+    return mbrowser.tree
 
 
 def _main():
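
The tokenizer-driven scanner above is replaced by an ast.NodeVisitor subclass, which gets nesting, decorators, and multi-line headers right for free. A minimal sketch of the same visitor idiom (not the stdlib code itself), collecting classes and functions with their line spans:

    import ast

    class Outline(ast.NodeVisitor):
        """Collect (kind, dotted name, lineno, end_lineno) tuples."""
        def __init__(self):
            self.items = []
            self.stack = []  # names of enclosing classes/functions

        def _scoped(self, node, kind):
            name = ".".join(self.stack + [node.name])
            self.items.append((kind, name, node.lineno, node.end_lineno))
            self.stack.append(node.name)
            self.generic_visit(node)  # recurse into nested definitions
            self.stack.pop()

        def visit_ClassDef(self, node):
            self._scoped(node, "class")

        def visit_FunctionDef(self, node):
            self._scoped(node, "def")

        visit_AsyncFunctionDef = visit_FunctionDef

    src = "class A:\n    def m(self): pass\n\ndef f(): pass\n"
    out = Outline()
    out.visit(ast.parse(src))
    print(out.items)
    # [('class', 'A', 1, 2), ('def', 'A.m', 2, 2), ('def', 'f', 4, 4)]
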
diff --git a/Lib/pydoc.py b/Lib/pydoc.py
index 35ef3eb..4a8c10a 100644
--- a/Lib/pydoc.py
+++ b/Lib/pydoc.py
@@ -23,7 +23,7 @@
 local machine.  Port number 0 can be used to get an arbitrary unused port.
 
 Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
-open a Web browser to interactively browse documentation.  Combine with
+open a web browser to interactively browse documentation.  Combine with
 the -n and -p options to control the hostname and port used.
 
 Run "pydoc -w <name>" to write out the HTML documentation for a module
@@ -504,7 +504,7 @@
               not file.startswith(os.path.join(basedir, 'site-packages')))) and
             object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
             if docloc.startswith(("http://", "https://")):
-                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
+                docloc = "{}/{}.html".format(docloc.rstrip("/"), object.__name__.lower())
             else:
                 docloc = os.path.join(docloc, object.__name__.lower() + ".html")
         else:
@@ -694,7 +694,7 @@
                 url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                 results.append('<a href="%s">%s</a>' % (url, escape(all)))
             elif pep:
-                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
+                url = 'https://www.python.org/dev/peps/pep-%04d/' % int(pep)
                 results.append('<a href="%s">%s</a>' % (url, escape(all)))
             elif selfdot:
                 # Create a link for methods like 'self.method(...)'
@@ -1594,9 +1594,10 @@
 def pipepager(text, cmd):
     """Page through text by feeding it to another program."""
     import subprocess
-    proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
+    proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
+                            errors='backslashreplace')
     try:
-        with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
+        with proc.stdin as pipe:
             try:
                 pipe.write(text)
             except KeyboardInterrupt:
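
Passing errors= (or encoding=) to subprocess.Popen opens the child's pipes in text mode, which is why the explicit io.TextIOWrapper above can be dropped. A standalone sketch of the idiom; the cat command is only an assumption for illustration on a Unix-like system:

    import subprocess

    # errors= implies text mode, so proc.stdin accepts str and
    # backslash-escapes anything the target encoding cannot represent.
    proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                            errors='backslashreplace')
    with proc.stdin as pipe:
        pipe.write('hello pager\n')
    proc.wait()
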
@@ -1617,13 +1618,14 @@
 def tempfilepager(text, cmd):
     """Page through text by invoking a program on a temporary file."""
     import tempfile
-    filename = tempfile.mktemp()
-    with open(filename, 'w', errors='backslashreplace') as file:
-        file.write(text)
-    try:
+    with tempfile.TemporaryDirectory() as tempdir:
+        filename = os.path.join(tempdir, 'pydoc.out')
+        with open(filename, 'w', errors='backslashreplace',
+                  encoding=os.device_encoding(0) if
+                  sys.platform == 'win32' else None
+                  ) as file:
+            file.write(text)
         os.system(cmd + ' "' + filename + '"')
-    finally:
-        os.unlink(filename)
 
 def _escape_stdout(text):
     # Escape non-encodable characters to avoid encoding errors later
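
tempfile.mktemp() only hands back a name, leaving a window in which another process can claim the path; tempfile.TemporaryDirectory() creates a private directory and deletes it, contents included, when the with block exits, which also removes the need for the try/finally unlink. A minimal sketch:

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tempdir:
        filename = os.path.join(tempdir, 'pydoc.out')
        with open(filename, 'w', errors='backslashreplace') as file:
            file.write('text to page\n')
        # hand `filename` to an external program here
    print(os.path.exists(filename))  # False -- cleaned up automatically
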
@@ -1817,7 +1819,6 @@
         'False': '',
         'None': '',
         'True': '',
-        '__peg_parser__': '',
         'and': 'BOOLEAN',
         'as': 'with',
         'assert': ('assert', ''),
@@ -2065,7 +2066,7 @@
 Welcome to Python {0}'s help utility!
 
 If this is your first time using Python, you should definitely check out
-the tutorial on the Internet at https://docs.python.org/{0}/tutorial/.
+the tutorial on the internet at https://docs.python.org/{0}/tutorial/.
 
 Enter the name of any module, keyword, or topic to get help on writing
 Python programs and using Python modules.  To quit this help utility and
@@ -2279,13 +2280,13 @@
         warnings.filterwarnings('ignore') # ignore problems during import
         ModuleScanner().run(callback, key, onerror=onerror)
 
-# --------------------------------------- enhanced Web browser interface
+# --------------------------------------- enhanced web browser interface
 
 def _start_server(urlhandler, hostname, port):
     """Start an HTTP server thread on a specific port.
 
     Start an HTML/text server thread, so HTML or text documents can be
-    browsed dynamically and interactively with a Web browser.  Example use:
+    browsed dynamically and interactively with a web browser.  Example use:
 
         >>> import time
         >>> import pydoc
@@ -2457,9 +2458,6 @@
 %s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
 </body></html>''' % (title, css_link, html_navbar(), contents)
 
-        def filelink(self, url, path):
-            return '<a href="getfile?key=%s">%s</a>' % (url, path)
-
 
     html = _HTMLDoc()
 
@@ -2545,19 +2543,6 @@
             'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
         return 'Search Results', contents
 
-    def html_getfile(path):
-        """Get and display a source file listing safely."""
-        path = urllib.parse.unquote(path)
-        with tokenize.open(path) as fp:
-            lines = html.escape(fp.read())
-        body = '<pre>%s</pre>' % lines
-        heading = html.heading(
-            '<big><big><strong>File Listing</strong></big></big>',
-            '#ffffff', '#7799ee')
-        contents = heading + html.bigsection(
-            'File: %s' % path, '#ffffff', '#ee77aa', body)
-        return 'getfile %s' % path, contents
-
     def html_topics():
         """Index of topic texts available."""
 
@@ -2649,8 +2634,6 @@
                 op, _, url = url.partition('=')
                 if op == "search?key":
                     title, content = html_search(url)
-                elif op == "getfile?key":
-                    title, content = html_getfile(url)
                 elif op == "topic?key":
                     # try topics first, then objects.
                     try:
@@ -2689,7 +2672,7 @@
 
 
 def browse(port=0, *, open_browser=True, hostname='localhost'):
-    """Start the enhanced pydoc Web server and open a Web browser.
+    """Start the enhanced pydoc web server and open a web browser.
 
     Use port '0' to start the server on an arbitrary port.
     Set open_browser to False to suppress opening a browser.
@@ -2841,7 +2824,7 @@
     number 0 can be used to get an arbitrary unused port.
 
 {cmd} -b
-    Start an HTTP server on an arbitrary unused port and open a Web browser
+    Start an HTTP server on an arbitrary unused port and open a web browser
     to interactively browse documentation.  This option can be used in
     combination with -n and/or -p.
 
diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py
index d8dd8c5..ac7d16c 100644
--- a/Lib/pydoc_data/topics.py
+++ b/Lib/pydoc_data/topics.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Autogenerated by Sphinx on Mon Dec  7 15:00:07 2020
+# Autogenerated by Sphinx on Wed Mar 16 11:26:55 2022
 topics = {'assert': 'The "assert" statement\n'
            '**********************\n'
            '\n'
@@ -433,11 +433,9 @@
           '\n'
           'Execution of Python coroutines can be suspended and resumed at '
           'many\n'
-          'points (see *coroutine*).  Inside the body of a coroutine '
-          'function,\n'
-          '"await" and "async" identifiers become reserved keywords; "await"\n'
-          'expressions, "async for" and "async with" can only be used in\n'
-          'coroutine function bodies.\n'
+          'points (see *coroutine*). "await" expressions, "async for" and '
+          '"async\n'
+          'with" can only be used in the body of a coroutine function.\n'
           '\n'
           'Functions defined with "async def" syntax are always coroutine\n'
           'functions, even if they do not contain "await" or "async" '
@@ -453,19 +451,22 @@
           '       do_stuff()\n'
           '       await some_coroutine()\n'
           '\n'
+          'Changed in version 3.7: "await" and "async" are now keywords;\n'
+          'previously they were only treated as such inside the body of a\n'
+          'coroutine function.\n'
+          '\n'
           '\n'
           'The "async for" statement\n'
           '=========================\n'
           '\n'
           '   async_for_stmt ::= "async" for_stmt\n'
           '\n'
-          'An *asynchronous iterable* is able to call asynchronous code in '
-          'its\n'
-          '*iter* implementation, and *asynchronous iterator* can call\n'
-          'asynchronous code in its *next* method.\n'
+          'An *asynchronous iterable* provides an "__aiter__" method that\n'
+          'directly returns an *asynchronous iterator*, which can call\n'
+          'asynchronous code in its "__anext__" method.\n'
           '\n'
           'The "async for" statement allows convenient iteration over\n'
-          'asynchronous iterators.\n'
+          'asynchronous iterables.\n'
           '\n'
           'The following code:\n'
           '\n'
@@ -550,13 +551,65 @@
           'exception.\n'
           '    That new exception causes the old one to be lost.\n'
           '\n'
-          '[2] A string literal appearing as the first statement in the '
+          '[2] In pattern matching, a sequence is defined as one of the\n'
+          '    following:\n'
+          '\n'
+          '       * a class that inherits from "collections.abc.Sequence"\n'
+          '\n'
+          '       * a Python class that has been registered as\n'
+          '         "collections.abc.Sequence"\n'
+          '\n'
+          '       * a builtin class that has its (CPython) '
+          '"Py_TPFLAGS_SEQUENCE"\n'
+          '         bit set\n'
+          '\n'
+          '       * a class that inherits from any of the above\n'
+          '\n'
+          '    The following standard library classes are sequences:\n'
+          '\n'
+          '       * "array.array"\n'
+          '\n'
+          '       * "collections.deque"\n'
+          '\n'
+          '       * "list"\n'
+          '\n'
+          '       * "memoryview"\n'
+          '\n'
+          '       * "range"\n'
+          '\n'
+          '       * "tuple"\n'
+          '\n'
+          '    Note:\n'
+          '\n'
+          '      Subject values of type "str", "bytes", and "bytearray" do '
+          'not\n'
+          '      match sequence patterns.\n'
+          '\n'
+          '[3] In pattern matching, a mapping is defined as one of the '
+          'following:\n'
+          '\n'
+          '       * a class that inherits from "collections.abc.Mapping"\n'
+          '\n'
+          '       * a Python class that has been registered as\n'
+          '         "collections.abc.Mapping"\n'
+          '\n'
+          '       * a builtin class that has its (CPython) '
+          '"Py_TPFLAGS_MAPPING"\n'
+          '         bit set\n'
+          '\n'
+          '       * a class that inherits from any of the above\n'
+          '\n'
+          '    The standard library classes "dict" and '
+          '"types.MappingProxyType"\n'
+          '    are mappings.\n'
+          '\n'
+          '[4] A string literal appearing as the first statement in the '
           'function\n'
           '    body is transformed into the function’s "__doc__" attribute '
           'and\n'
           '    therefore the function’s *docstring*.\n'
           '\n'
-          '[3] A string literal appearing as the first statement in the class\n'
+          '[5] A string literal appearing as the first statement in the class\n'
           '    body is transformed into the namespace’s "__doc__" item and\n'
           '    therefore the class’s *docstring*.\n',
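
The new footnotes pin down which subjects can match sequence and mapping patterns; notably, str, bytes, and bytearray are sequences everywhere else in Python but are excluded here. A quick illustration (requires Python 3.10+):

    def first(subject):
        match subject:
            case [head, *_]:
                return head
            case _:
                return None

    print(first([10, 20]))  # 10 -- list matches a sequence pattern
    print(first((10, 20)))  # 10 -- tuple does too
    print(first("ab"))      # None -- str is excluded by footnote [2]
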
  'atom-identifiers': 'Identifiers (Names)\n'
@@ -883,32 +936,6 @@
                      '*instance* of the\n'
                      '   owner class.\n'
                      '\n'
-                     'object.__set_name__(self, owner, name)\n'
-                     '\n'
-                     '   Called at the time the owning class *owner* is '
-                     'created. The\n'
-                     '   descriptor has been assigned to *name*.\n'
-                     '\n'
-                     '   Note:\n'
-                     '\n'
-                     '     "__set_name__()" is only called implicitly as part '
-                     'of the "type"\n'
-                     '     constructor, so it will need to be called '
-                     'explicitly with the\n'
-                     '     appropriate parameters when a descriptor is added '
-                     'to a class\n'
-                     '     after initial creation:\n'
-                     '\n'
-                     '        class A:\n'
-                     '           pass\n'
-                     '        descr = custom_descriptor()\n'
-                     '        A.attr = descr\n'
-                     "        descr.__set_name__(A, 'attr')\n"
-                     '\n'
-                     '     See Creating the class object for more details.\n'
-                     '\n'
-                     '   New in version 3.6.\n'
-                     '\n'
                      'The attribute "__objclass__" is interpreted by the '
                      '"inspect" module as\n'
                      'specifying the class where this object was defined '
@@ -980,16 +1007,16 @@
                      '"super(B,\n'
                      '   obj).m()" searches "obj.__class__.__mro__" for the '
                      'base class "A"\n'
-                     '   immediately preceding "B" and then invokes the '
+                     '   immediately following "B" and then invokes the '
                      'descriptor with the\n'
                      '   call: "A.__dict__[\'m\'].__get__(obj, '
                      'obj.__class__)".\n'
                      '\n'
                      'For instance bindings, the precedence of descriptor '
                      'invocation depends\n'
-                     'on the which descriptor methods are defined.  A '
-                     'descriptor can define\n'
-                     'any combination of "__get__()", "__set__()" and '
+                     'on which descriptor methods are defined.  A descriptor '
+                     'can define any\n'
+                     'combination of "__get__()", "__set__()" and '
                      '"__delete__()".  If it\n'
                      'does not define "__get__()", then accessing the '
                      'attribute will return\n'
@@ -1011,14 +1038,15 @@
                      'can be\n'
                      'overridden by instances.\n'
                      '\n'
-                     'Python methods (including "staticmethod()" and '
-                     '"classmethod()") are\n'
-                     'implemented as non-data descriptors.  Accordingly, '
-                     'instances can\n'
-                     'redefine and override methods.  This allows individual '
-                     'instances to\n'
-                     'acquire behaviors that differ from other instances of '
-                     'the same class.\n'
+                     'Python methods (including those decorated with '
+                     '"@staticmethod" and\n'
+                     '"@classmethod") are implemented as non-data '
+                     'descriptors.  Accordingly,\n'
+                     'instances can redefine and override methods.  This '
+                     'allows individual\n'
+                     'instances to acquire behaviors that differ from other '
+                     'instances of the\n'
+                     'same class.\n'
                      '\n'
                      'The "property()" function is implemented as a data '
                      'descriptor.\n'
@@ -1031,12 +1059,12 @@
                      '\n'
                      '*__slots__* allow us to explicitly declare data members '
                      '(like\n'
-                     'properties) and deny the creation of *__dict__* and '
+                     'properties) and deny the creation of "__dict__" and '
                      '*__weakref__*\n'
                      '(unless explicitly declared in *__slots__* or available '
                      'in a parent.)\n'
                      '\n'
-                     'The space saved over using *__dict__* can be '
+                     'The space saved over using "__dict__" can be '
                      'significant. Attribute\n'
                      'lookup speed can be significantly improved as well.\n'
                      '\n'
@@ -1048,7 +1076,7 @@
                      '*__slots__*\n'
                      '   reserves space for the declared variables and '
                      'prevents the\n'
-                     '   automatic creation of *__dict__* and *__weakref__* '
+                     '   automatic creation of "__dict__" and *__weakref__* '
                      'for each\n'
                      '   instance.\n'
                      '\n'
@@ -1057,11 +1085,11 @@
                      '--------------------------\n'
                      '\n'
                      '* When inheriting from a class without *__slots__*, the '
-                     '*__dict__* and\n'
+                     '"__dict__" and\n'
                      '  *__weakref__* attribute of the instances will always '
                      'be accessible.\n'
                      '\n'
-                     '* Without a *__dict__* variable, instances cannot be '
+                     '* Without a "__dict__" variable, instances cannot be '
                      'assigned new\n'
                      '  variables not listed in the *__slots__* definition.  '
                      'Attempts to\n'
@@ -1075,28 +1103,28 @@
                      '\n'
                      '* Without a *__weakref__* variable for each instance, '
                      'classes defining\n'
-                     '  *__slots__* do not support weak references to its '
-                     'instances. If weak\n'
-                     '  reference support is needed, then add '
+                     '  *__slots__* do not support "weak references" to its '
+                     'instances. If\n'
+                     '  weak reference support is needed, then add '
                      '"\'__weakref__\'" to the\n'
                      '  sequence of strings in the *__slots__* declaration.\n'
                      '\n'
                      '* *__slots__* are implemented at the class level by '
                      'creating\n'
-                     '  descriptors (Implementing Descriptors) for each '
-                     'variable name.  As a\n'
-                     '  result, class attributes cannot be used to set default '
-                     'values for\n'
-                     '  instance variables defined by *__slots__*; otherwise, '
-                     'the class\n'
-                     '  attribute would overwrite the descriptor assignment.\n'
+                     '  descriptors for each variable name.  As a result, '
+                     'class attributes\n'
+                     '  cannot be used to set default values for instance '
+                     'variables defined\n'
+                     '  by *__slots__*; otherwise, the class attribute would '
+                     'overwrite the\n'
+                     '  descriptor assignment.\n'
                      '\n'
                      '* The action of a *__slots__* declaration is not limited '
                      'to the class\n'
                      '  where it is defined.  *__slots__* declared in parents '
                      'are available\n'
                      '  in child classes. However, child subclasses will get a '
-                     '*__dict__*\n'
+                     '"__dict__"\n'
                      '  and *__weakref__* unless they also define *__slots__* '
                      '(which should\n'
                      '  only contain names of any *additional* slots).\n'
@@ -1116,13 +1144,19 @@
                      '  “variable-length” built-in types such as "int", '
                      '"bytes" and "tuple".\n'
                      '\n'
-                     '* Any non-string iterable may be assigned to '
-                     '*__slots__*. Mappings may\n'
-                     '  also be used; however, in the future, special meaning '
-                     'may be\n'
-                     '  assigned to the values corresponding to each key.\n'
+                     '* Any non-string *iterable* may be assigned to '
+                     '*__slots__*.\n'
                      '\n'
-                     '* *__class__* assignment works only if both classes have '
+                     '* If a "dictionary" is used to assign *__slots__*, the '
+                     'dictionary keys\n'
+                     '  will be used as the slot names. The values of the '
+                     'dictionary can be\n'
+                     '  used to provide per-attribute docstrings that will be '
+                     'recognised by\n'
+                     '  "inspect.getdoc()" and displayed in the output of '
+                     '"help()".\n'
+                     '\n'
+                     '* "__class__" assignment works only if both classes have '
                      'the same\n'
                      '  *__slots__*.\n'
                      '\n'
@@ -1134,10 +1168,10 @@
                      'violations\n'
                      '  raise "TypeError".\n'
                      '\n'
-                     '* If an iterator is used for *__slots__* then a '
-                     'descriptor is created\n'
-                     '  for each of the iterator’s values. However, the '
-                     '*__slots__*\n'
+                     '* If an *iterator* is used for *__slots__* then a '
+                     '*descriptor* is\n'
+                     '  created for each of the iterator’s values. However, '
+                     'the *__slots__*\n'
                      '  attribute will be an empty iterator.\n',
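
As the updated text notes, dictionary values in *__slots__* now serve as per-attribute docstrings recognised by inspect.getdoc() and help(). A short sketch:

    import inspect

    class Point:
        __slots__ = {
            'x': 'horizontal coordinate',
            'y': 'vertical coordinate',
        }

    print(inspect.getdoc(Point.x))  # horizontal coordinate
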
  'attribute-references': 'Attribute references\n'
                          '********************\n'
@@ -1260,6 +1294,10 @@
            'In the latter case, sequence repetition is performed; a negative\n'
            'repetition factor yields an empty sequence.\n'
            '\n'
+           'This operation can be customized using the special "__mul__()" '
+           'and\n'
+           '"__rmul__()" methods.\n'
+           '\n'
            'The "@" (at) operator is intended to be used for matrix\n'
            'multiplication.  No builtin Python types implement this operator.\n'
            '\n'
@@ -1275,6 +1313,10 @@
            'result.  Division by zero raises the "ZeroDivisionError" '
            'exception.\n'
            '\n'
+           'This operation can be customized using the special "__truediv__()" '
+           'and\n'
+           '"__floordiv__()" methods.\n'
+           '\n'
            'The "%" (modulo) operator yields the remainder from the division '
            'of\n'
            'the first argument by the second.  The numeric arguments are '
@@ -1306,6 +1348,10 @@
            'string formatting is described in the Python Library Reference,\n'
            'section printf-style String Formatting.\n'
            '\n'
+           'The *modulo* operation can be customized using the special '
+           '"__mod__()"\n'
+           'method.\n'
+           '\n'
            'The floor division operator, the modulo operator, and the '
            '"divmod()"\n'
            'function are not defined for complex numbers.  Instead, convert to '
@@ -1320,9 +1366,16 @@
            'and then added together. In the latter case, the sequences are\n'
            'concatenated.\n'
            '\n'
+           'This operation can be customized using the special "__add__()" '
+           'and\n'
+           '"__radd__()" methods.\n'
+           '\n'
            'The "-" (subtraction) operator yields the difference of its '
            'arguments.\n'
-           'The numeric arguments are first converted to a common type.\n',
+           'The numeric arguments are first converted to a common type.\n'
+           '\n'
+           'This operation can be customized using the special "__sub__()" '
+           'method.\n',
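
The additions above spell out which special methods each arithmetic operator dispatches to. A sketch of __add__ with its reflected partner __radd__, which Python consults when the left operand returns NotImplemented:

    class Meters:
        def __init__(self, n):
            self.n = n

        def __add__(self, other):
            if isinstance(other, (int, float)):
                return Meters(self.n + other)
            return NotImplemented  # let the other operand handle it

        __radd__ = __add__  # makes 3 + Meters(4) work as well

        def __repr__(self):
            return f"Meters({self.n})"

    print(Meters(4) + 3)  # Meters(7) via __add__
    print(3 + Meters(4))  # Meters(7) via __radd__
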
  'bitwise': 'Binary bitwise operations\n'
             '*************************\n'
             '\n'
@@ -1335,14 +1388,18 @@
             '\n'
             'The "&" operator yields the bitwise AND of its arguments, which '
             'must\n'
-            'be integers.\n'
+            'be integers or one of them must be a custom object overriding\n'
+            '"__and__()" or "__rand__()" special methods.\n'
             '\n'
             'The "^" operator yields the bitwise XOR (exclusive OR) of its\n'
-            'arguments, which must be integers.\n'
+            'arguments, which must be integers or one of them must be a '
+            'custom\n'
+            'object overriding "__xor__()" or "__rxor__()" special methods.\n'
             '\n'
             'The "|" operator yields the bitwise (inclusive) OR of its '
             'arguments,\n'
-            'which must be integers.\n',
+            'which must be integers or one of them must be a custom object\n'
+            'overriding "__or__()" or "__ror__()" special methods.\n',
  'bltin-code-objects': 'Code Objects\n'
                        '************\n'
                        '\n'
@@ -1359,6 +1416,10 @@
                        'through their "__code__" attribute. See also the '
                        '"code" module.\n'
                        '\n'
+                       'Accessing "__code__" raises an auditing event '
+                       '"object.__getattr__"\n'
+                       'with arguments "obj" and ""__code__"".\n'
+                       '\n'
                        'A code object can be executed or evaluated by passing '
                        'it (instead of a\n'
                        'source string) to the "exec()" or "eval()"  built-in '
@@ -1703,7 +1764,7 @@
           'original global namespace. (Usually, the suite contains mostly\n'
           'function definitions.)  When the class’s suite finishes execution, '
           'its\n'
-          'execution frame is discarded but its local namespace is saved. [3] '
+          'execution frame is discarded but its local namespace is saved. [5] '
           'A\n'
           'class object is then created using the inheritance list for the '
           'base\n'
@@ -1784,7 +1845,11 @@
                 '   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n'
                 '                     | "is" ["not"] | ["not"] "in"\n'
                 '\n'
-                'Comparisons yield boolean values: "True" or "False".\n'
+                'Comparisons yield boolean values: "True" or "False". Custom '
+                '*rich\n'
+                'comparison methods* may return non-boolean values. In this '
+                'case Python\n'
+                'will call "bool()" on such value in boolean contexts.\n'
                 '\n'
                 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" '
                 'is\n'
@@ -2182,6 +2247,7 @@
              '                     | for_stmt\n'
              '                     | try_stmt\n'
              '                     | with_stmt\n'
+             '                     | match_stmt\n'
              '                     | funcdef\n'
              '                     | classdef\n'
              '                     | async_with_stmt\n'
@@ -2319,33 +2385,6 @@
              ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, '
              '2]".\n'
              '\n'
-             'Note:\n'
-             '\n'
-             '  There is a subtlety when the sequence is being modified by the '
-             'loop\n'
-             '  (this can only occur for mutable sequences, e.g. lists).  An\n'
-             '  internal counter is used to keep track of which item is used '
-             'next,\n'
-             '  and this is incremented on each iteration.  When this counter '
-             'has\n'
-             '  reached the length of the sequence the loop terminates.  This '
-             'means\n'
-             '  that if the suite deletes the current (or a previous) item '
-             'from the\n'
-             '  sequence, the next item will be skipped (since it gets the '
-             'index of\n'
-             '  the current item which has already been treated).  Likewise, '
-             'if the\n'
-             '  suite inserts an item in the sequence before the current item, '
-             'the\n'
-             '  current item will be treated again the next time through the '
-             'loop.\n'
-             '  This can lead to nasty bugs that can be avoided by making a\n'
-             '  temporary copy using a slice of the whole sequence, e.g.,\n'
-             '\n'
-             '     for x in a[:]:\n'
-             '         if x < 0: a.remove(x)\n'
-             '\n'
              '\n'
              'The "try" statement\n'
              '===================\n'
@@ -2381,8 +2420,9 @@
              'compatible\n'
              'with an exception if it is the class or a base class of the '
              'exception\n'
-             'object or a tuple containing an item compatible with the '
-             'exception.\n'
+             'object, or a tuple containing an item that is the class or a '
+             'base\n'
+             'class of the exception object.\n'
              '\n'
              'If no except clause matches the exception, the search for an '
              'exception\n'
@@ -2449,11 +2489,32 @@
              '(see\n'
              'section The standard type hierarchy) identifying the point in '
              'the\n'
-             'program where the exception occurred.  "sys.exc_info()" values '
-             'are\n'
-             'restored to their previous values (before the call) when '
-             'returning\n'
-             'from a function that handled an exception.\n'
+             'program where the exception occurred.  The details about the '
+             'exception\n'
+             'accessed via "sys.exc_info()" are restored to their previous '
+             'values\n'
+             'when leaving an exception handler:\n'
+             '\n'
+             '   >>> print(sys.exc_info())\n'
+             '   (None, None, None)\n'
+             '   >>> try:\n'
+             '   ...     raise TypeError\n'
+             '   ... except:\n'
+             '   ...     print(sys.exc_info())\n'
+             '   ...     try:\n'
+             '   ...          raise ValueError\n'
+             '   ...     except:\n'
+             '   ...         print(sys.exc_info())\n'
+             '   ...     print(sys.exc_info())\n'
+             '   ...\n'
+             "   (<class 'TypeError'>, TypeError(), <traceback object at "
+             '0x10efad080>)\n'
+             "   (<class 'ValueError'>, ValueError(), <traceback object at "
+             '0x10efad040>)\n'
+             "   (<class 'TypeError'>, TypeError(), <traceback object at "
+             '0x10efad080>)\n'
+             '   >>> print(sys.exc_info())\n'
+             '   (None, None, None)\n'
              '\n'
              'The optional "else" clause is executed if the control flow '
              'leaves the\n'
@@ -2538,8 +2599,10 @@
              'usage\n'
              'patterns to be encapsulated for convenient reuse.\n'
              '\n'
-             '   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
-             '   with_item ::= expression ["as" target]\n'
+             '   with_stmt          ::= "with" ( "(" with_stmt_contents ","? '
+             '")" | with_stmt_contents ) ":" suite\n'
+             '   with_stmt_contents ::= with_item ("," with_item)*\n'
+             '   with_item          ::= expression ["as" target]\n'
              '\n'
              'The execution of the "with" statement with one “item” proceeds '
              'as\n'
@@ -2631,9 +2694,23 @@
              '       with B() as b:\n'
              '           SUITE\n'
              '\n'
+             'You can also write multi-item context managers in multiple lines '
+             'if\n'
+             'the items are surrounded by parentheses. For example:\n'
+             '\n'
+             '   with (\n'
+             '       A() as a,\n'
+             '       B() as b,\n'
+             '   ):\n'
+             '       SUITE\n'
+             '\n'
              'Changed in version 3.1: Support for multiple context '
              'expressions.\n'
              '\n'
+             'Changed in version 3.10: Support for using grouping parentheses '
+             'to\n'
+             'break the statement in multiple lines.\n'
+             '\n'
              'See also:\n'
              '\n'
              '  **PEP 343** - The “with” statement\n'
@@ -2642,6 +2719,746 @@
              '     statement.\n'
              '\n'
              '\n'
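
The grouping parentheses documented above (new in 3.10) put one context manager per line without backslash continuations. A short sketch; the file names are illustrative only:

    with (
        open('in.txt') as src,
        open('out.txt', 'w') as dst,
    ):
        dst.write(src.read())
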
+             'The "match" statement\n'
+             '=====================\n'
+             '\n'
+             'New in version 3.10.\n'
+             '\n'
+             'The match statement is used for pattern matching.  Syntax:\n'
+             '\n'
+             '   match_stmt   ::= \'match\' subject_expr ":" NEWLINE INDENT '
+             'case_block+ DEDENT\n'
+             '   subject_expr ::= star_named_expression "," '
+             'star_named_expressions?\n'
+             '                    | named_expression\n'
+             '   case_block   ::= \'case\' patterns [guard] ":" block\n'
+             '\n'
+             'Note:\n'
+             '\n'
+             '  This section uses single quotes to denote soft keywords.\n'
+             '\n'
+             'Pattern matching takes a pattern as input (following "case") and '
+             'a\n'
+             'subject value (following "match").  The pattern (which may '
+             'contain\n'
+             'subpatterns) is matched against the subject value.  The outcomes '
+             'are:\n'
+             '\n'
+             '* A match success or failure (also termed a pattern success or\n'
+             '  failure).\n'
+             '\n'
+             '* Possible binding of matched values to a name.  The '
+             'prerequisites for\n'
+             '  this are further discussed below.\n'
+             '\n'
+             'The "match" and "case" keywords are soft keywords.\n'
+             '\n'
+             'See also:\n'
+             '\n'
+             '  * **PEP 634** – Structural Pattern Matching: Specification\n'
+             '\n'
+             '  * **PEP 636** – Structural Pattern Matching: Tutorial\n'
+             '\n'
+             '\n'
+             'Overview\n'
+             '--------\n'
+             '\n'
+             'Here’s an overview of the logical flow of a match statement:\n'
+             '\n'
+             '1. The subject expression "subject_expr" is evaluated and a '
+             'resulting\n'
+             '   subject value obtained. If the subject expression contains a '
+             'comma,\n'
+             '   a tuple is constructed using the standard rules.\n'
+             '\n'
+             '2. Each pattern in a "case_block" is attempted to match with '
+             'the\n'
+             '   subject value. The specific rules for success or failure are\n'
+             '   described below. The match attempt can also bind some or all '
+             'of the\n'
+             '   standalone names within the pattern. The precise pattern '
+             'binding\n'
+             '   rules vary per pattern type and are specified below.  **Name\n'
+             '   bindings made during a successful pattern match outlive the\n'
+             '   executed block and can be used after the match statement**.\n'
+             '\n'
+             '      Note:\n'
+             '\n'
+             '        During failed pattern matches, some subpatterns may '
+             'succeed.\n'
+             '        Do not rely on bindings being made for a failed match.\n'
+             '        Conversely, do not rely on variables remaining unchanged '
+             'after\n'
+             '        a failed match.  The exact behavior is dependent on\n'
+             '        implementation and may vary.  This is an intentional '
+             'decision\n'
+             '        made to allow different implementations to add '
+             'optimizations.\n'
+             '\n'
+             '3. If the pattern succeeds, the corresponding guard (if present) '
+             'is\n'
+             '   evaluated. In this case all name bindings are guaranteed to '
+             'have\n'
+             '   happened.\n'
+             '\n'
+             '   * If the guard evaluates as true or is missing, the "block" '
+             'inside\n'
+             '     "case_block" is executed.\n'
+             '\n'
+             '   * Otherwise, the next "case_block" is attempted as described '
+             'above.\n'
+             '\n'
+             '   * If there are no further case blocks, the match statement '
+             'is\n'
+             '     completed.\n'
+             '\n'
+             'Note:\n'
+             '\n'
+             '  Users should generally never rely on a pattern being '
+             'evaluated.\n'
+             '  Depending on implementation, the interpreter may cache values '
+             'or use\n'
+             '  other optimizations which skip repeated evaluations.\n'
+             '\n'
+             'A sample match statement:\n'
+             '\n'
+             '   >>> flag = False\n'
+             '   >>> match (100, 200):\n'
+             '   ...    case (100, 300):  # Mismatch: 200 != 300\n'
+             "   ...        print('Case 1')\n"
+             '   ...    case (100, 200) if flag:  # Successful match, but '
+             'guard fails\n'
+             "   ...        print('Case 2')\n"
+             '   ...    case (100, y):  # Matches and binds y to 200\n'
+             "   ...        print(f'Case 3, y: {y}')\n"
+             '   ...    case _:  # Pattern not attempted\n'
+             "   ...        print('Case 4, I match anything!')\n"
+             '   ...\n'
+             '   Case 3, y: 200\n'
+             '\n'
+             'In this case, "if flag" is a guard.  Read more about that in the '
+             'next\n'
+             'section.\n'
+             '\n'
+             '\n'
+             'Guards\n'
+             '------\n'
+             '\n'
+             '   guard ::= "if" named_expression\n'
+             '\n'
+             'A "guard" (which is part of the "case") must succeed for code '
+             'inside\n'
+             'the "case" block to execute.  It takes the form: "if" followed '
+             'by an\n'
+             'expression.\n'
+             '\n'
+             'The logical flow of a "case" block with a "guard" follows:\n'
+             '\n'
+             '1. Check that the pattern in the "case" block succeeded.  If '
+             'the\n'
+             '   pattern failed, the "guard" is not evaluated and the next '
+             '"case"\n'
+             '   block is checked.\n'
+             '\n'
+             '2. If the pattern succeeded, evaluate the "guard".\n'
+             '\n'
+             '   * If the "guard" condition evaluates as true, the case block '
+             'is\n'
+             '     selected.\n'
+             '\n'
+             '   * If the "guard" condition evaluates as false, the case block '
+             'is\n'
+             '     not selected.\n'
+             '\n'
+             '   * If the "guard" raises an exception during evaluation, the\n'
+             '     exception bubbles up.\n'
+             '\n'
+             'Guards are allowed to have side effects as they are '
+             'expressions.\n'
+             'Guard evaluation must proceed from the first to the last case '
+             'block,\n'
+             'one at a time, skipping case blocks whose pattern(s) don’t all\n'
+             'succeed. (I.e., guard evaluation must happen in order.) Guard\n'
+             'evaluation must stop once a case block is selected.\n'
+             '\n'
+             '\n'
+             'Irrefutable Case Blocks\n'
+             '-----------------------\n'
+             '\n'
+             'An irrefutable case block is a match-all case block.  A match\n'
+             'statement may have at most one irrefutable case block, and it '
+             'must be\n'
+             'last.\n'
+             '\n'
+             'A case block is considered irrefutable if it has no guard and '
+             'its\n'
+             'pattern is irrefutable.  A pattern is considered irrefutable if '
+             'we can\n'
+             'prove from its syntax alone that it will always succeed.  Only '
+             'the\n'
+             'following patterns are irrefutable:\n'
+             '\n'
+             '* AS Patterns whose left-hand side is irrefutable\n'
+             '\n'
+             '* OR Patterns containing at least one irrefutable pattern\n'
+             '\n'
+             '* Capture Patterns\n'
+             '\n'
+             '* Wildcard Patterns\n'
+             '\n'
+             '* parenthesized irrefutable patterns\n'
+             '\n'
+             '\n'
+             'Patterns\n'
+             '--------\n'
+             '\n'
+             'Note:\n'
+             '\n'
+             '  This section uses grammar notations beyond standard EBNF:\n'
+             '\n'
+             '  * the notation "SEP.RULE+" is shorthand for "RULE (SEP '
+             'RULE)*"\n'
+             '\n'
+             '  * the notation "!RULE" is shorthand for a negative lookahead\n'
+             '    assertion\n'
+             '\n'
+             'The top-level syntax for "patterns" is:\n'
+             '\n'
+             '   patterns       ::= open_sequence_pattern | pattern\n'
+             '   pattern        ::= as_pattern | or_pattern\n'
+             '   closed_pattern ::= literal_pattern\n'
+             '                      | capture_pattern\n'
+             '                      | wildcard_pattern\n'
+             '                      | value_pattern\n'
+             '                      | group_pattern\n'
+             '                      | sequence_pattern\n'
+             '                      | mapping_pattern\n'
+             '                      | class_pattern\n'
+             '\n'
+             'The descriptions below will include a description “in simple '
+             'terms” of\n'
+             'what a pattern does for illustration purposes (credits to '
+             'Raymond\n'
+             'Hettinger for a document that inspired most of the '
+             'descriptions). Note\n'
+             'that these descriptions are purely for illustration purposes and '
+             '**may\n'
+             'not** reflect the underlying implementation.  Furthermore, they '
+             'do not\n'
+             'cover all valid forms.\n'
+             '\n'
+             '\n'
+             'OR Patterns\n'
+             '~~~~~~~~~~~\n'
+             '\n'
+             'An OR pattern is two or more patterns separated by vertical bars '
+             '"|".\n'
+             'Syntax:\n'
+             '\n'
+             '   or_pattern ::= "|".closed_pattern+\n'
+             '\n'
+             'Only the final subpattern may be irrefutable, and each '
+             'subpattern must\n'
+             'bind the same set of names to avoid ambiguity.\n'
+             '\n'
+             'An OR pattern matches each of its subpatterns in turn to the '
+             'subject\n'
+             'value, until one succeeds.  The OR pattern is then considered\n'
+             'successful.  Otherwise, if none of the subpatterns succeed, the '
+             'OR\n'
+             'pattern fails.\n'
+             '\n'
+             'In simple terms, "P1 | P2 | ..." will try to match "P1", if it '
+             'fails\n'
+             'it will try to match "P2", succeeding immediately if any '
+             'succeeds,\n'
+             'failing otherwise.\n'
+             '\n'
+             '\n'
+             'AS Patterns\n'
+             '~~~~~~~~~~~\n'
+             '\n'
+             'An AS pattern matches an OR pattern on the left of the "as" '
+             'keyword\n'
+             'against a subject.  Syntax:\n'
+             '\n'
+             '   as_pattern ::= or_pattern "as" capture_pattern\n'
+             '\n'
+             'If the OR pattern fails, the AS pattern fails.  Otherwise, the '
+             'AS\n'
+             'pattern binds the subject to the name on the right of the as '
+             'keyword\n'
+             'and succeeds. "capture_pattern" cannot be a "_".\n'
+             '\n'
+             'In simple terms "P as NAME" will match with "P", and on success '
+             'it\n'
+             'will set "NAME = <subject>".\n'
+             '\n'
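
Combining the two kinds just described: an OR pattern lists alternatives, and the AS pattern captures whichever alternative matched. A sketch (Python 3.10+):

    def describe(status):
        match status:
            case 401 | 403 | 404 as code:  # OR pattern, bound via AS
                return f"client error {code}"
            case _:
                return "unhandled status"

    print(describe(403))  # client error 403
    print(describe(500))  # unhandled status
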
+             '\n'
+             'Literal Patterns\n'
+             '~~~~~~~~~~~~~~~~\n'
+             '\n'
+             'A literal pattern corresponds to most literals in Python.  '
+             'Syntax:\n'
+             '\n'
+             '   literal_pattern ::= signed_number\n'
+             '                       | signed_number "+" NUMBER\n'
+             '                       | signed_number "-" NUMBER\n'
+             '                       | strings\n'
+             '                       | "None"\n'
+             '                       | "True"\n'
+             '                       | "False"\n'
+             '   signed_number   ::= NUMBER | "-" NUMBER\n'
+             '\n'
+             'The rule "strings" and the token "NUMBER" are defined in the '
+             'standard\n'
+             'Python grammar.  Triple-quoted strings are supported.  Raw '
+             'strings and\n'
+             'byte strings are supported.  Formatted string literals are not\n'
+             'supported.\n'
+             '\n'
+             'The forms "signed_number \'+\' NUMBER" and "signed_number \'-\' '
+             'NUMBER"\n'
+             'are for expressing complex numbers; they require a real number '
+             'on the\n'
+             'left and an imaginary number on the right. E.g. "3 + 4j".\n'
+             '\n'
+             'In simple terms, "LITERAL" will succeed only if "<subject> ==\n'
+             'LITERAL". For the singletons "None", "True" and "False", the '
+             '"is"\n'
+             'operator is used.\n'
+             '\n'
+             '\n'
+             'Capture Patterns\n'
+             '~~~~~~~~~~~~~~~~\n'
+             '\n'
+             'A capture pattern binds the subject value to a name. Syntax:\n'
+             '\n'
+             "   capture_pattern ::= !'_' NAME\n"
+             '\n'
+             'A single underscore "_" is not a capture pattern (this is what '
+             '"!\'_\'"\n'
+             'expresses). It is instead treated as a "wildcard_pattern".\n'
+             '\n'
+             'In a given pattern, a given name can only be bound once.  E.g. '
+             '"case\n'
+             'x, x: ..." is invalid while "case [x] | x: ..." is allowed.\n'
+             '\n'
+             'Capture patterns always succeed.  The binding follows scoping '
+             'rules\n'
+             'established by the assignment expression operator in **PEP '
+             '572**; the\n'
+             'name becomes a local variable in the closest containing function '
+             'scope\n'
+             'unless there’s an applicable "global" or "nonlocal" statement.\n'
+             '\n'
+             'In simple terms "NAME" will always succeed and it will set "NAME '
+             '=\n'
+             '<subject>".\n'
+             '\n'
+             '\n'
+             'Wildcard Patterns\n'
+             '~~~~~~~~~~~~~~~~~\n'
+             '\n'
+             'A wildcard pattern always succeeds (matches anything) and binds '
+             'no\n'
+             'name.  Syntax:\n'
+             '\n'
+             "   wildcard_pattern ::= '_'\n"
+             '\n'
+             '"_" is a soft keyword within any pattern, but only within '
+             'patterns.\n'
+             'It is an identifier, as usual, even within "match" subject\n'
+             'expressions, "guard"s, and "case" blocks.\n'
+             '\n'
+             'In simple terms, "_" will always succeed.\n'
+             '\n'
+             '\n'
+             'Value Patterns\n'
+             '~~~~~~~~~~~~~~\n'
+             '\n'
+             'A value pattern represents a named value in Python. Syntax:\n'
+             '\n'
+             '   value_pattern ::= attr\n'
+             '   attr          ::= name_or_attr "." NAME\n'
+             '   name_or_attr  ::= attr | NAME\n'
+             '\n'
+             'The dotted name in the pattern is looked up using standard '
+             'Python name\n'
+             'resolution rules.  The pattern succeeds if the value found '
+             'compares\n'
+             'equal to the subject value (using the "==" equality operator).\n'
+             '\n'
+             'In simple terms "NAME1.NAME2" will succeed only if "<subject> '
+             '==\n'
+             'NAME1.NAME2"\n'
+             '\n'
+             'Note:\n'
+             '\n'
+             '  If the same value occurs multiple times in the same match '
+             'statement,\n'
+             '  the interpreter may cache the first value found and reuse it '
+             'rather\n'
+             '  than repeat the same lookup.  This cache is strictly tied to a '
+             'given\n'
+             '  execution of a given match statement.\n'
+             '\n'
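A minimal sketch of a value pattern, assuming an illustrative "Color" enum (not part of this change):

   from enum import Enum

   class Color(Enum):
       RED = 1
       GREEN = 2

   match Color.GREEN:
       case Color.RED:      # dotted name, compared to the subject with ==
           print("red")
       case Color.GREEN:
           print("green")   # prints "green"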
+             '\n'
+             'Group Patterns\n'
+             '~~~~~~~~~~~~~~\n'
+             '\n'
+             'A group pattern allows users to add parentheses around patterns '
+             'to\n'
+             'emphasize the intended grouping.  Otherwise, it has no '
+             'additional\n'
+             'syntax. Syntax:\n'
+             '\n'
+             '   group_pattern ::= "(" pattern ")"\n'
+             '\n'
+             'In simple terms "(P)" has the same effect as "P".\n'
+             '\n'
+             '\n'
+             'Sequence Patterns\n'
+             '~~~~~~~~~~~~~~~~~\n'
+             '\n'
+             'A sequence pattern contains several subpatterns to be matched '
+             'against\n'
+             'sequence elements. The syntax is similar to the unpacking of a '
+             'list or\n'
+             'tuple.\n'
+             '\n'
+             '   sequence_pattern       ::= "[" [maybe_sequence_pattern] "]"\n'
+             '                        | "(" [open_sequence_pattern] ")"\n'
+             '   open_sequence_pattern  ::= maybe_star_pattern "," '
+             '[maybe_sequence_pattern]\n'
+             '   maybe_sequence_pattern ::= ",".maybe_star_pattern+ ","?\n'
+             '   maybe_star_pattern     ::= star_pattern | pattern\n'
+             '   star_pattern           ::= "*" (capture_pattern | '
+             'wildcard_pattern)\n'
+             '\n'
+             'There is no difference if parentheses or square brackets are '
+             'used for\n'
+             'sequence patterns (i.e. "(...)" vs "[...]").\n'
+             '\n'
+             'Note:\n'
+             '\n'
+             '  A single pattern enclosed in parentheses without a trailing '
+             'comma\n'
+             '  (e.g. "(3 | 4)") is a group pattern. While a single pattern '
+             'enclosed\n'
+             '  in square brackets (e.g. "[3 | 4]") is still a sequence '
+             'pattern.\n'
+             '\n'
+             'At most one star subpattern may be in a sequence pattern.  The '
+             'star\n'
+             'subpattern may occur in any position. If no star subpattern is\n'
+             'present, the sequence pattern is a fixed-length sequence '
+             'pattern;\n'
+             'otherwise it is a variable-length sequence pattern.\n'
+             '\n'
+             'The following is the logical flow for matching a sequence '
+             'pattern\n'
+             'against a subject value:\n'
+             '\n'
+             '1. If the subject value is not a sequence [2], the sequence '
+             'pattern\n'
+             '   fails.\n'
+             '\n'
+             '2. If the subject value is an instance of "str", "bytes" or\n'
+             '   "bytearray" the sequence pattern fails.\n'
+             '\n'
+             '3. The subsequent steps depend on whether the sequence pattern '
+             'is\n'
+             '   fixed or variable-length.\n'
+             '\n'
+             '   If the sequence pattern is fixed-length:\n'
+             '\n'
+             '   1. If the length of the subject sequence is not equal to the '
+             'number\n'
+             '      of subpatterns, the sequence pattern fails.\n'
+             '\n'
+             '   2. Subpatterns in the sequence pattern are matched to their\n'
+             '      corresponding items in the subject sequence from left to '
+             'right.\n'
+             '      Matching stops as soon as a subpattern fails.  If all\n'
+             '      subpatterns succeed in matching their corresponding item, '
+             'the\n'
+             '      sequence pattern succeeds.\n'
+             '\n'
+             '   Otherwise, if the sequence pattern is variable-length:\n'
+             '\n'
+             '   1. If the length of the subject sequence is less than the '
+             'number of\n'
+             '      non-star subpatterns, the sequence pattern fails.\n'
+             '\n'
+             '   2. The leading non-star subpatterns are matched to their\n'
+             '      corresponding items as for fixed-length sequences.\n'
+             '\n'
+             '   3. If the previous step succeeds, the star subpattern matches '
+             'a\n'
+             '      list formed of the remaining subject items, excluding the\n'
+             '      remaining items corresponding to non-star subpatterns '
+             'following\n'
+             '      the star subpattern.\n'
+             '\n'
+             '   4. Remaining non-star subpatterns are matched to their\n'
+             '      corresponding subject items, as for a fixed-length '
+             'sequence.\n'
+             '\n'
+             '   Note:\n'
+             '\n'
+             '     The length of the subject sequence is obtained via "len()" '
+             '(i.e.\n'
+             '     via the "__len__()" protocol).  This length may be cached '
+             'by the\n'
+             '     interpreter in a similar manner as value patterns.\n'
+             '\n'
+             'In simple terms "[P1, P2, P3," … ", P<N>]" matches only if all '
+             'the\n'
+             'following happens:\n'
+             '\n'
+             '* check "<subject>" is a sequence\n'
+             '\n'
+             '* "len(subject) == <N>"\n'
+             '\n'
+             '* "P1" matches "<subject>[0]" (note that this match can also '
+             'bind\n'
+             '  names)\n'
+             '\n'
+             '* "P2" matches "<subject>[1]" (note that this match can also '
+             'bind\n'
+             '  names)\n'
+             '\n'
+             '* … and so on for the corresponding pattern/element.\n'
+             '\n'
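A hedged sketch of a variable-length sequence pattern (the list value is illustrative):

   subject = [1, 2, 3, 4]
   match subject:
       case [first, *middle, last]:    # at most one star subpattern allowed
           print(first, middle, last)  # prints "1 [2, 3] 4"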
+             '\n'
+             'Mapping Patterns\n'
+             '~~~~~~~~~~~~~~~~\n'
+             '\n'
+             'A mapping pattern contains one or more key-value patterns.  The '
+             'syntax\n'
+             'is similar to the construction of a dictionary. Syntax:\n'
+             '\n'
+             '   mapping_pattern     ::= "{" [items_pattern] "}"\n'
+             '   items_pattern       ::= ",".key_value_pattern+ ","?\n'
+             '   key_value_pattern   ::= (literal_pattern | value_pattern) ":" '
+             'pattern\n'
+             '                         | double_star_pattern\n'
+             '   double_star_pattern ::= "**" capture_pattern\n'
+             '\n'
+             'At most one double star pattern may be in a mapping pattern.  '
+             'The\n'
+             'double star pattern must be the last subpattern in the mapping\n'
+             'pattern.\n'
+             '\n'
+             'Duplicate keys in mapping patterns are disallowed. Duplicate '
+             'literal\n'
+             'keys will raise a "SyntaxError". Two keys that otherwise have '
+             'the same\n'
+             'value will raise a "ValueError" at runtime.\n'
+             '\n'
+             'The following is the logical flow for matching a mapping '
+             'pattern\n'
+             'against a subject value:\n'
+             '\n'
+             '1. If the subject value is not a mapping [3], the mapping '
+             'pattern\n'
+             '   fails.\n'
+             '\n'
+             '2. If every key given in the mapping pattern is present in the '
+             'subject\n'
+             '   mapping, and the pattern for each key matches the '
+             'corresponding\n'
+             '   item of the subject mapping, the mapping pattern succeeds.\n'
+             '\n'
+             '3. If duplicate keys are detected in the mapping pattern, the '
+             'pattern\n'
+             '   is considered invalid. A "SyntaxError" is raised for '
+             'duplicate\n'
+             '   literal values; or a "ValueError" for named keys of the same '
+             'value.\n'
+             '\n'
+             'Note:\n'
+             '\n'
+             '  Key-value pairs are matched using the two-argument form of '
+             'the\n'
+             '  mapping subject’s "get()" method.  Matched key-value pairs '
+             'must\n'
+             '  already be present in the mapping, and not created on-the-fly '
+             'via\n'
+             '  "__missing__()" or "__getitem__()".\n'
+             '\n'
+             'In simple terms "{KEY1: P1, KEY2: P2, ... }" matches only if all '
+             'the\n'
+             'following happens:\n'
+             '\n'
+             '* check "<subject>" is a mapping\n'
+             '\n'
+             '* "KEY1 in <subject>"\n'
+             '\n'
+             '* "P1" matches "<subject>[KEY1]"\n'
+             '\n'
+             '* … and so on for the corresponding KEY/pattern pair.\n'
+             '\n'
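A minimal sketch of a mapping pattern (the "config" dict is illustrative only):

   config = {"host": "localhost", "port": 8080, "debug": True}
   match config:
       case {"port": p, **rest}:   # extra keys are allowed; ** captures them
           print(p, rest)          # prints "8080 {'host': 'localhost', 'debug': True}"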
+             '\n'
+             'Class Patterns\n'
+             '~~~~~~~~~~~~~~\n'
+             '\n'
+             'A class pattern represents a class and its positional and '
+             'keyword\n'
+             'arguments (if any).  Syntax:\n'
+             '\n'
+             '   class_pattern       ::= name_or_attr "(" [pattern_arguments '
+             '","?] ")"\n'
+             '   pattern_arguments   ::= positional_patterns ["," '
+             'keyword_patterns]\n'
+             '                         | keyword_patterns\n'
+             '   positional_patterns ::= ",".pattern+\n'
+             '   keyword_patterns    ::= ",".keyword_pattern+\n'
+             '   keyword_pattern     ::= NAME "=" pattern\n'
+             '\n'
+             'The same keyword should not be repeated in class patterns.\n'
+             '\n'
+             'The following is the logical flow for matching a class pattern '
+             'against\n'
+             'a subject value:\n'
+             '\n'
+             '1. If "name_or_attr" is not an instance of the builtin "type" , '
+             'raise\n'
+             '   "TypeError".\n'
+             '\n'
+             '2. If the subject value is not an instance of "name_or_attr" '
+             '(tested\n'
+             '   via "isinstance()"), the class pattern fails.\n'
+             '\n'
+             '3. If no pattern arguments are present, the pattern succeeds.\n'
+             '   Otherwise, the subsequent steps depend on whether keyword or\n'
+             '   positional argument patterns are present.\n'
+             '\n'
+             '   For a number of built-in types (specified below), a single\n'
+             '   positional subpattern is accepted which will match the '
+             'entire\n'
+             '   subject; for these types keyword patterns also work as for '
+             'other\n'
+             '   types.\n'
+             '\n'
+             '   If only keyword patterns are present, they are processed as\n'
+             '   follows, one by one:\n'
+             '\n'
+             '   I. The keyword is looked up as an attribute on the subject.\n'
+             '\n'
+             '      * If this raises an exception other than "AttributeError", '
+             'the\n'
+             '        exception bubbles up.\n'
+             '\n'
+             '      * If this raises "AttributeError", the class pattern has '
+             'failed.\n'
+             '\n'
+             '      * Else, the subpattern associated with the keyword pattern '
+             'is\n'
+             '        matched against the subject’s attribute value.  If this '
+             'fails,\n'
+             '        the class pattern fails; if this succeeds, the match '
+             'proceeds\n'
+             '        to the next keyword.\n'
+             '\n'
+             '   II. If all keyword patterns succeed, the class pattern '
+             'succeeds.\n'
+             '\n'
+             '   If any positional patterns are present, they are converted '
+             'to\n'
+             '   keyword patterns using the "__match_args__" attribute on the '
+             'class\n'
+             '   "name_or_attr" before matching:\n'
+             '\n'
+             '   I. The equivalent of "getattr(cls, "__match_args__", ())" is\n'
+             '      called.\n'
+             '\n'
+             '      * If this raises an exception, the exception bubbles up.\n'
+             '\n'
+             '      * If the returned value is not a tuple, the conversion '
+             'fails and\n'
+             '        "TypeError" is raised.\n'
+             '\n'
+             '      * If there are more positional patterns than\n'
+             '        "len(cls.__match_args__)", "TypeError" is raised.\n'
+             '\n'
+             '      * Otherwise, positional pattern "i" is converted to a '
+             'keyword\n'
+             '        pattern using "__match_args__[i]" as the keyword.\n'
+             '        "__match_args__[i]" must be a string; if not "TypeError" '
+             'is\n'
+             '        raised.\n'
+             '\n'
+             '      * If there are duplicate keywords, "TypeError" is raised.\n'
+             '\n'
+             '      See also:\n'
+             '\n'
+             '        Customizing positional arguments in class pattern '
+             'matching\n'
+             '\n'
+             '   II. Once all positional patterns have been converted to '
+             'keyword\n'
+             '      patterns, the match proceeds as if there were only '
+             'keyword\n'
+             '      patterns.\n'
+             '\n'
+             '   For the following built-in types the handling of positional\n'
+             '   subpatterns is different:\n'
+             '\n'
+             '   * "bool"\n'
+             '\n'
+             '   * "bytearray"\n'
+             '\n'
+             '   * "bytes"\n'
+             '\n'
+             '   * "dict"\n'
+             '\n'
+             '   * "float"\n'
+             '\n'
+             '   * "frozenset"\n'
+             '\n'
+             '   * "int"\n'
+             '\n'
+             '   * "list"\n'
+             '\n'
+             '   * "set"\n'
+             '\n'
+             '   * "str"\n'
+             '\n'
+             '   * "tuple"\n'
+             '\n'
+             '   These classes accept a single positional argument, and the '
+             'pattern\n'
+             '   there is matched against the whole object rather than an '
+             'attribute.\n'
+             '   For example "int(0|1)" matches the value "0", but not the '
+             'values\n'
+             '   "0.0" or "False".\n'
+             '\n'
+             'In simple terms "CLS(P1, attr=P2)" matches only if the '
+             'following\n'
+             'happens:\n'
+             '\n'
+             '* "isinstance(<subject>, CLS)"\n'
+             '\n'
+             '* convert "P1" to a keyword pattern using "CLS.__match_args__"\n'
+             '\n'
+             '* For each keyword argument "attr=P2":\n'
+             '     * "hasattr(<subject>, "attr")"\n'
+             '\n'
+             '     * "P2" matches "<subject>.attr"\n'
+             '\n'
+             '* … and so on for the corresponding keyword argument/pattern '
+             'pair.\n'
+             '\n'
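A hedged sketch of a class pattern, assuming an illustrative "Point" class that defines "__match_args__":

   class Point:
       __match_args__ = ("x", "y")   # maps positional subpatterns to attributes
       def __init__(self, x, y):
           self.x, self.y = x, y

   match Point(0, 5):
       case Point(0, y):                 # converted to Point(x=0, y=y)
           print("on the y-axis at", y)  # prints "on the y-axis at 5"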
+             'See also:\n'
+             '\n'
+             '  * **PEP 634** – Structural Pattern Matching: Specification\n'
+             '\n'
+             '  * **PEP 636** – Structural Pattern Matching: Tutorial\n'
+             '\n'
+             '\n'
              'Function definitions\n'
              '====================\n'
              '\n'
@@ -2655,7 +3472,6 @@
              '   decorators                ::= decorator+\n'
              '   decorator                 ::= "@" assignment_expression '
              'NEWLINE\n'
-             '   dotted_name               ::= identifier ("." identifier)*\n'
              '   parameter_list            ::= defparameter ("," '
              'defparameter)* "," "/" ["," [parameter_list_no_posonly]]\n'
              '                        | parameter_list_no_posonly\n'
@@ -2680,7 +3496,7 @@
              '\n'
              'The function definition does not execute the function body; this '
              'gets\n'
-             'executed only when the function is called. [2]\n'
+             'executed only when the function is called. [4]\n'
              '\n'
              'A function definition may be wrapped by one or more *decorator*\n'
              'expressions. Decorator expressions are evaluated when the '
@@ -2733,17 +3549,17 @@
              '“pre-\n'
              'computed” value is used for each call.  This is especially '
              'important\n'
-             'to understand when a default parameter is a mutable object, such '
-             'as a\n'
-             'list or a dictionary: if the function modifies the object (e.g. '
-             'by\n'
-             'appending an item to a list), the default value is in effect '
-             'modified.\n'
-             'This is generally not what was intended.  A way around this is '
-             'to use\n'
-             '"None" as the default, and explicitly test for it in the body of '
-             'the\n'
-             'function, e.g.:\n'
+             'to understand when a default parameter value is a mutable '
+             'object, such\n'
+             'as a list or a dictionary: if the function modifies the object '
+             '(e.g.\n'
+             'by appending an item to a list), the default parameter value is '
+             'in\n'
+             'effect modified.  This is generally not what was intended.  A '
+             'way\n'
+             'around this is to use "None" as the default, and explicitly test '
+             'for\n'
+             'it in the body of the function, e.g.:\n'
              '\n'
              '   def whats_on_the_telly(penguin=None):\n'
              '       if penguin is None:\n'
@@ -2755,7 +3571,7 @@
              'Calls.\n'
              'A function call always assigns values to all parameters '
              'mentioned in\n'
-             'the parameter list, either from position arguments, from '
+             'the parameter list, either from positional arguments, from '
              'keyword\n'
              'arguments, or from default values.  If the form “"*identifier"” '
              'is\n'
@@ -2767,8 +3583,14 @@
              'new\n'
              'empty mapping of the same type.  Parameters after “"*"” or\n'
              '“"*identifier"” are keyword-only parameters and may only be '
-             'passed\n'
-             'used keyword arguments.\n'
+             'passed by\n'
+             'keyword arguments.  Parameters before “"/"” are positional-only\n'
+             'parameters and may only be passed by positional arguments.\n'
+             '\n'
+             'Changed in version 3.8: The "/" function parameter syntax may be '
+             'used\n'
+             'to indicate positional-only parameters. See **PEP 570** for '
+             'details.\n'
              '\n'
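A minimal sketch of "/" and "*" in a parameter list (the function "divide" is illustrative):

   def divide(a, b, /, *, floor=False):
       # a and b are positional-only; floor is keyword-only
       return a // b if floor else a / b

   divide(7, 2)               # returns 3.5
   divide(7, 2, floor=True)   # returns 3
   # divide(a=7, b=2) raises TypeError: a and b are positional-only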
              'Parameters may have an *annotation* of the form “": '
              'expression"”\n'
@@ -2879,7 +3701,7 @@
              'function definitions.)  When the class’s suite finishes '
              'execution, its\n'
              'execution frame is discarded but its local namespace is saved. '
-             '[3] A\n'
+             '[5] A\n'
              'class object is then created using the inheritance list for the '
              'base\n'
              'classes and the saved local namespace for the attribute '
@@ -2964,12 +3786,9 @@
              '\n'
              'Execution of Python coroutines can be suspended and resumed at '
              'many\n'
-             'points (see *coroutine*).  Inside the body of a coroutine '
-             'function,\n'
-             '"await" and "async" identifiers become reserved keywords; '
-             '"await"\n'
-             'expressions, "async for" and "async with" can only be used in\n'
-             'coroutine function bodies.\n'
+             'points (see *coroutine*). "await" expressions, "async for" and '
+             '"async\n'
+             'with" can only be used in the body of a coroutine function.\n'
              '\n'
              'Functions defined with "async def" syntax are always coroutine\n'
              'functions, even if they do not contain "await" or "async" '
@@ -2985,19 +3804,22 @@
              '       do_stuff()\n'
              '       await some_coroutine()\n'
              '\n'
+             'Changed in version 3.7: "await" and "async" are now keywords;\n'
+             'previously they were only treated as such inside the body of a\n'
+             'coroutine function.\n'
+             '\n'
              '\n'
              'The "async for" statement\n'
              '-------------------------\n'
              '\n'
              '   async_for_stmt ::= "async" for_stmt\n'
              '\n'
-             'An *asynchronous iterable* is able to call asynchronous code in '
-             'its\n'
-             '*iter* implementation, and *asynchronous iterator* can call\n'
-             'asynchronous code in its *next* method.\n'
+             'An *asynchronous iterable* provides an "__aiter__" method that\n'
+             'directly returns an *asynchronous iterator*, which can call\n'
+             'asynchronous code in its "__anext__" method.\n'
              '\n'
              'The "async for" statement allows convenient iteration over\n'
-             'asynchronous iterators.\n'
+             'asynchronous iterables.\n'
              '\n'
              'The following code:\n'
              '\n'
@@ -3083,13 +3905,65 @@
              'exception.\n'
              '    That new exception causes the old one to be lost.\n'
              '\n'
-             '[2] A string literal appearing as the first statement in the '
+             '[2] In pattern matching, a sequence is defined as one of the\n'
+             '    following:\n'
+             '\n'
+             '       * a class that inherits from "collections.abc.Sequence"\n'
+             '\n'
+             '       * a Python class that has been registered as\n'
+             '         "collections.abc.Sequence"\n'
+             '\n'
+             '       * a builtin class that has its (CPython) '
+             '"Py_TPFLAGS_SEQUENCE"\n'
+             '         bit set\n'
+             '\n'
+             '       * a class that inherits from any of the above\n'
+             '\n'
+             '    The following standard library classes are sequences:\n'
+             '\n'
+             '       * "array.array"\n'
+             '\n'
+             '       * "collections.deque"\n'
+             '\n'
+             '       * "list"\n'
+             '\n'
+             '       * "memoryview"\n'
+             '\n'
+             '       * "range"\n'
+             '\n'
+             '       * "tuple"\n'
+             '\n'
+             '    Note:\n'
+             '\n'
+             '      Subject values of type "str", "bytes", and "bytearray" do '
+             'not\n'
+             '      match sequence patterns.\n'
+             '\n'
+             '[3] In pattern matching, a mapping is defined as one of the '
+             'following:\n'
+             '\n'
+             '       * a class that inherits from "collections.abc.Mapping"\n'
+             '\n'
+             '       * a Python class that has been registered as\n'
+             '         "collections.abc.Mapping"\n'
+             '\n'
+             '       * a builtin class that has its (CPython) '
+             '"Py_TPFLAGS_MAPPING"\n'
+             '         bit set\n'
+             '\n'
+             '       * a class that inherits from any of the above\n'
+             '\n'
+             '    The standard library classes "dict" and '
+             '"types.MappingProxyType"\n'
+             '    are mappings.\n'
+             '\n'
+             '[4] A string literal appearing as the first statement in the '
              'function\n'
              '    body is transformed into the function’s "__doc__" attribute '
              'and\n'
              '    therefore the function’s *docstring*.\n'
              '\n'
-             '[3] A string literal appearing as the first statement in the '
+             '[5] A string literal appearing as the first statement in the '
              'class\n'
              '    body is transformed into the namespace’s "__doc__" item and\n'
              '    therefore the class’s *docstring*.\n',
@@ -3227,13 +4101,13 @@
                   '\n'
                   '   If "__new__()" is invoked during object construction and '
                   'it returns\n'
-                  '   an instance or subclass of *cls*, then the new '
-                  'instance’s\n'
-                  '   "__init__()" method will be invoked like '
-                  '"__init__(self[, ...])",\n'
-                  '   where *self* is the new instance and the remaining '
-                  'arguments are\n'
-                  '   the same as were passed to the object constructor.\n'
+                  '   an instance of *cls*, then the new instance’s '
+                  '"__init__()" method\n'
+                  '   will be invoked like "__init__(self[, ...])", where '
+                  '*self* is the\n'
+                  '   new instance and the remaining arguments are the same as '
+                  'were\n'
+                  '   passed to the object constructor.\n'
                   '\n'
                   '   If "__new__()" does not return an instance of *cls*, '
                   'then the new\n'
@@ -3728,17 +4602,16 @@
              'debugger will pause execution just before the first line of the\n'
              'module.\n'
              '\n'
-             'The typical usage to break into the debugger from a running '
-             'program is\n'
-             'to insert\n'
+             'The typical usage to break into the debugger is to insert:\n'
              '\n'
              '   import pdb; pdb.set_trace()\n'
              '\n'
-             'at the location you want to break into the debugger.  You can '
-             'then\n'
-             'step through the code following this statement, and continue '
-             'running\n'
-             'without the debugger using the "continue" command.\n'
+             'at the location you want to break into the debugger, and then '
+             'run the\n'
+             'program. You can then step through the code following this '
+             'statement,\n'
+             'and continue running without the debugger using the "continue"\n'
+             'command.\n'
              '\n'
              'New in version 3.7: The built-in "breakpoint()", when called '
              'with\n'
@@ -4594,20 +5467,32 @@
               'binding\n'
               'operations.\n'
               '\n'
-              'The following constructs bind names: formal parameters to '
-              'functions,\n'
-              '"import" statements, class and function definitions (these bind '
-              'the\n'
-              'class or function name in the defining block), and targets that '
-              'are\n'
-              'identifiers if occurring in an assignment, "for" loop header, '
-              'or after\n'
-              '"as" in a "with" statement or "except" clause. The "import" '
-              'statement\n'
-              'of the form "from ... import *" binds all names defined in the\n'
-              'imported module, except those beginning with an underscore.  '
-              'This form\n'
-              'may only be used at the module level.\n'
+              'The following constructs bind names:\n'
+              '\n'
+              '* formal parameters to functions,\n'
+              '\n'
+              '* class definitions,\n'
+              '\n'
+              '* function definitions,\n'
+              '\n'
+              '* assignment expressions,\n'
+              '\n'
+              '* targets that are identifiers if occurring in an assignment:\n'
+              '\n'
+              '  * "for" loop header,\n'
+              '\n'
+              '  * after "as" in a "with" statement, "except" clause or in the '
+              'as-\n'
+              '    pattern in structural pattern matching,\n'
+              '\n'
+              '  * in a capture pattern in structural pattern matching\n'
+              '\n'
+              '* "import" statements.\n'
+              '\n'
+              'The "import" statement of the form "from ... import *" binds '
+              'all names\n'
+              'defined in the imported module, except those beginning with an\n'
+              'underscore. This form may only be used at the module level.\n'
               '\n'
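A short hedged sketch showing several of these binding constructs at once (all names are illustrative):

   import math                  # import statement binds "math"

   def area(r):                 # function definition binds "area"; "r" is a parameter
       if (r2 := r * r) > 0:    # assignment expression binds "r2"
           return math.pi * r2
       return 0.0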
               'A target occurring in a "del" statement is also considered '
               'bound for\n'
@@ -4680,9 +5565,9 @@
               'operations.\n'
               '\n'
               'If the "global" statement occurs within a block, all uses of '
-              'the name\n'
-              'specified in the statement refer to the binding of that name in '
-              'the\n'
+              'the names\n'
+              'specified in the statement refer to the bindings of those names '
+              'in the\n'
               'top-level namespace.  Names are resolved in the top-level '
               'namespace by\n'
               'searching the global namespace, i.e. the namespace of the '
@@ -4691,9 +5576,10 @@
               'namespace\n'
               'of the module "builtins".  The global namespace is searched '
               'first.  If\n'
-              'the name is not found there, the builtins namespace is '
-              'searched.  The\n'
-              '"global" statement must precede all uses of the name.\n'
+              'the names are not found there, the builtins namespace is '
+              'searched.\n'
+              'The "global" statement must precede all uses of the listed '
+              'names.\n'
               '\n'
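A minimal sketch of "global" resolving a name in the top-level namespace (names are illustrative):

   counter = 0

   def bump():
       global counter   # uses of "counter" now refer to the module-level binding
       counter += 1

   bump()
   print(counter)       # prints 1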
               'The "global" statement has the same scope as a name binding '
               'operation\n'
@@ -4987,30 +5873,7 @@
         'all by the loop.  Hint: the built-in function "range()" returns an\n'
         'iterator of integers suitable to emulate the effect of Pascal’s "for '
         'i\n'
-        ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n'
-        '\n'
-        'Note:\n'
-        '\n'
-        '  There is a subtlety when the sequence is being modified by the '
-        'loop\n'
-        '  (this can only occur for mutable sequences, e.g. lists).  An\n'
-        '  internal counter is used to keep track of which item is used next,\n'
-        '  and this is incremented on each iteration.  When this counter has\n'
-        '  reached the length of the sequence the loop terminates.  This '
-        'means\n'
-        '  that if the suite deletes the current (or a previous) item from '
-        'the\n'
-        '  sequence, the next item will be skipped (since it gets the index '
-        'of\n'
-        '  the current item which has already been treated).  Likewise, if '
-        'the\n'
-        '  suite inserts an item in the sequence before the current item, the\n'
-        '  current item will be treated again the next time through the loop.\n'
-        '  This can lead to nasty bugs that can be avoided by making a\n'
-        '  temporary copy using a slice of the whole sequence, e.g.,\n'
-        '\n'
-        '     for x in a[:]:\n'
-        '         if x < 0: a.remove(x)\n',
+        ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n',
  'formatstrings': 'Format String Syntax\n'
                   '********************\n'
                   '\n'
@@ -5020,9 +5883,11 @@
                   '"Formatter",\n'
                   'subclasses can define their own format string syntax).  The '
                   'syntax is\n'
-                  'related to that of formatted string literals, but there '
-                  'are\n'
-                  'differences.\n'
+                  'related to that of formatted string literals, but it is '
+                  'less\n'
+                  'sophisticated and, in particular, does not support '
+                  'arbitrary\n'
+                  'expressions.\n'
                   '\n'
                   'Format strings contain “replacement fields” surrounded by '
                   'curly braces\n'
@@ -5216,7 +6081,7 @@
                   'character that can be any character and defaults to a space '
                   'if\n'
                   'omitted. It is not possible to use a literal curly brace '
-                  '(“"{"” or\n'
+                  '(“"{"” or\n'
                   '“"}"”) as the *fill* character in a formatted string '
                   'literal or when\n'
                   'using the "str.format()" method.  However, it is possible '
@@ -5254,9 +6119,9 @@
                   '   |           | in the form ‘+000000120’. This alignment '
                   'option is only    |\n'
                   '   |           | valid for numeric types.  It becomes the '
-                  'default when ‘0’  |\n'
-                  '   |           | immediately precedes the field '
-                  'width.                      |\n'
+                  'default for       |\n'
+                  '   |           | numbers when ‘0’ immediately precedes the '
+                  'field width.     |\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
                   '   | "\'^\'"     | Forces the field to be centered within '
@@ -5311,19 +6176,19 @@
                   'complex\n'
                   'types. For integers, when binary, octal, or hexadecimal '
                   'output is\n'
-                  'used, this option adds the prefix respective "\'0b\'", '
-                  '"\'0o\'", or "\'0x\'"\n'
-                  'to the output value. For float and complex the alternate '
-                  'form causes\n'
-                  'the result of the conversion to always contain a '
-                  'decimal-point\n'
-                  'character, even if no digits follow it. Normally, a '
-                  'decimal-point\n'
-                  'character appears in the result of these conversions only '
-                  'if a digit\n'
-                  'follows it. In addition, for "\'g\'" and "\'G\'" '
-                  'conversions, trailing\n'
-                  'zeros are not removed from the result.\n'
+                  'used, this option adds the respective prefix "\'0b\'", '
+                  '"\'0o\'", "\'0x\'",\n'
+                  'or "\'0X\'" to the output value. For float and complex the '
+                  'alternate\n'
+                  'form causes the result of the conversion to always contain '
+                  'a decimal-\n'
+                  'point character, even if no digits follow it. Normally, a '
+                  'decimal-\n'
+                  'point character appears in the result of these conversions '
+                  'only if a\n'
+                  'digit follows it. In addition, for "\'g\'" and "\'G\'" '
+                  'conversions,\n'
+                  'trailing zeros are not removed from the result.\n'
                   '\n'
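A quick hedged illustration of the "#" alternate form described above:

   print(format(255, '#b'))    # prints "0b11111111"
   print(format(255, '#x'))    # prints "0xff"
   print(format(255, '#X'))    # prints "0XFF" (the prefix is upper-cased too)
   print(format(2.0, '#.0f'))  # prints "2." (decimal point kept with no digits after it)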
                   'The "\',\'" option signals the use of a comma for a '
                   'thousands separator.\n'
@@ -5364,19 +6229,23 @@
                   'with an\n'
                   '*alignment* type of "\'=\'".\n'
                   '\n'
-                  'The *precision* is a decimal number indicating how many '
+                  'Changed in version 3.10: Preceding the *width* field by '
+                  '"\'0\'" no\n'
+                  'longer affects the default alignment for strings.\n'
+                  '\n'
+                  'The *precision* is a decimal integer indicating how many '
                   'digits should\n'
-                  'be displayed after the decimal point for a floating point '
-                  'value\n'
-                  'formatted with "\'f\'" and "\'F\'", or before and after the '
-                  'decimal point\n'
-                  'for a floating point value formatted with "\'g\'" or '
-                  '"\'G\'".  For non-\n'
-                  'number types the field indicates the maximum field size - '
-                  'in other\n'
-                  'words, how many characters will be used from the field '
-                  'content. The\n'
-                  '*precision* is not allowed for integer values.\n'
+                  'be displayed after the decimal point for presentation types '
+                  '"\'f\'" and\n'
+                  '"\'F\'", or before and after the decimal point for '
+                  'presentation types\n'
+                  '"\'g\'" or "\'G\'".  For string presentation types the '
+                  'field indicates the\n'
+                  'maximum field size - in other words, how many characters '
+                  'will be used\n'
+                  'from the field content.  The *precision* is not allowed for '
+                  'integer\n'
+                  'presentation types.\n'
                   '\n'
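A minimal sketch of how *precision* behaves for the different presentation types:

   print(format(3.14159, '.2f'))   # prints "3.14" (digits after the decimal point)
   print(format(3.14159, '.2g'))   # prints "3.1"  (significant digits)
   print(format('abcdef', '.3'))   # prints "abc"  (maximum field size for strings)
   # format(42, '.2') raises ValueError: precision not allowed for integers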
                   'Finally, the *type* determines how the data should be '
                   'presented.\n'
@@ -5436,8 +6305,12 @@
                   '+-----------+------------------------------------------------------------+\n'
                   '   | "\'X\'"     | Hex format. Outputs the number in base '
                   '16, using upper-    |\n'
-                  '   |           | case letters for the digits above '
-                  '9.                       |\n'
+                  '   |           | case letters for the digits above 9. In '
+                  'case "\'#\'" is      |\n'
+                  '   |           | specified, the prefix "\'0x\'" will be '
+                  'upper-cased to "\'0X\'" |\n'
+                  '   |           | as '
+                  'well.                                                   |\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
                   '   | "\'n\'"     | Number. This is the same as "\'d\'", '
@@ -5530,44 +6403,51 @@
                   '   |           | formats the result in either fixed-point '
                   'format or in      |\n'
                   '   |           | scientific notation, depending on its '
-                  'magnitude.  The      |\n'
-                  '   |           | precise rules are as follows: suppose that '
-                  'the result      |\n'
+                  'magnitude. A         |\n'
+                  '   |           | precision of "0" is treated as equivalent '
+                  'to a precision   |\n'
+                  '   |           | of "1".  The precise rules are as follows: '
+                  'suppose that    |\n'
+                  '   |           | the result formatted with presentation '
+                  'type "\'e\'" and      |\n'
+                  '   |           | precision "p-1" would have exponent '
+                  '"exp".  Then, if "m <= |\n'
+                  '   |           | exp < p", where "m" is -4 for floats and '
+                  '-6 for            |\n'
+                  '   |           | "Decimals", the number is formatted with '
+                  'presentation type |\n'
+                  '   |           | "\'f\'" and precision "p-1-exp".  '
+                  'Otherwise, the number is   |\n'
                   '   |           | formatted with presentation type "\'e\'" '
-                  'and precision "p-1" |\n'
-                  '   |           | would have exponent "exp".  Then, if "m <= '
-                  'exp < p", where |\n'
-                  '   |           | "m" is -4 for floats and -6 for '
-                  '"Decimals", the number is  |\n'
-                  '   |           | formatted with presentation type "\'f\'" '
                   'and precision       |\n'
-                  '   |           | "p-1-exp".  Otherwise, the number is '
-                  'formatted with        |\n'
-                  '   |           | presentation type "\'e\'" and precision '
-                  '"p-1". In both cases |\n'
-                  '   |           | insignificant trailing zeros are removed '
-                  'from the          |\n'
-                  '   |           | significand, and the decimal point is also '
-                  'removed if      |\n'
-                  '   |           | there are no remaining digits following '
-                  'it, unless the     |\n'
-                  '   |           | "\'#\'" option is used.  Positive and '
-                  'negative infinity,     |\n'
-                  '   |           | positive and negative zero, and nans, are '
-                  'formatted as     |\n'
-                  '   |           | "inf", "-inf", "0", "-0" and "nan" '
-                  'respectively,           |\n'
-                  '   |           | regardless of the precision.  A precision '
-                  'of "0" is        |\n'
-                  '   |           | treated as equivalent to a precision of '
-                  '"1". With no       |\n'
-                  '   |           | precision given, uses a precision of "6" '
-                  'significant       |\n'
-                  '   |           | digits for "float", and shows all '
-                  'coefficient digits for   |\n'
-                  '   |           | '
-                  '"Decimal".                                                 '
-                  '|\n'
+                  '   |           | "p-1". In both cases insignificant '
+                  'trailing zeros are      |\n'
+                  '   |           | removed from the significand, and the '
+                  'decimal point is     |\n'
+                  '   |           | also removed if there are no remaining '
+                  'digits following    |\n'
+                  '   |           | it, unless the "\'#\'" option is used.  '
+                  'With no precision    |\n'
+                  '   |           | given, uses a precision of "6" significant '
+                  'digits for      |\n'
+                  '   |           | "float". For "Decimal", the coefficient of '
+                  'the result is   |\n'
+                  '   |           | formed from the coefficient digits of the '
+                  'value;           |\n'
+                  '   |           | scientific notation is used for values '
+                  'smaller than "1e-6" |\n'
+                  '   |           | in absolute value and values where the '
+                  'place value of the  |\n'
+                  '   |           | least significant digit is larger than 1, '
+                  'and fixed-point  |\n'
+                  '   |           | notation is used otherwise.  Positive and '
+                  'negative         |\n'
+                  '   |           | infinity, positive and negative zero, and '
+                  'nans, are        |\n'
+                  '   |           | formatted as "inf", "-inf", "0", "-0" and '
+                  '"nan"            |\n'
+                  '   |           | respectively, regardless of the '
+                  'precision.                 |\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
                   '   | "\'G\'"     | General format. Same as "\'g\'" except '
@@ -5592,19 +6472,24 @@
                   'percent sign.          |\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
-                  '   | None      | Similar to "\'g\'", except that '
-                  'fixed-point notation, when   |\n'
-                  '   |           | used, has at least one digit past the '
-                  'decimal point. The   |\n'
-                  '   |           | default precision is as high as needed to '
-                  'represent the    |\n'
-                  '   |           | particular value. The overall effect is to '
-                  'match the       |\n'
-                  '   |           | output of "str()" as altered by the other '
-                  'format           |\n'
-                  '   |           | '
-                  'modifiers.                                                 '
-                  '|\n'
+                  '   | None      | For "float" this is the same as "\'g\'", '
+                  'except that when    |\n'
+                  '   |           | fixed-point notation is used to format the '
+                  'result, it      |\n'
+                  '   |           | always includes at least one digit past '
+                  'the decimal point. |\n'
+                  '   |           | The precision used is as large as needed '
+                  'to represent the  |\n'
+                  '   |           | given value faithfully.  For "Decimal", '
+                  'this is the same   |\n'
+                  '   |           | as either "\'g\'" or "\'G\'" depending on '
+                  'the value of         |\n'
+                  '   |           | "context.capitals" for the current decimal '
+                  'context.  The   |\n'
+                  '   |           | overall effect is to match the output of '
+                  '"str()" as        |\n'
+                  '   |           | altered by the other format '
+                  'modifiers.                     |\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
                   '\n'
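A hedged sketch of the "'g'" rules quoted above, using the default precision of 6 where none is given:

   print(format(123456, '.2g'))   # exp = 5, not < p = 2: 'e' is used -> "1.2e+05"
   print(format(1234.5, 'g'))     # -4 <= 3 < 6:  'f' is used -> "1234.5"
   print(format(0.0001, 'g'))     # -4 <= -4 < 6: 'f' is used -> "0.0001"
   print(format(0.00001, 'g'))    # exp = -5 < -4: 'e' is used -> "1e-05"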
@@ -5782,7 +6667,6 @@
              '   decorators                ::= decorator+\n'
              '   decorator                 ::= "@" assignment_expression '
              'NEWLINE\n'
-             '   dotted_name               ::= identifier ("." identifier)*\n'
              '   parameter_list            ::= defparameter ("," '
              'defparameter)* "," "/" ["," [parameter_list_no_posonly]]\n'
              '                        | parameter_list_no_posonly\n'
@@ -5807,7 +6691,7 @@
              '\n'
              'The function definition does not execute the function body; this '
              'gets\n'
-             'executed only when the function is called. [2]\n'
+             'executed only when the function is called. [4]\n'
              '\n'
              'A function definition may be wrapped by one or more *decorator*\n'
              'expressions. Decorator expressions are evaluated when the '
@@ -5860,17 +6744,17 @@
              '“pre-\n'
              'computed” value is used for each call.  This is especially '
              'important\n'
-             'to understand when a default parameter is a mutable object, such '
-             'as a\n'
-             'list or a dictionary: if the function modifies the object (e.g. '
-             'by\n'
-             'appending an item to a list), the default value is in effect '
-             'modified.\n'
-             'This is generally not what was intended.  A way around this is '
-             'to use\n'
-             '"None" as the default, and explicitly test for it in the body of '
-             'the\n'
-             'function, e.g.:\n'
+             'to understand when a default parameter value is a mutable '
+             'object, such\n'
+             'as a list or a dictionary: if the function modifies the object '
+             '(e.g.\n'
+             'by appending an item to a list), the default parameter value is '
+             'in\n'
+             'effect modified.  This is generally not what was intended.  A '
+             'way\n'
+             'around this is to use "None" as the default, and explicitly test '
+             'for\n'
+             'it in the body of the function, e.g.:\n'
              '\n'
              '   def whats_on_the_telly(penguin=None):\n'
              '       if penguin is None:\n'
@@ -5882,7 +6766,7 @@
              'Calls.\n'
              'A function call always assigns values to all parameters '
              'mentioned in\n'
-             'the parameter list, either from position arguments, from '
+             'the parameter list, either from positional arguments, from '
              'keyword\n'
              'arguments, or from default values.  If the form “"*identifier"” '
              'is\n'
@@ -5894,8 +6778,14 @@
              'new\n'
              'empty mapping of the same type.  Parameters after “"*"” or\n'
              '“"*identifier"” are keyword-only parameters and may only be '
-             'passed\n'
-             'used keyword arguments.\n'
+             'passed by\n'
+             'keyword arguments.  Parameters before “"/"” are positional-only\n'
+             'parameters and may only be passed by positional arguments.\n'
+             '\n'
+             'Changed in version 3.8: The "/" function parameter syntax may be '
+             'used\n'
+             'to indicate positional-only parameters. See **PEP 570** for '
+             'details.\n'
              '\n'
              'Parameters may have an *annotation* of the form “": '
              'expression"”\n'
@@ -5987,8 +6877,10 @@
            '\n'
            'Names listed in a "global" statement must not be defined as '
            'formal\n'
-           'parameters or in a "for" loop control target, "class" definition,\n'
-           'function definition, "import" statement, or variable annotation.\n'
+           'parameters, or as targets in "with" statements or "except" '
+           'clauses, or\n'
+           'in a "for" target list, "class" definition, function definition,\n'
+           '"import" statement, or variable annotation.\n'
            '\n'
            '**CPython implementation detail:** The current implementation does '
            'not\n'
@@ -6020,22 +6912,31 @@
                'trailing underscore characters:\n'
                '\n'
                '"_*"\n'
-               '   Not imported by "from module import *".  The special '
-               'identifier "_"\n'
-               '   is used in the interactive interpreter to store the result '
-               'of the\n'
-               '   last evaluation; it is stored in the "builtins" module.  '
-               'When not\n'
-               '   in interactive mode, "_" has no special meaning and is not '
-               'defined.\n'
-               '   See section The import statement.\n'
+               '   Not imported by "from module import *".\n'
+               '\n'
+               '"_"\n'
+               '   In a "case" pattern within a "match" statement, "_" is a '
+               'soft\n'
+               '   keyword that denotes a wildcard.\n'
+               '\n'
+               '   Separately, the interactive interpreter makes the result of '
+               'the\n'
+               '   last evaluation available in the variable "_". (It is '
+               'stored in the\n'
+               '   "builtins" module, alongside built-in functions like '
+               '"print".)\n'
+               '\n'
+               '   Elsewhere, "_" is a regular identifier. It is often used to '
+               'name\n'
+               '   “special” items, but it is not special to Python itself.\n'
                '\n'
                '   Note:\n'
                '\n'
                '     The name "_" is often used in conjunction with\n'
                '     internationalization; refer to the documentation for the\n'
                '     "gettext" module for more information on this '
-               'convention.\n'
+               'convention. It is\n'
+               '     also commonly used for unused variables.\n'
                '\n'
                '"__*__"\n'
                '   System-defined names, informally known as “dunder” names. '
@@ -6158,6 +7059,28 @@
                 '   async      elif       if         or         yield\n'
                 '\n'
                 '\n'
+                'Soft Keywords\n'
+                '=============\n'
+                '\n'
+                'New in version 3.10.\n'
+                '\n'
+                'Some identifiers are only reserved under specific contexts. '
+                'These are\n'
+                'known as *soft keywords*.  The identifiers "match", "case" '
+                'and "_" can\n'
+                'syntactically act as keywords in contexts related to the '
+                'pattern\n'
+                'matching statement, but this distinction is done at the '
+                'parser level,\n'
+                'not when tokenizing.\n'
+                '\n'
+                'As soft keywords, their use with pattern matching is possible '
+                'while\n'
+                'still preserving compatibility with existing code that uses '
+                '"match",\n'
+                '"case" and "_" as identifier names.\n'
+                '\n'
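A minimal sketch of the soft-keyword behavior (the variable names are the point of the example):

   match = 1            # fine: "match" is an ordinary identifier here
   case = 2             # likewise for "case"

   match case:          # ...yet both act as keywords in this statement
       case 2:
           print("matched")   # prints "matched"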
+                '\n'
                 'Reserved classes of identifiers\n'
                 '===============================\n'
                 '\n'
@@ -6168,15 +7091,23 @@
                 'trailing underscore characters:\n'
                 '\n'
                 '"_*"\n'
-                '   Not imported by "from module import *".  The special '
-                'identifier "_"\n'
-                '   is used in the interactive interpreter to store the result '
+                '   Not imported by "from module import *".\n'
+                '\n'
+                '"_"\n'
+                '   In a "case" pattern within a "match" statement, "_" is a '
+                'soft\n'
+                '   keyword that denotes a wildcard.\n'
+                '\n'
+                '   Separately, the interactive interpreter makes the result '
                 'of the\n'
-                '   last evaluation; it is stored in the "builtins" module.  '
-                'When not\n'
-                '   in interactive mode, "_" has no special meaning and is not '
-                'defined.\n'
-                '   See section The import statement.\n'
+                '   last evaluation available in the variable "_". (It is '
+                'stored in the\n'
+                '   "builtins" module, alongside built-in functions like '
+                '"print".)\n'
+                '\n'
+                '   Elsewhere, "_" is a regular identifier. It is often used '
+                'to name\n'
+                '   “special” items, but it is not special to Python itself.\n'
                 '\n'
                 '   Note:\n'
                 '\n'
@@ -6184,7 +7115,8 @@
                 '     internationalization; refer to the documentation for '
                 'the\n'
                 '     "gettext" module for more information on this '
-                'convention.\n'
+                'convention. It is\n'
+                '     also commonly used for unused variables.\n'
                 '\n'
                 '"__*__"\n'
                 '   System-defined names, informally known as “dunder” names. '
@@ -6256,7 +7188,7 @@
            '                   | "from" relative_module "import" "(" '
            'identifier ["as" identifier]\n'
            '                   ("," identifier ["as" identifier])* [","] ")"\n'
-           '                   | "from" module "import" "*"\n'
+           '                   | "from" relative_module "import" "*"\n'
            '   module          ::= (identifier ".")* identifier\n'
            '   relative_module ::= "."* module | "."+\n'
            '\n'
@@ -6600,10 +7532,7 @@
  'lambda': 'Lambdas\n'
            '*******\n'
            '\n'
-           '   lambda_expr        ::= "lambda" [parameter_list] ":" '
-           'expression\n'
-           '   lambda_expr_nocond ::= "lambda" [parameter_list] ":" '
-           'expression_nocond\n'
+           '   lambda_expr ::= "lambda" [parameter_list] ":" expression\n'
            '\n'
            'Lambda expressions (sometimes called lambda forms) are used to '
            'create\n'
@@ -6648,20 +7577,32 @@
            '*Names* refer to objects.  Names are introduced by name binding\n'
            'operations.\n'
            '\n'
-           'The following constructs bind names: formal parameters to '
-           'functions,\n'
-           '"import" statements, class and function definitions (these bind '
-           'the\n'
-           'class or function name in the defining block), and targets that '
-           'are\n'
-           'identifiers if occurring in an assignment, "for" loop header, or '
-           'after\n'
-           '"as" in a "with" statement or "except" clause. The "import" '
-           'statement\n'
-           'of the form "from ... import *" binds all names defined in the\n'
-           'imported module, except those beginning with an underscore.  This '
-           'form\n'
-           'may only be used at the module level.\n'
+           'The following constructs bind names:\n'
+           '\n'
+           '* formal parameters to functions,\n'
+           '\n'
+           '* class definitions,\n'
+           '\n'
+           '* function definitions,\n'
+           '\n'
+           '* assignment expressions,\n'
+           '\n'
+           '* targets that are identifiers if occurring in an assignment:\n'
+           '\n'
+           '  * "for" loop header,\n'
+           '\n'
+           '  * after "as" in a "with" statement, "except" clause or in the '
+           'as-\n'
+           '    pattern in structural pattern matching,\n'
+           '\n'
+           '  * in a capture pattern in structural pattern matching\n'
+           '\n'
+           '* "import" statements.\n'
+           '\n'
+           'The "import" statement of the form "from ... import *" binds all '
+           'names\n'
+           'defined in the imported module, except those beginning with an\n'
+           'underscore. This form may only be used at the module level.\n'
            '\n'
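A compact, hypothetical illustration of several binding constructs from the list above (all names are arbitrary; the match statement requires Python 3.10+):

   import math                 # "import" statement binds math

   def f(x):                   # function definition binds f; parameter binds x
       return x * 2

   class C:                    # class definition binds C
       pass

   y = f(3)                    # assignment target binds y
   for i in range(3):          # "for" loop header binds i
       pass

   if (n := 10) > 5:           # assignment expression binds n
       pass

   match y:
       case value:             # capture pattern binds value
           pass
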
            'A target occurring in a "del" statement is also considered bound '
            'for\n'
@@ -6731,8 +7672,8 @@
            'operations.\n'
            '\n'
            'If the "global" statement occurs within a block, all uses of the '
-           'name\n'
-           'specified in the statement refer to the binding of that name in '
+           'names\n'
+           'specified in the statement refer to the bindings of those names in '
            'the\n'
            'top-level namespace.  Names are resolved in the top-level '
            'namespace by\n'
@@ -6741,9 +7682,9 @@
            'namespace\n'
            'of the module "builtins".  The global namespace is searched '
            'first.  If\n'
-           'the name is not found there, the builtins namespace is searched.  '
-           'The\n'
-           '"global" statement must precede all uses of the name.\n'
+           'the names are not found there, the builtins namespace is '
+           'searched.\n'
+           'The "global" statement must precede all uses of the listed names.\n'
            '\n'
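A minimal sketch of the multi-name behaviour described above (names chosen arbitrarily):

   x = 0
   y = 0

   def update():
       global x, y     # both names now refer to the module-level bindings
       x = 1
       y = 2

   update()
   print(x, y)         # 1 2
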
            'The "global" statement has the same scope as a name binding '
            'operation\n'
@@ -6883,7 +7824,7 @@
             '\n'
             'Note that numeric literals do not include a sign; a phrase like '
             '"-1"\n'
-            'is actually an expression composed of the unary operator ‘"-"‘ '
+            'is actually an expression composed of the unary operator ‘"-"’ '
             'and the\n'
             'literal "1".\n',
  'numeric-types': 'Emulating numeric types\n'
@@ -7028,16 +7969,6 @@
                   'the data\n'
                   '   model.\n'
                   '\n'
-                  '   Note:\n'
-                  '\n'
-                  '     Due to a bug in the dispatching mechanism for "**=", a '
-                  'class that\n'
-                  '     defines "__ipow__()" but returns "NotImplemented" '
-                  'would fail to\n'
-                  '     fall back to "x.__pow__(y)" and "y.__rpow__(x)". This '
-                  'bug is\n'
-                  '     fixed in Python 3.10.\n'
-                  '\n'
                   'object.__neg__(self)\n'
                   'object.__pos__(self)\n'
                   'object.__abs__(self)\n'
@@ -7088,9 +8019,9 @@
                   '   of the object truncated to an "Integral" (typically an '
                   '"int").\n'
                   '\n'
-                  '   If "__int__()" is not defined then the built-in function '
-                  '"int()"\n'
-                  '   falls back to "__trunc__()".\n',
+                  '   The built-in function "int()" falls back to '
+                  '"__trunc__()" if\n'
+                  '   neither "__int__()" nor "__index__()" is defined.\n',
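A small sketch of the fallback just described, assuming the 3.10-era semantics documented here (later versions deprecate this delegation); the class name is arbitrary:

   class Truncatable:
       def __trunc__(self):
           return 7

   # Neither __int__ nor __index__ is defined, so int() falls back
   # to __trunc__():
   print(int(Truncatable()))   # 7
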
  'objects': 'Objects, values and types\n'
             '*************************\n'
             '\n'
@@ -7224,8 +8155,8 @@
                      '\n'
                      'The following table summarizes the operator precedence '
                      'in Python, from\n'
-                     'lowest precedence (least binding) to highest precedence '
-                     '(most\n'
+                     'highest precedence (most binding) to lowest precedence '
+                     '(least\n'
                      'binding).  Operators in the same box have the same '
                      'precedence.  Unless\n'
                      'the syntax is explicitly given, operators are binary.  '
@@ -7244,65 +8175,6 @@
                      '| Operator                                        | '
                      'Description                           |\n'
                      '|=================================================|=======================================|\n'
-                     '| ":="                                            | '
-                     'Assignment expression                 |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "lambda"                                        | '
-                     'Lambda expression                     |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "if" – "else"                                   | '
-                     'Conditional expression                |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "or"                                            | '
-                     'Boolean OR                            |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "and"                                           | '
-                     'Boolean AND                           |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "not" "x"                                       | '
-                     'Boolean NOT                           |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "in", "not in", "is", "is not", "<", "<=", ">", | '
-                     'Comparisons, including membership     |\n'
-                     '| ">=", "!=", "=="                                | '
-                     'tests and identity tests              |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "|"                                             | '
-                     'Bitwise OR                            |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "^"                                             | '
-                     'Bitwise XOR                           |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "&"                                             | '
-                     'Bitwise AND                           |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "<<", ">>"                                      | '
-                     'Shifts                                |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "+", "-"                                        | '
-                     'Addition and subtraction              |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "*", "@", "/", "//", "%"                        | '
-                     'Multiplication, matrix                |\n'
-                     '|                                                 | '
-                     'multiplication, division, floor       |\n'
-                     '|                                                 | '
-                     'division, remainder [5]               |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "+x", "-x", "~x"                                | '
-                     'Positive, negative, bitwise NOT       |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "**"                                            | '
-                     'Exponentiation [6]                    |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "await" "x"                                     | '
-                     'Await expression                      |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "x[index]", "x[index:index]",                   | '
-                     'Subscription, slicing, call,          |\n'
-                     '| "x(arguments...)", "x.attribute"                | '
-                     'attribute reference                   |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
                      '| "(expressions...)",  "[expressions...]", "{key: | '
                      'Binding or parenthesized expression,  |\n'
                      '| value...}", "{expressions...}"                  | list '
@@ -7310,6 +8182,65 @@
                      '|                                                 | '
                      'display                               |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "x[index]", "x[index:index]",                   | '
+                     'Subscription, slicing, call,          |\n'
+                     '| "x(arguments...)", "x.attribute"                | '
+                     'attribute reference                   |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "await" "x"                                     | '
+                     'Await expression                      |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "**"                                            | '
+                     'Exponentiation [5]                    |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "+x", "-x", "~x"                                | '
+                     'Positive, negative, bitwise NOT       |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "*", "@", "/", "//", "%"                        | '
+                     'Multiplication, matrix                |\n'
+                     '|                                                 | '
+                     'multiplication, division, floor       |\n'
+                     '|                                                 | '
+                     'division, remainder [6]               |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "+", "-"                                        | '
+                     'Addition and subtraction              |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "<<", ">>"                                      | '
+                     'Shifts                                |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "&"                                             | '
+                     'Bitwise AND                           |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "^"                                             | '
+                     'Bitwise XOR                           |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "|"                                             | '
+                     'Bitwise OR                            |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "in", "not in", "is", "is not", "<", "<=", ">", | '
+                     'Comparisons, including membership     |\n'
+                     '| ">=", "!=", "=="                                | '
+                     'tests and identity tests              |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "not" "x"                                       | '
+                     'Boolean NOT                           |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "and"                                           | '
+                     'Boolean AND                           |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "or"                                            | '
+                     'Boolean OR                            |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "if" – "else"                                   | '
+                     'Conditional expression                |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "lambda"                                        | '
+                     'Lambda expression                     |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| ":="                                            | '
+                     'Assignment expression                 |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
                      '\n'
                      '-[ Footnotes ]-\n'
                      '\n'
@@ -7389,14 +8320,14 @@
                      'Check their\n'
                      '    documentation for more info.\n'
                      '\n'
-                     '[5] The "%" operator is also used for string formatting; '
-                     'the same\n'
-                     '    precedence applies.\n'
-                     '\n'
-                     '[6] The power operator "**" binds less tightly than an '
+                     '[5] The power operator "**" binds less tightly than an '
                      'arithmetic or\n'
                      '    bitwise unary operator on its right, that is, '
-                     '"2**-1" is "0.5".\n',
+                     '"2**-1" is "0.5".\n'
+                     '\n'
+                     '[6] The "%" operator is also used for string formatting; '
+                     'the same\n'
+                     '    precedence applies.\n',
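A quick interactive check of footnote [5] above, showing that "**" binds less tightly than a unary operator on its right but more tightly than one on its left:

   >>> 2**-1       # parsed as 2 ** (-1)
   0.5
   >>> -2**2       # parsed as -(2 ** 2)
   -4
   >>> (-2)**2
   4
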
  'pass': 'The "pass" statement\n'
          '********************\n'
          '\n'
@@ -7444,18 +8375,21 @@
           '"ZeroDivisionError".\n'
           'Raising a negative number to a fractional power results in a '
           '"complex"\n'
-          'number. (In earlier versions it raised a "ValueError".)\n',
+          'number. (In earlier versions it raised a "ValueError".)\n'
+          '\n'
+          'This operation can be customized using the special "__pow__()" '
+          'method.\n',
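A minimal sketch of customizing "**" via "__pow__()"; the class and its behaviour are invented for illustration:

   class Watts:
       def __init__(self, value):
           self.value = value
       def __pow__(self, exponent):
           return Watts(self.value ** exponent)
       def __repr__(self):
           return f"Watts({self.value})"

   print(Watts(3) ** 2)    # Watts(9)
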
  'raise': 'The "raise" statement\n'
           '*********************\n'
           '\n'
           '   raise_stmt ::= "raise" [expression ["from" expression]]\n'
           '\n'
-          'If no expressions are present, "raise" re-raises the last '
-          'exception\n'
-          'that was active in the current scope.  If no exception is active '
-          'in\n'
-          'the current scope, a "RuntimeError" exception is raised indicating\n'
-          'that this is an error.\n'
+          'If no expressions are present, "raise" re-raises the exception that '
+          'is\n'
+          'currently being handled, which is also known as the *active\n'
+          'exception*. If there isn’t currently an active exception, a\n'
+          '"RuntimeError" exception is raised indicating that this is an '
+          'error.\n'
           '\n'
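A short sketch of re-raising the active exception with a bare "raise":

   try:
       1 / 0
   except ZeroDivisionError:
       print("logging the error")
       raise    # re-raises the active ZeroDivisionError to the caller
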
           'Otherwise, "raise" evaluates the first expression as the exception\n'
           'object.  It must be either a subclass or an instance of\n'
@@ -7481,12 +8415,18 @@
           '\n'
           'The "from" clause is used for exception chaining: if given, the '
           'second\n'
-          '*expression* must be another exception class or instance, which '
-          'will\n'
-          'then be attached to the raised exception as the "__cause__" '
-          'attribute\n'
-          '(which is writable).  If the raised exception is not handled, both\n'
-          'exceptions will be printed:\n'
+          '*expression* must be another exception class or instance. If the\n'
+          'second expression is an exception instance, it will be attached to '
+          'the\n'
+          'raised exception as the "__cause__" attribute (which is writable). '
+          'If\n'
+          'the expression is an exception class, the class will be '
+          'instantiated\n'
+          'and the resulting exception instance will be attached to the '
+          'raised\n'
+          'exception as the "__cause__" attribute. If the raised exception is '
+          'not\n'
+          'handled, both exceptions will be printed:\n'
           '\n'
           '   >>> try:\n'
           '   ...     print(1 / 0)\n'
@@ -7504,11 +8444,14 @@
           '     File "<stdin>", line 4, in <module>\n'
           '   RuntimeError: Something bad happened\n'
           '\n'
-          'A similar mechanism works implicitly if an exception is raised '
-          'inside\n'
-          'an exception handler or a "finally" clause: the previous exception '
-          'is\n'
-          'then attached as the new exception’s "__context__" attribute:\n'
+          'A similar mechanism works implicitly if a new exception is raised '
+          'when\n'
+          'an exception is already being handled.  An exception may be '
+          'handled\n'
+          'when an "except" or "finally" clause, or a "with" statement, is '
+          'used.\n'
+          'The previous exception is then attached as the new exception’s\n'
+          '"__context__" attribute:\n'
           '\n'
           '   >>> try:\n'
           '   ...     print(1 / 0)\n'
@@ -7590,61 +8533,62 @@
                    '\n'
                    'The following methods can be defined to implement '
                    'container objects.\n'
-                   'Containers usually are sequences (such as lists or tuples) '
-                   'or mappings\n'
-                   '(like dictionaries), but can represent other containers as '
-                   'well.  The\n'
-                   'first set of methods is used either to emulate a sequence '
-                   'or to\n'
-                   'emulate a mapping; the difference is that for a sequence, '
-                   'the\n'
-                   'allowable keys should be the integers *k* for which "0 <= '
-                   'k < N" where\n'
-                   '*N* is the length of the sequence, or slice objects, which '
-                   'define a\n'
-                   'range of items.  It is also recommended that mappings '
-                   'provide the\n'
-                   'methods "keys()", "values()", "items()", "get()", '
-                   '"clear()",\n'
-                   '"setdefault()", "pop()", "popitem()", "copy()", and '
-                   '"update()"\n'
-                   'behaving similar to those for Python’s standard dictionary '
+                   'Containers usually are *sequences* (such as "lists" or '
+                   '"tuples") or\n'
+                   '*mappings* (like "dictionaries"), but can represent other '
+                   'containers\n'
+                   'as well.  The first set of methods is used either to '
+                   'emulate a\n'
+                   'sequence or to emulate a mapping; the difference is that '
+                   'for a\n'
+                   'sequence, the allowable keys should be the integers *k* '
+                   'for which "0\n'
+                   '<= k < N" where *N* is the length of the sequence, or '
+                   '"slice" objects,\n'
+                   'which define a range of items.  It is also recommended '
+                   'that mappings\n'
+                   'provide the methods "keys()", "values()", "items()", '
+                   '"get()",\n'
+                   '"clear()", "setdefault()", "pop()", "popitem()", "copy()", '
+                   'and\n'
+                   '"update()" behaving similar to those for Python’s '
+                   'standard\n'
+                   '"dictionary" objects.  The "collections.abc" module '
+                   'provides a\n'
+                   '"MutableMapping" *abstract base class* to help create '
+                   'those methods\n'
+                   'from a base set of "__getitem__()", "__setitem__()", '
+                   '"__delitem__()",\n'
+                   'and "keys()". Mutable sequences should provide methods '
+                   '"append()",\n'
+                   '"count()", "index()", "extend()", "insert()", "pop()", '
+                   '"remove()",\n'
+                   '"reverse()" and "sort()", like Python standard "list" '
                    'objects.\n'
-                   'The "collections.abc" module provides a "MutableMapping" '
-                   'abstract base\n'
-                   'class to help create those methods from a base set of '
-                   '"__getitem__()",\n'
-                   '"__setitem__()", "__delitem__()", and "keys()". Mutable '
-                   'sequences\n'
-                   'should provide methods "append()", "count()", "index()", '
-                   '"extend()",\n'
-                   '"insert()", "pop()", "remove()", "reverse()" and "sort()", '
-                   'like Python\n'
-                   'standard list objects.  Finally, sequence types should '
-                   'implement\n'
-                   'addition (meaning concatenation) and multiplication '
+                   'Finally, sequence types should implement addition '
                    '(meaning\n'
-                   'repetition) by defining the methods "__add__()", '
-                   '"__radd__()",\n'
-                   '"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" '
-                   'described\n'
-                   'below; they should not define other numerical operators.  '
+                   'concatenation) and multiplication (meaning repetition) by '
+                   'defining the\n'
+                   'methods "__add__()", "__radd__()", "__iadd__()", '
+                   '"__mul__()",\n'
+                   '"__rmul__()" and "__imul__()" described below; they should '
+                   'not define\n'
+                   'other numerical operators.  It is recommended that both '
+                   'mappings and\n'
+                   'sequences implement the "__contains__()" method to allow '
+                   'efficient use\n'
+                   'of the "in" operator; for mappings, "in" should search the '
+                   'mapping’s\n'
+                   'keys; for sequences, it should search through the values.  '
                    'It is\n'
-                   'recommended that both mappings and sequences implement '
+                   'further recommended that both mappings and sequences '
+                   'implement the\n'
+                   '"__iter__()" method to allow efficient iteration through '
                    'the\n'
-                   '"__contains__()" method to allow efficient use of the "in" '
-                   'operator;\n'
-                   'for mappings, "in" should search the mapping’s keys; for '
-                   'sequences, it\n'
-                   'should search through the values.  It is further '
-                   'recommended that both\n'
-                   'mappings and sequences implement the "__iter__()" method '
-                   'to allow\n'
-                   'efficient iteration through the container; for mappings, '
-                   '"__iter__()"\n'
-                   'should iterate through the object’s keys; for sequences, '
-                   'it should\n'
-                   'iterate through the values.\n'
+                   'container; for mappings, "__iter__()" should iterate '
+                   'through the\n'
+                   'object’s keys; for sequences, it should iterate through '
+                   'the values.\n'
                    '\n'
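A rough sketch (not upstream text) of a read-only sequence following the recommendations above: integer and slice keys, negative-index support, and "IndexError" for out-of-range values; the class name and contents are arbitrary:

   class Squares:
       """A read-only sequence of the first n squares."""
       def __init__(self, n):
           self._n = n
       def __len__(self):
           return self._n
       def __getitem__(self, index):
           if isinstance(index, slice):
               return [self[i] for i in range(*index.indices(self._n))]
           if index < 0:                   # special-case negative indexes
               index += self._n
           if not 0 <= index < self._n:
               raise IndexError(index)
           return index * index

   s = Squares(5)
   print(len(s), s[2], s[-1], 9 in s)      # 5 4 16 True
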
                    'object.__len__(self)\n'
                    '\n'
@@ -7703,22 +8647,24 @@
                    'object.__getitem__(self, key)\n'
                    '\n'
                    '   Called to implement evaluation of "self[key]". For '
-                   'sequence types,\n'
-                   '   the accepted keys should be integers and slice '
-                   'objects.  Note that\n'
-                   '   the special interpretation of negative indexes (if the '
-                   'class wishes\n'
-                   '   to emulate a sequence type) is up to the '
-                   '"__getitem__()" method. If\n'
-                   '   *key* is of an inappropriate type, "TypeError" may be '
-                   'raised; if of\n'
-                   '   a value outside the set of indexes for the sequence '
-                   '(after any\n'
-                   '   special interpretation of negative values), '
-                   '"IndexError" should be\n'
-                   '   raised. For mapping types, if *key* is missing (not in '
+                   '*sequence*\n'
+                   '   types, the accepted keys should be integers and slice '
+                   'objects.\n'
+                   '   Note that the special interpretation of negative '
+                   'indexes (if the\n'
+                   '   class wishes to emulate a *sequence* type) is up to '
                    'the\n'
-                   '   container), "KeyError" should be raised.\n'
+                   '   "__getitem__()" method. If *key* is of an inappropriate '
+                   'type,\n'
+                   '   "TypeError" may be raised; if of a value outside the '
+                   'set of indexes\n'
+                   '   for the sequence (after any special interpretation of '
+                   'negative\n'
+                   '   values), "IndexError" should be raised. For *mapping* '
+                   'types, if\n'
+                   '   *key* is missing (not in the container), "KeyError" '
+                   'should be\n'
+                   '   raised.\n'
                    '\n'
                    '   Note:\n'
                    '\n'
@@ -7728,6 +8674,15 @@
                    'of the\n'
                    '     sequence.\n'
                    '\n'
+                   '   Note:\n'
+                   '\n'
+                   '     When subscripting a *class*, the special class '
+                   'method\n'
+                   '     "__class_getitem__()" may be called instead of '
+                   '"__getitem__()".\n'
+                   '     See __class_getitem__ versus __getitem__ for more '
+                   'details.\n'
+                   '\n'
                    'object.__setitem__(self, key, value)\n'
                    '\n'
                    '   Called to implement assignment to "self[key]".  Same '
@@ -7763,19 +8718,13 @@
                    '\n'
                    'object.__iter__(self)\n'
                    '\n'
-                   '   This method is called when an iterator is required for '
-                   'a container.\n'
-                   '   This method should return a new iterator object that '
-                   'can iterate\n'
-                   '   over all the objects in the container.  For mappings, '
-                   'it should\n'
-                   '   iterate over the keys of the container.\n'
-                   '\n'
-                   '   Iterator objects also need to implement this method; '
-                   'they are\n'
-                   '   required to return themselves.  For more information on '
-                   'iterator\n'
-                   '   objects, see Iterator Types.\n'
+                   '   This method is called when an *iterator* is required '
+                   'for a\n'
+                   '   container. This method should return a new iterator '
+                   'object that can\n'
+                   '   iterate over all the objects in the container.  For '
+                   'mappings, it\n'
+                   '   should iterate over the keys of the container.\n'
                    '\n'
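A minimal sketch of "__iter__()" returning a fresh iterator object on each call (names are arbitrary):

   class Team:
       def __init__(self, *members):
           self._members = list(members)
       def __iter__(self):
           return iter(self._members)   # a new iterator per call

   for name in Team("ada", "grace"):
       print(name)
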
                    'object.__reversed__(self)\n'
                    '\n'
@@ -7838,6 +8787,10 @@
              'the\n'
              'second argument.\n'
              '\n'
+             'This operation can be customized using the special '
+             '"__lshift__()" and\n'
+             '"__rshift__()" methods.\n'
+             '\n'
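As a sketch of the customization hook (deliberately repurposing ">>" rather than implementing a bit shift; the class is invented):

   class Pipeline:
       def __init__(self, funcs=()):
           self.funcs = list(funcs)
       def __rshift__(self, func):      # customizes the >> operator
           return Pipeline(self.funcs + [func])
       def __call__(self, value):
           for func in self.funcs:
               value = func(value)
           return value

   p = Pipeline() >> str.strip >> str.upper
   print(p("  hi  "))    # HI
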
              'A right shift by *n* bits is defined as floor division by '
              '"pow(2,n)".\n'
              'A left shift by *n* bits is defined as multiplication with '
@@ -7950,7 +8903,7 @@
                  'immediate\n'
                  '   subclasses.  This method returns a list of all those '
                  'references\n'
-                 '   still alive. Example:\n'
+                 '   still alive.  The list is in definition order.  Example:\n'
                  '\n'
                  '      >>> int.__subclasses__()\n'
                  "      [<class 'bool'>]\n"
@@ -8052,13 +9005,13 @@
                  '\n'
                  '   If "__new__()" is invoked during object construction and '
                  'it returns\n'
-                 '   an instance or subclass of *cls*, then the new '
-                 'instance’s\n'
-                 '   "__init__()" method will be invoked like "__init__(self[, '
-                 '...])",\n'
-                 '   where *self* is the new instance and the remaining '
-                 'arguments are\n'
-                 '   the same as were passed to the object constructor.\n'
+                 '   an instance of *cls*, then the new instance’s '
+                 '"__init__()" method\n'
+                 '   will be invoked like "__init__(self[, ...])", where '
+                 '*self* is the\n'
+                 '   new instance and the remaining arguments are the same as '
+                 'were\n'
+                 '   passed to the object constructor.\n'
                  '\n'
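A small sketch of the construction sequence just described, with an arbitrary class name:

   class Point:
       def __new__(cls, *args):
           print("__new__ called")
           return super().__new__(cls)   # returns an instance of cls...
       def __init__(self, x, y):         # ...so __init__ runs next
           print("__init__ called")
           self.x, self.y = x, y

   p = Point(1, 2)    # prints "__new__ called", then "__init__ called"
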
                  '   If "__new__()" does not return an instance of *cls*, then '
                  'the new\n'
@@ -8726,32 +9679,6 @@
                  'of the\n'
                  '   owner class.\n'
                  '\n'
-                 'object.__set_name__(self, owner, name)\n'
-                 '\n'
-                 '   Called at the time the owning class *owner* is created. '
-                 'The\n'
-                 '   descriptor has been assigned to *name*.\n'
-                 '\n'
-                 '   Note:\n'
-                 '\n'
-                 '     "__set_name__()" is only called implicitly as part of '
-                 'the "type"\n'
-                 '     constructor, so it will need to be called explicitly '
-                 'with the\n'
-                 '     appropriate parameters when a descriptor is added to a '
-                 'class\n'
-                 '     after initial creation:\n'
-                 '\n'
-                 '        class A:\n'
-                 '           pass\n'
-                 '        descr = custom_descriptor()\n'
-                 '        A.attr = descr\n'
-                 "        descr.__set_name__(A, 'attr')\n"
-                 '\n'
-                 '     See Creating the class object for more details.\n'
-                 '\n'
-                 '   New in version 3.6.\n'
-                 '\n'
                  'The attribute "__objclass__" is interpreted by the "inspect" '
                  'module as\n'
                  'specifying the class where this object was defined (setting '
@@ -8822,16 +9749,16 @@
                  '"super(B,\n'
                  '   obj).m()" searches "obj.__class__.__mro__" for the base '
                  'class "A"\n'
-                 '   immediately preceding "B" and then invokes the descriptor '
+                 '   immediately following "B" and then invokes the descriptor '
                  'with the\n'
                  '   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n'
                  '\n'
                  'For instance bindings, the precedence of descriptor '
                  'invocation depends\n'
-                 'on the which descriptor methods are defined.  A descriptor '
-                 'can define\n'
-                 'any combination of "__get__()", "__set__()" and '
-                 '"__delete__()".  If it\n'
+                 'on which descriptor methods are defined.  A descriptor can '
+                 'define any\n'
+                 'combination of "__get__()", "__set__()" and "__delete__()".  '
+                 'If it\n'
                  'does not define "__get__()", then accessing the attribute '
                  'will return\n'
                  'the descriptor object itself unless there is a value in the '
@@ -8852,13 +9779,14 @@
                  'be\n'
                  'overridden by instances.\n'
                  '\n'
-                 'Python methods (including "staticmethod()" and '
-                 '"classmethod()") are\n'
-                 'implemented as non-data descriptors.  Accordingly, instances '
-                 'can\n'
-                 'redefine and override methods.  This allows individual '
-                 'instances to\n'
-                 'acquire behaviors that differ from other instances of the '
+                 'Python methods (including those decorated with '
+                 '"@staticmethod" and\n'
+                 '"@classmethod") are implemented as non-data descriptors.  '
+                 'Accordingly,\n'
+                 'instances can redefine and override methods.  This allows '
+                 'individual\n'
+                 'instances to acquire behaviors that differ from other '
+                 'instances of the\n'
                  'same class.\n'
                  '\n'
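A brief sketch of an instance overriding a method, which works because plain functions are non-data descriptors and lose to the instance dictionary:

   class Greeter:
       def greet(self):
           return "hello"

   g = Greeter()
   g.greet = lambda: "hi there"     # instance attribute shadows the method
   print(g.greet())                 # hi there
   print(Greeter().greet())        # hello; other instances are unaffected
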
                  'The "property()" function is implemented as a data '
@@ -8872,12 +9800,12 @@
                  '\n'
                  '*__slots__* allow us to explicitly declare data members '
                  '(like\n'
-                 'properties) and deny the creation of *__dict__* and '
+                 'properties) and deny the creation of "__dict__" and '
                  '*__weakref__*\n'
                  '(unless explicitly declared in *__slots__* or available in a '
                  'parent.)\n'
                  '\n'
-                 'The space saved over using *__dict__* can be significant. '
+                 'The space saved over using "__dict__" can be significant. '
                  'Attribute\n'
                  'lookup speed can be significantly improved as well.\n'
                  '\n'
@@ -8889,7 +9817,7 @@
                  '*__slots__*\n'
                  '   reserves space for the declared variables and prevents '
                  'the\n'
-                 '   automatic creation of *__dict__* and *__weakref__* for '
+                 '   automatic creation of "__dict__" and *__weakref__* for '
                  'each\n'
                  '   instance.\n'
                  '\n'
@@ -8898,11 +9826,11 @@
                  '~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
                  '\n'
                  '* When inheriting from a class without *__slots__*, the '
-                 '*__dict__* and\n'
+                 '"__dict__" and\n'
                  '  *__weakref__* attribute of the instances will always be '
                  'accessible.\n'
                  '\n'
-                 '* Without a *__dict__* variable, instances cannot be '
+                 '* Without a "__dict__" variable, instances cannot be '
                  'assigned new\n'
                  '  variables not listed in the *__slots__* definition.  '
                  'Attempts to\n'
@@ -8915,28 +9843,28 @@
                  '\n'
                  '* Without a *__weakref__* variable for each instance, '
                  'classes defining\n'
-                 '  *__slots__* do not support weak references to its '
-                 'instances. If weak\n'
-                 '  reference support is needed, then add "\'__weakref__\'" to '
-                 'the\n'
+                 '  *__slots__* do not support "weak references" to its '
+                 'instances. If\n'
+                 '  weak reference support is needed, then add '
+                 '"\'__weakref__\'" to the\n'
                  '  sequence of strings in the *__slots__* declaration.\n'
                  '\n'
                  '* *__slots__* are implemented at the class level by '
                  'creating\n'
-                 '  descriptors (Implementing Descriptors) for each variable '
-                 'name.  As a\n'
-                 '  result, class attributes cannot be used to set default '
-                 'values for\n'
-                 '  instance variables defined by *__slots__*; otherwise, the '
-                 'class\n'
-                 '  attribute would overwrite the descriptor assignment.\n'
+                 '  descriptors for each variable name.  As a result, class '
+                 'attributes\n'
+                 '  cannot be used to set default values for instance '
+                 'variables defined\n'
+                 '  by *__slots__*; otherwise, the class attribute would '
+                 'overwrite the\n'
+                 '  descriptor assignment.\n'
                  '\n'
                  '* The action of a *__slots__* declaration is not limited to '
                  'the class\n'
                  '  where it is defined.  *__slots__* declared in parents are '
                  'available\n'
                  '  in child classes. However, child subclasses will get a '
-                 '*__dict__*\n'
+                 '"__dict__"\n'
                  '  and *__weakref__* unless they also define *__slots__* '
                  '(which should\n'
                  '  only contain names of any *additional* slots).\n'
@@ -8956,13 +9884,18 @@
                  '  “variable-length” built-in types such as "int", "bytes" '
                  'and "tuple".\n'
                  '\n'
-                 '* Any non-string iterable may be assigned to *__slots__*. '
-                 'Mappings may\n'
-                 '  also be used; however, in the future, special meaning may '
-                 'be\n'
-                 '  assigned to the values corresponding to each key.\n'
+                 '* Any non-string *iterable* may be assigned to *__slots__*.\n'
                  '\n'
-                 '* *__class__* assignment works only if both classes have the '
+                 '* If a "dictionary" is used to assign *__slots__*, the '
+                 'dictionary keys\n'
+                 '  will be used as the slot names. The values of the '
+                 'dictionary can be\n'
+                 '  used to provide per-attribute docstrings that will be '
+                 'recognised by\n'
+                 '  "inspect.getdoc()" and displayed in the output of '
+                 '"help()".\n'
+                 '\n'
+                 '* "__class__" assignment works only if both classes have the '
                  'same\n'
                  '  *__slots__*.\n'
                  '\n'
@@ -8974,9 +9907,9 @@
                  'violations\n'
                  '  raise "TypeError".\n'
                  '\n'
-                 '* If an iterator is used for *__slots__* then a descriptor '
-                 'is created\n'
-                 '  for each of the iterator’s values. However, the '
+                 '* If an *iterator* is used for *__slots__* then a '
+                 '*descriptor* is\n'
+                 '  created for each of the iterator’s values. However, the '
                  '*__slots__*\n'
                  '  attribute will be an empty iterator.\n'
                  '\n'
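A sketch of the dictionary form of *__slots__* mentioned in the notes above, assuming a Python version where the dictionary values feed per-attribute docstrings:

   import inspect

   class Point:
       __slots__ = {
           "x": "horizontal coordinate",
           "y": "vertical coordinate",
       }

   print(inspect.getdoc(Point.x))   # horizontal coordinate
   p = Point()
   p.x = 1
   # p.z = 1 would raise AttributeError: no __dict__, only the two slots
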
@@ -8985,15 +9918,15 @@
                  '==========================\n'
                  '\n'
                  'Whenever a class inherits from another class, '
-                 '*__init_subclass__* is\n'
-                 'called on that class. This way, it is possible to write '
-                 'classes which\n'
-                 'change the behavior of subclasses. This is closely related '
-                 'to class\n'
-                 'decorators, but where class decorators only affect the '
-                 'specific class\n'
-                 'they’re applied to, "__init_subclass__" solely applies to '
-                 'future\n'
+                 '"__init_subclass__()" is\n'
+                 'called on the parent class. This way, it is possible to '
+                 'write classes\n'
+                 'which change the behavior of subclasses. This is closely '
+                 'related to\n'
+                 'class decorators, but where class decorators only affect the '
+                 'specific\n'
+                 'class they’re applied to, "__init_subclass__" solely applies '
+                 'to future\n'
                  'subclasses of the class defining the method.\n'
                  '\n'
                  'classmethod object.__init_subclass__(cls)\n'
@@ -9041,6 +9974,38 @@
                  '\n'
                  '   New in version 3.6.\n'
                  '\n'
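A hypothetical sketch of "__init_subclass__()" keeping a registry of subclasses; the keyword argument comes from the class definition itself:

   class PluginBase:
       registry = []
       def __init_subclass__(cls, /, name=None, **kwargs):
           super().__init_subclass__(**kwargs)
           cls.plugin_name = name or cls.__name__
           PluginBase.registry.append(cls)

   class CsvPlugin(PluginBase, name="csv"):
       pass

   print(PluginBase.registry)       # [<class '__main__.CsvPlugin'>]
   print(CsvPlugin.plugin_name)     # csv
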
+                 'When a class is created, "type.__new__()" scans the class '
+                 'variables\n'
+                 'and makes callbacks to those with a "__set_name__()" hook.\n'
+                 '\n'
+                 'object.__set_name__(self, owner, name)\n'
+                 '\n'
+                 '   Automatically called at the time the owning class *owner* '
+                 'is\n'
+                 '   created. The object has been assigned to *name* in that '
+                 'class:\n'
+                 '\n'
+                 '      class A:\n'
+                 '          x = C()  # Automatically calls: x.__set_name__(A, '
+                 "'x')\n"
+                 '\n'
+                 '   If the class variable is assigned after the class is '
+                 'created,\n'
+                 '   "__set_name__()" will not be called automatically. If '
+                 'needed,\n'
+                 '   "__set_name__()" can be called directly:\n'
+                 '\n'
+                 '      class A:\n'
+                 '         pass\n'
+                 '\n'
+                 '      c = C()\n'
+                 '      A.x = c                  # The hook is not called\n'
+                 "      c.__set_name__(A, 'x')   # Manually invoke the hook\n"
+                 '\n'
+                 '   See Creating the class object for more details.\n'
+                 '\n'
+                 '   New in version 3.6.\n'
+                 '\n'
                  '\n'
                  'Metaclasses\n'
                  '-----------\n'
@@ -9153,10 +10118,10 @@
                  'come from\n'
                  'the class definition). The "__prepare__" method should be '
                  'implemented\n'
-                 'as a "classmethod()". The namespace returned by '
-                 '"__prepare__" is\n'
-                 'passed in to "__new__", but when the final class object is '
-                 'created the\n'
+                 'as a "classmethod". The namespace returned by "__prepare__" '
+                 'is passed\n'
+                 'in to "__new__", but when the final class object is created '
+                 'the\n'
                  'namespace is copied into a new "dict".\n'
                  '\n'
                  'If the metaclass has no "__prepare__" attribute, then the '
@@ -9236,22 +10201,21 @@
                  'When using the default metaclass "type", or any metaclass '
                  'that\n'
                  'ultimately calls "type.__new__", the following additional\n'
-                 'customisation steps are invoked after creating the class '
+                 'customization steps are invoked after creating the class '
                  'object:\n'
                  '\n'
-                 '* first, "type.__new__" collects all of the descriptors in '
-                 'the class\n'
-                 '  namespace that define a "__set_name__()" method;\n'
+                 '1. The "type.__new__" method collects all of the attributes '
+                 'in the\n'
+                 '   class namespace that define a "__set_name__()" method;\n'
                  '\n'
-                 '* second, all of these "__set_name__" methods are called '
-                 'with the\n'
-                 '  class being defined and the assigned name of that '
-                 'particular\n'
-                 '  descriptor;\n'
+                 '2. Those "__set_name__" methods are called with the class '
+                 'being\n'
+                 '   defined and the assigned name of that particular '
+                 'attribute;\n'
                  '\n'
-                 '* finally, the "__init_subclass__()" hook is called on the '
-                 'immediate\n'
-                 '  parent of the new class in its method resolution order.\n'
+                 '3. The "__init_subclass__()" hook is called on the immediate '
+                 'parent of\n'
+                 '   the new class in its method resolution order.\n'
                  '\n'
                  'After the class object is created, it is passed to the '
                  'class\n'
@@ -9344,9 +10308,33 @@
                  'Emulating generic types\n'
                  '=======================\n'
                  '\n'
-                 'One can implement the generic class syntax as specified by '
-                 '**PEP 484**\n'
-                 '(for example "List[int]") by defining a special method:\n'
+                 'When using *type annotations*, it is often useful to '
+                 '*parameterize* a\n'
+                 '*generic type* using Python’s square-brackets notation. For '
+                 'example,\n'
+                 'the annotation "list[int]" might be used to signify a "list" '
+                 'in which\n'
+                 'all the elements are of type "int".\n'
+                 '\n'
+                 'See also:\n'
+                 '\n'
+                 '  **PEP 484** - Type Hints\n'
+                 '     Introducing Python’s framework for type annotations\n'
+                 '\n'
+                 '  Generic Alias Types\n'
+                 '     Documentation for objects representing parameterized '
+                 'generic\n'
+                 '     classes\n'
+                 '\n'
+                 '  Generics, user-defined generics and "typing.Generic"\n'
+                 '     Documentation on how to implement generic classes that '
+                 'can be\n'
+                 '     parameterized at runtime and understood by static '
+                 'type-checkers.\n'
+                 '\n'
+                 'A class can *generally* only be parameterized if it defines '
+                 'the\n'
+                 'special class method "__class_getitem__()".\n'
                  '\n'
                  'classmethod object.__class_getitem__(cls, key)\n'
                  '\n'
@@ -9354,18 +10342,144 @@
                  'generic class\n'
                  '   by type arguments found in *key*.\n'
                  '\n'
-                 'This method is looked up on the class object itself, and '
-                 'when defined\n'
-                 'in the class body, this method is implicitly a class '
-                 'method.  Note,\n'
-                 'this mechanism is primarily reserved for use with static '
-                 'type hints,\n'
-                 'other usage is discouraged.\n'
+                 '   When defined on a class, "__class_getitem__()" is '
+                 'automatically a\n'
+                 '   class method. As such, there is no need for it to be '
+                 'decorated with\n'
+                 '   "@classmethod" when it is defined.\n'
+                 '\n'
+                 '\n'
+                 'The purpose of *__class_getitem__*\n'
+                 '----------------------------------\n'
+                 '\n'
+                 'The purpose of "__class_getitem__()" is to allow runtime\n'
+                 'parameterization of standard-library generic classes in '
+                 'order to more\n'
+                 'easily apply *type hints* to these classes.\n'
+                 '\n'
+                 'To implement custom generic classes that can be '
+                 'parameterized at\n'
+                 'runtime and understood by static type-checkers, users should '
+                 'either\n'
+                 'inherit from a standard library class that already '
+                 'implements\n'
+                 '"__class_getitem__()", or inherit from "typing.Generic", '
+                 'which has its\n'
+                 'own implementation of "__class_getitem__()".\n'
+                 '\n'
+                 'Custom implementations of "__class_getitem__()" on classes '
+                 'defined\n'
+                 'outside of the standard library may not be understood by '
+                 'third-party\n'
+                 'type-checkers such as mypy. Using "__class_getitem__()" on '
+                 'any class\n'
+                 'for purposes other than type hinting is discouraged.\n'
+                 '\n'
+                 '\n'
+                 '*__class_getitem__* versus *__getitem__*\n'
+                 '----------------------------------------\n'
+                 '\n'
+                 'Usually, the subscription of an object using square brackets '
+                 'will call\n'
+                 'the "__getitem__()" instance method defined on the object’s '
+                 'class.\n'
+                 'However, if the object being subscribed is itself a class, '
+                 'the class\n'
+                 'method "__class_getitem__()" may be called instead.\n'
+                 '"__class_getitem__()" should return a GenericAlias object if '
+                 'it is\n'
+                 'properly defined.\n'
+                 '\n'
+                 'Presented with the *expression* "obj[x]", the Python '
+                 'interpreter\n'
+                 'follows something like the following process to decide '
+                 'whether\n'
+                 '"__getitem__()" or "__class_getitem__()" should be called:\n'
+                 '\n'
+                 '   from inspect import isclass\n'
+                 '\n'
+                 '   def subscribe(obj, x):\n'
+                 '       """Return the result of the expression `obj[x]`"""\n'
+                 '\n'
+                 '       class_of_obj = type(obj)\n'
+                 '\n'
+                 '       # If the class of obj defines __getitem__,\n'
+                 '       # call class_of_obj.__getitem__(obj, x)\n'
+                 "       if hasattr(class_of_obj, '__getitem__'):\n"
+                 '           return class_of_obj.__getitem__(obj, x)\n'
+                 '\n'
+                 '       # Else, if obj is a class and defines '
+                 '__class_getitem__,\n'
+                 '       # call obj.__class_getitem__(x)\n'
+                 '       elif isclass(obj) and hasattr(obj, '
+                 "'__class_getitem__'):\n"
+                 '           return obj.__class_getitem__(x)\n'
+                 '\n'
+                 '       # Else, raise an exception\n'
+                 '       else:\n'
+                 '           raise TypeError(\n'
+                 '               f"\'{class_of_obj.__name__}\' object is not '
+                 'subscriptable"\n'
+                 '           )\n'
+                 '\n'
+                 'In Python, all classes are themselves instances of other '
+                 'classes. The\n'
+                 'class of a class is known as that class’s *metaclass*, and '
+                 'most\n'
+                 'classes have the "type" class as their metaclass. "type" '
+                 'does not\n'
+                 'define "__getitem__()", meaning that expressions such as '
+                 '"list[int]",\n'
+                 '"dict[str, float]" and "tuple[str, bytes]" all result in\n'
+                 '"__class_getitem__()" being called:\n'
+                 '\n'
+                 '   >>> # list has class "type" as its metaclass, like most '
+                 'classes:\n'
+                 '   >>> type(list)\n'
+                 "   <class 'type'>\n"
+                 '   >>> type(dict) == type(list) == type(tuple) == type(str) '
+                 '== type(bytes)\n'
+                 '   True\n'
+                 '   >>> # "list[int]" calls "list.__class_getitem__(int)"\n'
+                 '   >>> list[int]\n'
+                 '   list[int]\n'
+                 '   >>> # list.__class_getitem__ returns a GenericAlias '
+                 'object:\n'
+                 '   >>> type(list[int])\n'
+                 "   <class 'types.GenericAlias'>\n"
+                 '\n'
+                 'However, if a class has a custom metaclass that defines\n'
+                 '"__getitem__()", subscribing the class may result in '
+                 'different\n'
+                 'behaviour. An example of this can be found in the "enum" '
+                 'module:\n'
+                 '\n'
+                 '   >>> from enum import Enum\n'
+                 '   >>> class Menu(Enum):\n'
+                 '   ...     """A breakfast menu"""\n'
+                 "   ...     SPAM = 'spam'\n"
+                 "   ...     BACON = 'bacon'\n"
+                 '   ...\n'
+                 '   >>> # Enum classes have a custom metaclass:\n'
+                 '   >>> type(Menu)\n'
+                 "   <class 'enum.EnumMeta'>\n"
+                 '   >>> # EnumMeta defines __getitem__,\n'
+                 '   >>> # so __class_getitem__ is not called,\n'
+                 '   >>> # and the result is not a GenericAlias object:\n'
+                 "   >>> Menu['SPAM']\n"
+                 "   <Menu.SPAM: 'spam'>\n"
+                 "   >>> type(Menu['SPAM'])\n"
+                 "   <enum 'Menu'>\n"
                  '\n'
                  'See also:\n'
                  '\n'
-                 '  **PEP 560** - Core support for typing module and generic '
+                 '  **PEP 560** - Core Support for typing module and generic '
                  'types\n'
+                 '     Introducing "__class_getitem__()", and outlining when '
+                 'a\n'
+                 '     subscription results in "__class_getitem__()" being '
+                 'called\n'
+                 '     instead of "__getitem__()"\n'
                  '\n'
                  '\n'
                  'Emulating callable objects\n'
@@ -9384,60 +10498,60 @@
                  '\n'
                  'The following methods can be defined to implement container '
                  'objects.\n'
-                 'Containers usually are sequences (such as lists or tuples) '
-                 'or mappings\n'
-                 '(like dictionaries), but can represent other containers as '
-                 'well.  The\n'
-                 'first set of methods is used either to emulate a sequence or '
-                 'to\n'
-                 'emulate a mapping; the difference is that for a sequence, '
-                 'the\n'
-                 'allowable keys should be the integers *k* for which "0 <= k '
-                 '< N" where\n'
-                 '*N* is the length of the sequence, or slice objects, which '
-                 'define a\n'
-                 'range of items.  It is also recommended that mappings '
-                 'provide the\n'
-                 'methods "keys()", "values()", "items()", "get()", '
-                 '"clear()",\n'
-                 '"setdefault()", "pop()", "popitem()", "copy()", and '
-                 '"update()"\n'
-                 'behaving similar to those for Python’s standard dictionary '
+                 'Containers usually are *sequences* (such as "lists" or '
+                 '"tuples") or\n'
+                 '*mappings* (like "dictionaries"), but can represent other '
+                 'containers\n'
+                 'as well.  The first set of methods is used either to emulate '
+                 'a\n'
+                 'sequence or to emulate a mapping; the difference is that for '
+                 'a\n'
+                 'sequence, the allowable keys should be the integers *k* for '
+                 'which "0\n'
+                 '<= k < N" where *N* is the length of the sequence, or '
+                 '"slice" objects,\n'
+                 'which define a range of items.  It is also recommended that '
+                 'mappings\n'
+                 'provide the methods "keys()", "values()", "items()", '
+                 '"get()",\n'
+                 '"clear()", "setdefault()", "pop()", "popitem()", "copy()", '
+                 'and\n'
+                 '"update()" behaving similar to those for Python’s standard\n'
+                 '"dictionary" objects.  The "collections.abc" module provides '
+                 'a\n'
+                 '"MutableMapping" *abstract base class* to help create those '
+                 'methods\n'
+                 'from a base set of "__getitem__()", "__setitem__()", '
+                 '"__delitem__()",\n'
+                 'and "keys()". Mutable sequences should provide methods '
+                 '"append()",\n'
+                 '"count()", "index()", "extend()", "insert()", "pop()", '
+                 '"remove()",\n'
+                 '"reverse()" and "sort()", like Python standard "list" '
                  'objects.\n'
-                 'The "collections.abc" module provides a "MutableMapping" '
-                 'abstract base\n'
-                 'class to help create those methods from a base set of '
-                 '"__getitem__()",\n'
-                 '"__setitem__()", "__delitem__()", and "keys()". Mutable '
-                 'sequences\n'
-                 'should provide methods "append()", "count()", "index()", '
-                 '"extend()",\n'
-                 '"insert()", "pop()", "remove()", "reverse()" and "sort()", '
-                 'like Python\n'
-                 'standard list objects.  Finally, sequence types should '
-                 'implement\n'
-                 'addition (meaning concatenation) and multiplication '
-                 '(meaning\n'
-                 'repetition) by defining the methods "__add__()", '
-                 '"__radd__()",\n'
-                 '"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" '
-                 'described\n'
-                 'below; they should not define other numerical operators.  It '
-                 'is\n'
-                 'recommended that both mappings and sequences implement the\n'
-                 '"__contains__()" method to allow efficient use of the "in" '
-                 'operator;\n'
-                 'for mappings, "in" should search the mapping’s keys; for '
-                 'sequences, it\n'
-                 'should search through the values.  It is further recommended '
-                 'that both\n'
-                 'mappings and sequences implement the "__iter__()" method to '
-                 'allow\n'
-                 'efficient iteration through the container; for mappings, '
-                 '"__iter__()"\n'
-                 'should iterate through the object’s keys; for sequences, it '
-                 'should\n'
-                 'iterate through the values.\n'
+                 'Finally, sequence types should implement addition (meaning\n'
+                 'concatenation) and multiplication (meaning repetition) by '
+                 'defining the\n'
+                 'methods "__add__()", "__radd__()", "__iadd__()", '
+                 '"__mul__()",\n'
+                 '"__rmul__()" and "__imul__()" described below; they should '
+                 'not define\n'
+                 'other numerical operators.  It is recommended that both '
+                 'mappings and\n'
+                 'sequences implement the "__contains__()" method to allow '
+                 'efficient use\n'
+                 'of the "in" operator; for mappings, "in" should search the '
+                 'mapping’s\n'
+                 'keys; for sequences, it should search through the values.  '
+                 'It is\n'
+                 'further recommended that both mappings and sequences '
+                 'implement the\n'
+                 '"__iter__()" method to allow efficient iteration through '
+                 'the\n'
+                 'container; for mappings, "__iter__()" should iterate through '
+                 'the\n'
+                 'object’s keys; for sequences, it should iterate through the '
+                 'values.\n'
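
As a hedged sketch of that recommendation (the CaseFoldDict name is
invented): deriving from "collections.abc.MutableMapping" supplies
"get()", "setdefault()", "pop()", "update()" and friends once the five
base methods below exist.

    from collections.abc import MutableMapping

    class CaseFoldDict(MutableMapping):
        """Minimal mapping: define five methods, inherit the rest."""
        def __init__(self):
            self._data = {}
        def __getitem__(self, key):
            return self._data[key.casefold()]
        def __setitem__(self, key, value):
            self._data[key.casefold()] = value
        def __delitem__(self, key):
            del self._data[key.casefold()]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    d = CaseFoldDict()
    d['Spam'] = 1
    print(d.get('SPAM'), 'spam' in d)   # 1 True
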
                  '\n'
                  'object.__len__(self)\n'
                  '\n'
@@ -9495,22 +10609,23 @@
                  'object.__getitem__(self, key)\n'
                  '\n'
                  '   Called to implement evaluation of "self[key]". For '
-                 'sequence types,\n'
-                 '   the accepted keys should be integers and slice objects.  '
-                 'Note that\n'
-                 '   the special interpretation of negative indexes (if the '
-                 'class wishes\n'
-                 '   to emulate a sequence type) is up to the "__getitem__()" '
-                 'method. If\n'
-                 '   *key* is of an inappropriate type, "TypeError" may be '
-                 'raised; if of\n'
-                 '   a value outside the set of indexes for the sequence '
-                 '(after any\n'
-                 '   special interpretation of negative values), "IndexError" '
+                 '*sequence*\n'
+                 '   types, the accepted keys should be integers and slice '
+                 'objects.\n'
+                 '   Note that the special interpretation of negative indexes '
+                 '(if the\n'
+                 '   class wishes to emulate a *sequence* type) is up to the\n'
+                 '   "__getitem__()" method. If *key* is of an inappropriate '
+                 'type,\n'
+                 '   "TypeError" may be raised; if of a value outside the set '
+                 'of indexes\n'
+                 '   for the sequence (after any special interpretation of '
+                 'negative\n'
+                 '   values), "IndexError" should be raised. For *mapping* '
+                 'types, if\n'
+                 '   *key* is missing (not in the container), "KeyError" '
                  'should be\n'
-                 '   raised. For mapping types, if *key* is missing (not in '
-                 'the\n'
-                 '   container), "KeyError" should be raised.\n'
+                 '   raised.\n'
                  '\n'
                  '   Note:\n'
                  '\n'
@@ -9520,6 +10635,14 @@
                  'the\n'
                  '     sequence.\n'
                  '\n'
+                 '   Note:\n'
+                 '\n'
+                 '     When subscripting a *class*, the special class method\n'
+                 '     "__class_getitem__()" may be called instead of '
+                 '"__getitem__()".\n'
+                 '     See __class_getitem__ versus __getitem__ for more '
+                 'details.\n'
+                 '\n'
                  'object.__setitem__(self, key, value)\n'
                  '\n'
                  '   Called to implement assignment to "self[key]".  Same note '
@@ -9555,19 +10678,13 @@
                  '\n'
                  'object.__iter__(self)\n'
                  '\n'
-                 '   This method is called when an iterator is required for a '
-                 'container.\n'
-                 '   This method should return a new iterator object that can '
-                 'iterate\n'
-                 '   over all the objects in the container.  For mappings, it '
-                 'should\n'
-                 '   iterate over the keys of the container.\n'
-                 '\n'
-                 '   Iterator objects also need to implement this method; they '
-                 'are\n'
-                 '   required to return themselves.  For more information on '
-                 'iterator\n'
-                 '   objects, see Iterator Types.\n'
+                 '   This method is called when an *iterator* is required for '
+                 'a\n'
+                 '   container. This method should return a new iterator '
+                 'object that can\n'
+                 '   iterate over all the objects in the container.  For '
+                 'mappings, it\n'
+                 '   should iterate over the keys of the container.\n'
                  '\n'
                  'object.__reversed__(self)\n'
                  '\n'
@@ -9760,16 +10877,6 @@
                  'the data\n'
                  '   model.\n'
                  '\n'
-                 '   Note:\n'
-                 '\n'
-                 '     Due to a bug in the dispatching mechanism for "**=", a '
-                 'class that\n'
-                 '     defines "__ipow__()" but returns "NotImplemented" would '
-                 'fail to\n'
-                 '     fall back to "x.__pow__(y)" and "y.__rpow__(x)". This '
-                 'bug is\n'
-                 '     fixed in Python 3.10.\n'
-                 '\n'
                  'object.__neg__(self)\n'
                  'object.__pos__(self)\n'
                  'object.__abs__(self)\n'
@@ -9820,9 +10927,9 @@
                  '   of the object truncated to an "Integral" (typically an '
                  '"int").\n'
                  '\n'
-                 '   If "__int__()" is not defined then the built-in function '
-                 '"int()"\n'
-                 '   falls back to "__trunc__()".\n'
+                 '   The built-in function "int()" falls back to "__trunc__()" '
+                 'if\n'
+                 '   neither "__int__()" nor "__index__()" is defined.\n'
                  '\n'
                  '\n'
                  'With Statement Context Managers\n'
@@ -9888,6 +10995,51 @@
                  '     statement.\n'
                  '\n'
                  '\n'
+                 'Customizing positional arguments in class pattern matching\n'
+                 '==========================================================\n'
+                 '\n'
+                 'When using a class name in a pattern, positional arguments '
+                 'in the\n'
+                 'pattern are not allowed by default, i.e. "case MyClass(x, '
+                 'y)" is\n'
+                 'typically invalid without special support in "MyClass". To '
+                 'be able to\n'
+                 'use that kind of pattern, the class needs to define a\n'
+                 '*__match_args__* attribute.\n'
+                 '\n'
+                 'object.__match_args__\n'
+                 '\n'
+                 '   This class variable can be assigned a tuple of strings. '
+                 'When this\n'
+                 '   class is used in a class pattern with positional '
+                 'arguments, each\n'
+                 '   positional argument will be converted into a keyword '
+                 'argument,\n'
+                 '   using the corresponding value in *__match_args__* as the '
+                 'keyword.\n'
+                 '   The absence of this attribute is equivalent to setting it '
+                 'to "()".\n'
+                 '\n'
+                 'For example, if "MyClass.__match_args__" is "("left", '
+                 '"center",\n'
+                 '"right")" that means that "case MyClass(x, y)" is equivalent '
+                 'to "case\n'
+                 'MyClass(left=x, center=y)". Note that the number of '
+                 'arguments in the\n'
+                 'pattern must be smaller than or equal to the number of '
+                 'elements in\n'
+                 '*__match_args__*; if it is larger, the pattern match attempt '
+                 'will\n'
+                 'raise a "TypeError".\n'
+                 '\n'
+                 'New in version 3.10.\n'
+                 '\n'
+                 'See also:\n'
+                 '\n'
+                 '  **PEP 634** - Structural Pattern Matching\n'
+                 '     The specification for the Python "match" statement.\n'
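
A brief sketch of the mechanism (the Point class is an invented example;
requires Python 3.10 or later):

    class Point:
        __match_args__ = ('x', 'y')
        def __init__(self, x, y):
            self.x, self.y = x, y

    match Point(1, 2):
        case Point(a, b):    # rewritten as Point(x=a, y=b) via __match_args__
            print(a, b)      # prints: 1 2
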
+                 '\n'
+                 '\n'
                  'Special method lookup\n'
                  '=====================\n'
                  '\n'
@@ -10058,7 +11210,7 @@
                    '*start* and\n'
                    '   *end* are interpreted as in slice notation.\n'
                    '\n'
-                   'str.encode(encoding="utf-8", errors="strict")\n'
+                   "str.encode(encoding='utf-8', errors='strict')\n"
                    '\n'
                    '   Return an encoded version of the string as a bytes '
                    'object. Default\n'
@@ -10307,9 +11459,9 @@
                    '      >>> from keyword import iskeyword\n'
                    '\n'
                    "      >>> 'hello'.isidentifier(), iskeyword('hello')\n"
-                   '      True, False\n'
+                   '      (True, False)\n'
                    "      >>> 'def'.isidentifier(), iskeyword('def')\n"
-                   '      True, True\n'
+                   '      (True, True)\n'
                    '\n'
                    'str.islower()\n'
                    '\n'
@@ -10564,7 +11716,7 @@
                    'followed by\n'
                    '   the string itself.\n'
                    '\n'
-                   'str.rsplit(sep=None, maxsplit=-1)\n'
+                   'str.rsplit(sep=None, maxsplit=- 1)\n'
                    '\n'
                    '   Return a list of the words in the string, using *sep* '
                    'as the\n'
@@ -10605,7 +11757,7 @@
                    "      >>> 'Monty Python'.removesuffix(' Python')\n"
                    "      'Monty'\n"
                    '\n'
-                   'str.split(sep=None, maxsplit=-1)\n'
+                   'str.split(sep=None, maxsplit=- 1)\n'
                    '\n'
                    '   Return a list of the words in the string, using *sep* '
                    'as the\n'
@@ -10660,7 +11812,7 @@
                    "      >>> '   1   2   3   '.split()\n"
                    "      ['1', '2', '3']\n"
                    '\n'
-                   'str.splitlines([keepends])\n'
+                   'str.splitlines(keepends=False)\n'
                    '\n'
                    '   Return a list of the lines in the string, breaking at '
                    'line\n'
@@ -11141,67 +12293,86 @@
  'subscriptions': 'Subscriptions\n'
                   '*************\n'
                   '\n'
-                  'Subscription of a sequence (string, tuple or list) or '
-                  'mapping\n'
-                  '(dictionary) object usually selects an item from the '
-                  'collection:\n'
+                  'The subscription of an instance of a container class will '
+                  'generally\n'
+                  'select an element from the container. The subscription of a '
+                  '*generic\n'
+                  'class* will generally return a GenericAlias object.\n'
                   '\n'
                   '   subscription ::= primary "[" expression_list "]"\n'
                   '\n'
+                  'When an object is subscripted, the interpreter will '
+                  'evaluate the\n'
+                  'primary and the expression list.\n'
+                  '\n'
                   'The primary must evaluate to an object that supports '
-                  'subscription\n'
-                  '(lists or dictionaries for example).  User-defined objects '
-                  'can support\n'
-                  'subscription by defining a "__getitem__()" method.\n'
+                  'subscription. An\n'
+                  'object may support subscription through defining one or '
+                  'both of\n'
+                  '"__getitem__()" and "__class_getitem__()". When the primary '
+                  'is\n'
+                  'subscripted, the evaluated result of the expression list '
+                  'will be\n'
+                  'passed to one of these methods. For more details on when\n'
+                  '"__class_getitem__" is called instead of "__getitem__", '
+                  'see\n'
+                  '__class_getitem__ versus __getitem__.\n'
+                  '\n'
+                  'If the expression list contains at least one comma, it will '
+                  'evaluate\n'
+                  'to a "tuple" containing the items of the expression list. '
+                  'Otherwise,\n'
+                  'the expression list will evaluate to the value of the '
+                  'list’s sole\n'
+                  'member.\n'
                   '\n'
                   'For built-in objects, there are two types of objects that '
                   'support\n'
-                  'subscription:\n'
+                  'subscription via "__getitem__()":\n'
                   '\n'
-                  'If the primary is a mapping, the expression list must '
-                  'evaluate to an\n'
-                  'object whose value is one of the keys of the mapping, and '
+                  '1. Mappings. If the primary is a *mapping*, the expression '
+                  'list must\n'
+                  '   evaluate to an object whose value is one of the keys of '
                   'the\n'
-                  'subscription selects the value in the mapping that '
-                  'corresponds to that\n'
-                  'key.  (The expression list is a tuple except if it has '
-                  'exactly one\n'
-                  'item.)\n'
+                  '   mapping, and the subscription selects the value in the '
+                  'mapping that\n'
+                  '   corresponds to that key. An example of a builtin mapping '
+                  'class is\n'
+                  '   the "dict" class.\n'
                   '\n'
-                  'If the primary is a sequence, the expression list must '
-                  'evaluate to an\n'
-                  'integer or a slice (as discussed in the following '
-                  'section).\n'
+                  '2. Sequences. If the primary is a *sequence*, the '
+                  'expression list must\n'
+                  '   evaluate to an "int" or a "slice" (as discussed in the '
+                  'following\n'
+                  '   section). Examples of builtin sequence classes include '
+                  'the "str",\n'
+                  '   "list" and "tuple" classes.\n'
                   '\n'
                   'The formal syntax makes no special provision for negative '
                   'indices in\n'
-                  'sequences; however, built-in sequences all provide a '
+                  '*sequences*. However, built-in sequences all provide a '
                   '"__getitem__()"\n'
                   'method that interprets negative indices by adding the '
                   'length of the\n'
-                  'sequence to the index (so that "x[-1]" selects the last '
-                  'item of "x").\n'
-                  'The resulting value must be a nonnegative integer less than '
-                  'the number\n'
-                  'of items in the sequence, and the subscription selects the '
-                  'item whose\n'
-                  'index is that value (counting from zero). Since the support '
-                  'for\n'
-                  'negative indices and slicing occurs in the object’s '
-                  '"__getitem__()"\n'
-                  'method, subclasses overriding this method will need to '
-                  'explicitly add\n'
-                  'that support.\n'
+                  'sequence to the index so that, for example, "x[-1]" selects '
+                  'the last\n'
+                  'item of "x". The resulting value must be a nonnegative '
+                  'integer less\n'
+                  'than the number of items in the sequence, and the '
+                  'subscription selects\n'
+                  'the item whose index is that value (counting from zero). '
+                  'Since the\n'
+                  'support for negative indices and slicing occurs in the '
+                  'object’s\n'
+                  '"__getitem__()" method, subclasses overriding this method '
+                  'will need to\n'
+                  'explicitly add that support.\n'
                   '\n'
-                  'A string’s items are characters.  A character is not a '
-                  'separate data\n'
-                  'type but a string of exactly one character.\n'
-                  '\n'
-                  'Subscription of certain *classes* or *types* creates a '
-                  'generic alias.\n'
-                  'In this case, user-defined classes can support subscription '
-                  'by\n'
-                  'providing a "__class_getitem__()" classmethod.\n',
+                  'A "string" is a special kind of sequence whose items are '
+                  '*characters*.\n'
+                  'A character is not a separate data type but a string of '
+                  'exactly one\n'
+                  'character.\n',
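
Two quick illustrations of the rules above: the comma rule for the
expression list, and the negative-index support that built-in sequences
add in "__getitem__()".

    d = {}
    d['a', 'b'] = 1        # the expression list 'a', 'b' evaluates to a tuple
    print(d[('a', 'b')])   # 1, the same key written as an explicit tuple

    s = 'python'
    print(s[-1])           # 'n', str.__getitem__ adds len(s) to the index
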
  'truth': 'Truth Value Testing\n'
           '*******************\n'
           '\n'
@@ -11259,7 +12430,8 @@
         'object is “compatible” with the exception.  An object is compatible\n'
         'with an exception if it is the class or a base class of the '
         'exception\n'
-        'object or a tuple containing an item compatible with the exception.\n'
+        'object, or a tuple containing an item that is the class or a base\n'
+        'class of the exception object.\n'
         '\n'
         'If no except clause matches the exception, the search for an '
         'exception\n'
@@ -11314,9 +12486,31 @@
         'the\n'
         'exception class, the exception instance and a traceback object (see\n'
         'section The standard type hierarchy) identifying the point in the\n'
-        'program where the exception occurred.  "sys.exc_info()" values are\n'
-        'restored to their previous values (before the call) when returning\n'
-        'from a function that handled an exception.\n'
+        'program where the exception occurred.  The details about the '
+        'exception\n'
+        'accessed via "sys.exc_info()" are restored to their previous values\n'
+        'when leaving an exception handler:\n'
+        '\n'
+        '   >>> print(sys.exc_info())\n'
+        '   (None, None, None)\n'
+        '   >>> try:\n'
+        '   ...     raise TypeError\n'
+        '   ... except:\n'
+        '   ...     print(sys.exc_info())\n'
+        '   ...     try:\n'
+        '   ...          raise ValueError\n'
+        '   ...     except:\n'
+        '   ...         print(sys.exc_info())\n'
+        '   ...     print(sys.exc_info())\n'
+        '   ...\n'
+        "   (<class 'TypeError'>, TypeError(), <traceback object at "
+        '0x10efad080>)\n'
+        "   (<class 'ValueError'>, ValueError(), <traceback object at "
+        '0x10efad040>)\n'
+        "   (<class 'TypeError'>, TypeError(), <traceback object at "
+        '0x10efad080>)\n'
+        '   >>> print(sys.exc_info())\n'
+        '   (None, None, None)\n'
         '\n'
         'The optional "else" clause is executed if the control flow leaves '
         'the\n'
@@ -11480,7 +12674,6 @@
           '      There are two types of integers:\n'
           '\n'
           '      Integers ("int")\n'
-          '\n'
           '         These represent numbers in an unlimited range, subject to\n'
           '         available (virtual) memory only.  For the purpose of '
           'shift\n'
@@ -11577,7 +12770,7 @@
           '         points. All the code points in the range "U+0000 - '
           'U+10FFFF"\n'
           '         can be represented in a string.  Python doesn’t have a '
-          '"char"\n'
+          '*char*\n'
           '         type; instead, every code point in the string is '
           'represented\n'
           '         as a string object with length "1".  The built-in '
@@ -11837,7 +13030,13 @@
           '|             |\n'
           '      |                           | and "\'return\'" for the '
           'return   |             |\n'
-          '      |                           | annotation, if provided.        '
+          '      |                           | annotation, if provided.  For   '
+          '|             |\n'
+          '      |                           | more information on working     '
+          '|             |\n'
+          '      |                           | with this attribute, see        '
+          '|             |\n'
+          '      |                           | Annotations Best Practices.     '
           '|             |\n'
           '      '
           '+---------------------------+---------------------------------+-------------+\n'
@@ -11958,20 +13157,18 @@
           '      A function or method which uses the "yield" statement (see\n'
           '      section The yield statement) is called a *generator '
           'function*.\n'
-          '      Such a function, when called, always returns an iterator '
-          'object\n'
-          '      which can be used to execute the body of the function:  '
-          'calling\n'
-          '      the iterator’s "iterator.__next__()" method will cause the\n'
-          '      function to execute until it provides a value using the '
-          '"yield"\n'
-          '      statement.  When the function executes a "return" statement '
-          'or\n'
-          '      falls off the end, a "StopIteration" exception is raised and '
-          'the\n'
-          '      iterator will have reached the end of the set of values to '
-          'be\n'
-          '      returned.\n'
+          '      Such a function, when called, always returns an *iterator*\n'
+          '      object which can be used to execute the body of the '
+          'function:\n'
+          '      calling the iterator’s "iterator.__next__()" method will '
+          'cause\n'
+          '      the function to execute until it provides a value using the\n'
+          '      "yield" statement.  When the function executes a "return"\n'
+          '      statement or falls off the end, a "StopIteration" exception '
+          'is\n'
+          '      raised and the iterator will have reached the end of the set '
+          'of\n'
+          '      values to be returned.\n'
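
A compact sketch of that lifecycle:

    def countdown(n):
        while n:
            yield n
            n -= 1

    it = countdown(2)
    print(next(it), next(it))   # 2 1
    try:
        next(it)                # the function falls off the end here,
    except StopIteration:
        print('exhausted')      # so StopIteration is raised
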
           '\n'
           '   Coroutine functions\n'
           '      A function or method which is defined using "async def" is\n'
@@ -11987,18 +13184,18 @@
           '      which uses the "yield" statement is called a *asynchronous\n'
           '      generator function*.  Such a function, when called, returns '
           'an\n'
-          '      asynchronous iterator object which can be used in an "async '
-          'for"\n'
-          '      statement to execute the body of the function.\n'
+          '      *asynchronous iterator* object which can be used in an '
+          '"async\n'
+          '      for" statement to execute the body of the function.\n'
           '\n'
-          '      Calling the asynchronous iterator’s "aiterator.__anext__()"\n'
-          '      method will return an *awaitable* which when awaited will\n'
-          '      execute until it provides a value using the "yield" '
-          'expression.\n'
-          '      When the function executes an empty "return" statement or '
-          'falls\n'
-          '      off the end, a "StopAsyncIteration" exception is raised and '
+          '      Calling the asynchronous iterator’s "aiterator.__anext__" '
+          'method\n'
+          '      will return an *awaitable* which when awaited will execute '
+          'until\n'
+          '      it provides a value using the "yield" expression.  When the\n'
+          '      function executes an empty "return" statement or falls off '
           'the\n'
+          '      end, a "StopAsyncIteration" exception is raised and the\n'
           '      asynchronous iterator will have reached the end of the set '
           'of\n'
           '      values to be yielded.\n'
@@ -12062,20 +13259,34 @@
           '   Attribute assignment updates the module’s namespace dictionary,\n'
           '   e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n'
           '\n'
-          '   Predefined (writable) attributes: "__name__" is the module’s '
-          'name;\n'
-          '   "__doc__" is the module’s documentation string, or "None" if\n'
-          '   unavailable; "__annotations__" (optional) is a dictionary\n'
-          '   containing *variable annotations* collected during module body\n'
-          '   execution; "__file__" is the pathname of the file from which '
+          '   Predefined (writable) attributes:\n'
+          '\n'
+          '      "__name__"\n'
+          '         The module’s name.\n'
+          '\n'
+          '      "__doc__"\n'
+          '         The module’s documentation string, or "None" if '
+          'unavailable.\n'
+          '\n'
+          '      "__file__"\n'
+          '         The pathname of the file from which the module was loaded, '
+          'if\n'
+          '         it was loaded from a file. The "__file__" attribute may '
+          'be\n'
+          '         missing for certain types of modules, such as C modules '
+          'that\n'
+          '         are statically linked into the interpreter.  For '
+          'extension\n'
+          '         modules loaded dynamically from a shared library, it’s '
           'the\n'
-          '   module was loaded, if it was loaded from a file. The "__file__"\n'
-          '   attribute may be missing for certain types of modules, such as '
-          'C\n'
-          '   modules that are statically linked into the interpreter; for\n'
-          '   extension modules loaded dynamically from a shared library, it '
-          'is\n'
-          '   the pathname of the shared library file.\n'
+          '         pathname of the shared library file.\n'
+          '\n'
+          '      "__annotations__"\n'
+          '         A dictionary containing *variable annotations* collected\n'
+          '         during module body execution.  For best practices on '
+          'working\n'
+          '         with "__annotations__", please see Annotations Best\n'
+          '         Practices.\n'
           '\n'
           '   Special read-only attribute: "__dict__" is the module’s '
           'namespace\n'
@@ -12133,20 +13344,31 @@
           'instance\n'
           '   (see below).\n'
           '\n'
-          '   Special attributes: "__name__" is the class name; "__module__" '
-          'is\n'
-          '   the module name in which the class was defined; "__dict__" is '
-          'the\n'
-          '   dictionary containing the class’s namespace; "__bases__" is a '
-          'tuple\n'
-          '   containing the base classes, in the order of their occurrence '
-          'in\n'
-          '   the base class list; "__doc__" is the class’s documentation '
-          'string,\n'
-          '   or "None" if undefined; "__annotations__" (optional) is a\n'
-          '   dictionary containing *variable annotations* collected during '
-          'class\n'
-          '   body execution.\n'
+          '   Special attributes:\n'
+          '\n'
+          '      "__name__"\n'
+          '         The class name.\n'
+          '\n'
+          '      "__module__"\n'
+          '         The name of the module in which the class was defined.\n'
+          '\n'
+          '      "__dict__"\n'
+          '         The dictionary containing the class’s namespace.\n'
+          '\n'
+          '      "__bases__"\n'
+          '         A tuple containing the base classes, in the order of '
+          'their\n'
+          '         occurrence in the base class list.\n'
+          '\n'
+          '      "__doc__"\n'
+          '         The class’s documentation string, or "None" if undefined.\n'
+          '\n'
+          '      "__annotations__"\n'
+          '         A dictionary containing *variable annotations* collected\n'
+          '         during class body execution.  For best practices on '
+          'working\n'
+          '         with "__annotations__", please see Annotations Best\n'
+          '         Practices.\n'
           '\n'
           'Class instances\n'
           '   A class instance is created by calling a class object (see '
@@ -12307,6 +13529,10 @@
           '      gives the precise instruction (this is an index into the\n'
           '      bytecode string of the code object).\n'
           '\n'
+          '      Accessing "f_code" raises an auditing event '
+          '"object.__getattr__"\n'
+          '      with arguments "obj" and ""f_code"".\n'
+          '\n'
           '      Special writable attributes: "f_trace", if not "None", is a\n'
           '      function called for various events during code execution '
           '(this\n'
@@ -12390,6 +13616,9 @@
           '      the exception occurred in a "try" statement with no matching\n'
           '      except clause or with a finally clause.\n'
           '\n'
+          '      Accessing "tb_frame" raises an auditing event\n'
+          '      "object.__getattr__" with arguments "obj" and ""tb_frame"".\n'
+          '\n'
           '      Special writable attribute: "tb_next" is the next level in '
           'the\n'
           '      stack trace (towards the frame where the exception occurred), '
@@ -12440,9 +13669,8 @@
           '      object actually returned is the wrapped object, which is not\n'
           '      subject to any further transformation. Static method objects '
           'are\n'
-          '      not themselves callable, although the objects they wrap '
-          'usually\n'
-          '      are. Static method objects are created by the built-in\n'
+          '      also callable. Static method objects are created by the '
+          'built-in\n'
           '      "staticmethod()" constructor.\n'
           '\n'
           '   Class method objects\n'
@@ -12511,9 +13739,9 @@
                  '"dict"\n'
                  'constructor.\n'
                  '\n'
-                 'class dict(**kwarg)\n'
-                 'class dict(mapping, **kwarg)\n'
-                 'class dict(iterable, **kwarg)\n'
+                 'class dict(**kwargs)\n'
+                 'class dict(mapping, **kwargs)\n'
+                 'class dict(iterable, **kwargs)\n'
                  '\n'
                  '   Return a new dictionary initialized from an optional '
                  'positional\n'
@@ -12907,6 +14135,14 @@
                  '   Changed in version 3.8: Dictionary views are now '
                  'reversible.\n'
                  '\n'
+                 'dictview.mapping\n'
+                 '\n'
+                 '   Return a "types.MappingProxyType" that wraps the '
+                 'original\n'
+                 '   dictionary to which the view refers.\n'
+                 '\n'
+                 '   New in version 3.10.\n'
+                 '\n'
                  'Keys views are set-like since their entries are unique and '
                  'hashable.\n'
                  'If all values are hashable, so that "(key, value)" pairs are '
@@ -12952,7 +14188,15 @@
                  "   >>> keys & {'eggs', 'bacon', 'salad'}\n"
                  "   {'bacon'}\n"
                  "   >>> keys ^ {'sausage', 'juice'}\n"
-                 "   {'juice', 'sausage', 'bacon', 'spam'}\n",
+                 "   {'juice', 'sausage', 'bacon', 'spam'}\n"
+                 '\n'
+                 '   >>> # get back a read-only proxy for the original '
+                 'dictionary\n'
+                 '   >>> values.mapping\n'
+                 "   mappingproxy({'eggs': 2, 'sausage': 1, 'bacon': 1, "
+                 "'spam': 500})\n"
+                 "   >>> values.mapping['spam']\n"
+                 '   500\n',
  'typesmethods': 'Methods\n'
                  '*******\n'
                  '\n'
@@ -13147,6 +14391,14 @@
              'Comparisons in\n'
              'the language reference.)\n'
              '\n'
+             'Forward and reversed iterators over mutable sequences access '
+             'values\n'
+             'using an index.  That index will continue to march forward (or\n'
+             'backward) even if the underlying sequence is mutated.  The '
+             'iterator\n'
+             'terminates only when an "IndexError" or a "StopIteration" is\n'
+             'encountered (or when the index drops below zero).\n'
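
A short demonstration of that index-based behavior:

    s = [1, 2, 3]
    it = iter(s)
    print(next(it))   # 1; the iterator's internal index is now 1
    s.insert(0, 0)    # s becomes [0, 1, 2, 3]
    print(next(it))   # 1 again; the iterator re-reads what sits at index 1
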
+             '\n'
              'Notes:\n'
              '\n'
              '1. While the "in" and "not in" operations are used only for '
@@ -13378,7 +14630,7 @@
              '|                                | "s[i:i] = '
              '[x]")                  |                       |\n'
              '+--------------------------------+----------------------------------+-----------------------+\n'
-             '| "s.pop([i])"                   | retrieves the item at *i* '
+             '| "s.pop()" or "s.pop(i)"        | retrieves the item at *i* '
              'and    | (2)                   |\n'
              '|                                | also removes it from '
              '*s*         |                       |\n'
@@ -13618,7 +14870,8 @@
              '\n'
              '   The arguments to the range constructor must be integers '
              '(either\n'
-             '   built-in "int" or any object that implements the "__index__"\n'
+             '   built-in "int" or any object that implements the '
+             '"__index__()"\n'
              '   special method).  If the *step* argument is omitted, it '
              'defaults to\n'
              '   "1". If the *start* argument is omitted, it defaults to "0". '
@@ -13841,7 +15094,7 @@
                      '|                                | "s[i:i] = '
                      '[x]")                  |                       |\n'
                      '+--------------------------------+----------------------------------+-----------------------+\n'
-                     '| "s.pop([i])"                   | retrieves the item at '
+                     '| "s.pop()" or "s.pop(i)"        | retrieves the item at '
                      '*i* and    | (2)                   |\n'
                      '|                                | also removes it from '
                      '*s*         |                       |\n'
@@ -13906,15 +15159,21 @@
           '   u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n'
           '\n'
           'The unary "-" (minus) operator yields the negation of its numeric\n'
-          'argument.\n'
+          'argument; the operation can be overridden with the "__neg__()" '
+          'special\n'
+          'method.\n'
           '\n'
           'The unary "+" (plus) operator yields its numeric argument '
-          'unchanged.\n'
+          'unchanged;\n'
+          'the operation can be overridden with the "__pos__()" special '
+          'method.\n'
           '\n'
           'The unary "~" (invert) operator yields the bitwise inversion of '
           'its\n'
           'integer argument.  The bitwise inversion of "x" is defined as\n'
-          '"-(x+1)".  It only applies to integral numbers.\n'
+          '"-(x+1)".  It only applies to integral numbers or to custom '
+          'objects\n'
+          'that override the "__invert__()" special method.\n'
           '\n'
           'In all three cases, if the argument does not have the proper type, '
           'a\n'
@@ -13952,8 +15211,10 @@
          'usage\n'
          'patterns to be encapsulated for convenient reuse.\n'
          '\n'
-         '   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
-         '   with_item ::= expression ["as" target]\n'
+         '   with_stmt          ::= "with" ( "(" with_stmt_contents ","? ")" | '
+         'with_stmt_contents ) ":" suite\n'
+         '   with_stmt_contents ::= with_item ("," with_item)*\n'
+         '   with_item          ::= expression ["as" target]\n'
          '\n'
          'The execution of the "with" statement with one “item” proceeds as\n'
          'follows:\n'
@@ -14039,8 +15300,20 @@
          '       with B() as b:\n'
          '           SUITE\n'
          '\n'
+         'You can also write multi-item context managers in multiple lines if\n'
+         'the items are surrounded by parentheses. For example:\n'
+         '\n'
+         '   with (\n'
+         '       A() as a,\n'
+         '       B() as b,\n'
+         '   ):\n'
+         '       SUITE\n'
+         '\n'
          'Changed in version 3.1: Support for multiple context expressions.\n'
          '\n'
+         'Changed in version 3.10: Support for using grouping parentheses to\n'
+         'break the statement in multiple lines.\n'
+         '\n'
          'See also:\n'
          '\n'
          '  **PEP 343** - The “with” statement\n'
diff --git a/Lib/random.py b/Lib/random.py
index a6454f5..1310a2d 100644
--- a/Lib/random.py
+++ b/Lib/random.py
@@ -48,9 +48,10 @@
 from warnings import warn as _warn
 from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
 from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
-from math import tau as TWOPI, floor as _floor
+from math import tau as TWOPI, floor as _floor, isfinite as _isfinite
 from os import urandom as _urandom
 from _collections_abc import Set as _Set, Sequence as _Sequence
+from operator import index as _index
 from itertools import accumulate as _accumulate, repeat as _repeat
 from bisect import bisect as _bisect
 import os as _os
@@ -77,6 +78,7 @@
     "lognormvariate",
     "normalvariate",
     "paretovariate",
+    "randbytes",
     "randint",
     "random",
     "randrange",
@@ -95,6 +97,7 @@
 SG_MAGICCONST = 1.0 + _log(4.5)
 BPF = 53        # Number of bits in a float
 RECIP_BPF = 2 ** -BPF
+_ONE = 1
 
 
 class Random(_random.Random):
@@ -151,8 +154,7 @@
         elif version == 2 and isinstance(a, (str, bytes, bytearray)):
             if isinstance(a, str):
                 a = a.encode()
-            a += _sha512(a).digest()
-            a = int.from_bytes(a, 'big')
+            a = int.from_bytes(a + _sha512(a).digest(), 'big')
 
         elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)):
             _warn('Seeding based on hashing is deprecated\n'
@@ -287,7 +289,7 @@
 
     ## -------------------- integer methods  -------------------
 
-    def randrange(self, start, stop=None, step=1):
+    def randrange(self, start, stop=None, step=_ONE):
         """Choose a random item from range(start, stop[, step]).
 
         This fixes the problem with randint() which includes the
@@ -297,38 +299,68 @@
 
         # This code is a bit messy to make it fast for the
         # common case while still doing adequate error checking.
-        istart = int(start)
-        if istart != start:
-            raise ValueError("non-integer arg 1 for randrange()")
+        try:
+            istart = _index(start)
+        except TypeError:
+            istart = int(start)
+            if istart != start:
+                _warn('randrange() will raise TypeError in the future',
+                      DeprecationWarning, 2)
+                raise ValueError("non-integer arg 1 for randrange()")
+            _warn('non-integer arguments to randrange() have been deprecated '
+                  'since Python 3.10 and will be removed in a subsequent '
+                  'version',
+                  DeprecationWarning, 2)
         if stop is None:
+            # We don't check for "step != 1" because it hasn't been
+            # type checked and converted to an integer yet.
+            if step is not _ONE:
+                raise TypeError('Missing a non-None stop argument')
             if istart > 0:
                 return self._randbelow(istart)
             raise ValueError("empty range for randrange()")
 
         # stop argument supplied.
-        istop = int(stop)
-        if istop != stop:
-            raise ValueError("non-integer stop for randrange()")
+        try:
+            istop = _index(stop)
+        except TypeError:
+            istop = int(stop)
+            if istop != stop:
+                _warn('randrange() will raise TypeError in the future',
+                      DeprecationWarning, 2)
+                raise ValueError("non-integer stop for randrange()")
+            _warn('non-integer arguments to randrange() have been deprecated '
+                  'since Python 3.10 and will be removed in a subsequent '
+                  'version',
+                  DeprecationWarning, 2)
         width = istop - istart
-        if step == 1 and width > 0:
-            return istart + self._randbelow(width)
-        if step == 1:
+        try:
+            istep = _index(step)
+        except TypeError:
+            istep = int(step)
+            if istep != step:
+                _warn('randrange() will raise TypeError in the future',
+                      DeprecationWarning, 2)
+                raise ValueError("non-integer step for randrange()")
+            _warn('non-integer arguments to randrange() have been deprecated '
+                  'since Python 3.10 and will be removed in a subsequent '
+                  'version',
+                  DeprecationWarning, 2)
+        # Fast path.
+        if istep == 1:
+            if width > 0:
+                return istart + self._randbelow(width)
             raise ValueError("empty range for randrange() (%d, %d, %d)" % (istart, istop, width))
 
         # Non-unit step argument supplied.
-        istep = int(step)
-        if istep != step:
-            raise ValueError("non-integer step for randrange()")
         if istep > 0:
             n = (width + istep - 1) // istep
         elif istep < 0:
             n = (width + istep + 1) // istep
         else:
             raise ValueError("zero step for randrange()")
-
         if n <= 0:
             raise ValueError("empty range for randrange()")
-
         return istart + istep * self._randbelow(n)
 
     def randint(self, a, b):
@@ -424,13 +456,14 @@
         # too many calls to _randbelow(), making them slower and
         # causing them to eat more entropy than necessary.
 
-        if isinstance(population, _Set):
-            _warn('Sampling from a set deprecated\n'
-                  'since Python 3.9 and will be removed in a subsequent version.',
-                  DeprecationWarning, 2)
-            population = tuple(population)
         if not isinstance(population, _Sequence):
-            raise TypeError("Population must be a sequence.  For dicts or sets, use sorted(d).")
+            if isinstance(population, _Set):
+                _warn('Sampling from a set deprecated\n'
+                      'since Python 3.9 and will be removed in a subsequent version.',
+                      DeprecationWarning, 2)
+                population = tuple(population)
+            else:
+                raise TypeError("Population must be a sequence.  For dicts or sets, use sorted(d).")
         n = len(population)
         if counts is not None:
             cum_counts = list(_accumulate(counts))
@@ -441,7 +474,7 @@
                 raise TypeError('Counts must be integers')
             if total <= 0:
                 raise ValueError('Total of counts must be greater than zero')
-            selections = sample(range(total), k=k)
+            selections = self.sample(range(total), k=k)
             bisect = _bisect
             return [population[bisect(cum_counts, s)] for s in selections]
         randbelow = self._randbelow
@@ -484,7 +517,15 @@
                 floor = _floor
                 n += 0.0    # convert to float for a small speed improvement
                 return [population[floor(random() * n)] for i in _repeat(None, k)]
-            cum_weights = list(_accumulate(weights))
+            try:
+                cum_weights = list(_accumulate(weights))
+            except TypeError:
+                if not isinstance(weights, int):
+                    raise
+                k = weights
+                raise TypeError(
+                    f'The number of choices must be a keyword argument: {k=}'
+                ) from None
         elif weights is not None:
             raise TypeError('Cannot specify both weights and cumulative weights')
         if len(cum_weights) != n:
@@ -492,6 +533,8 @@
         total = cum_weights[-1] + 0.0   # convert to float
         if total <= 0.0:
             raise ValueError('Total of weights must be greater than zero')
+        if not _isfinite(total):
+            raise ValueError('Total of weights must be finite')
         bisect = _bisect
         hi = n - 1
         return [population[bisect(cum_weights, random() * total, 0, hi)]
@@ -682,7 +725,7 @@
             bbb = alpha - LOG4
             ccc = alpha + ainv
 
-            while 1:
+            while True:
                 u1 = random()
                 if not 1e-7 < u1 < 0.9999999:
                     continue
@@ -749,7 +792,7 @@
         # Jain, pg. 495
 
         u = 1.0 - self.random()
-        return 1.0 / u ** (1.0 / alpha)
+        return u ** (-1.0 / alpha)
 
     def weibullvariate(self, alpha, beta):
         """Weibull distribution.
@@ -845,7 +888,7 @@
     from time import perf_counter
 
     t0 = perf_counter()
-    data = [func(*args) for i in range(n)]
+    data = [func(*args) for i in _repeat(None, n)]
     t1 = perf_counter()
 
     xbar = mean(data)
diff --git a/Lib/re.py b/Lib/re.py
index bfb7b1c..1d82b50 100644
--- a/Lib/re.py
+++ b/Lib/re.py
@@ -176,7 +176,6 @@
                 res = f'~{res}'
         return res
     __str__ = object.__str__
-
 globals().update(RegexFlag.__members__)
 
 # sre exception
diff --git a/Lib/rlcompleter.py b/Lib/rlcompleter.py
index bca4a7b..98b7930 100644
--- a/Lib/rlcompleter.py
+++ b/Lib/rlcompleter.py
@@ -31,6 +31,7 @@
 
 import atexit
 import builtins
+import inspect
 import __main__
 
 __all__ = ["Completer"]
@@ -96,7 +97,13 @@
 
     def _callable_postfix(self, val, word):
         if callable(val):
-            word = word + "("
+            word += "("
+            try:
+                if not inspect.signature(val).parameters:
+                    word += ")"
+            except ValueError:
+                pass
+
         return word
 
     def global_matches(self, text):
@@ -169,13 +176,20 @@
                 if (word[:n] == attr and
                     not (noprefix and word[:n+1] == noprefix)):
                     match = "%s.%s" % (expr, word)
-                    try:
-                        val = getattr(thisobject, word)
-                    except Exception:
-                        pass  # Include even if attribute not set
+                    if isinstance(getattr(type(thisobject), word, None),
+                                  property):
+                        # bpo-44752: thisobject.word is a method decorated by
+                        # `@property`. What follows applies a postfix if
+                        # thisobject.word is callable, but now we know that
+                        # this is not callable (because it is a property).
+                        # Also, getattr(thisobject, word) will evaluate the
+                        # property method, which is not desirable.
+                        matches.append(match)
+                        continue
+                    if (value := getattr(thisobject, word, None)) is not None:
+                        matches.append(self._callable_postfix(value, match))
                     else:
-                        match = self._callable_postfix(val, match)
-                    matches.append(match)
+                        matches.append(match)
             if matches or not noprefix:
                 break
             if noprefix == '_':
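
To see the effect of the rlcompleter changes above (the zz_* names are
invented for the demo): a zero-argument callable now completes with both
parentheses, and a property is no longer evaluated just to decide on the
postfix.

    import rlcompleter

    ns = {'zz_ping': lambda: None, 'zz_pong': lambda x: x}
    c = rlcompleter.Completer(ns)
    print(c.complete('zz_pi', 0))   # 'zz_ping()': no parameters, ')' added
    print(c.complete('zz_po', 0))   # 'zz_pong(': arguments still expected
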
diff --git a/Lib/runpy.py b/Lib/runpy.py
index 7e1e1ac..caba121 100644
--- a/Lib/runpy.py
+++ b/Lib/runpy.py
@@ -16,7 +16,6 @@
 import io
 import types
 import os
-from pkgutil import read_code, get_importer
 
 __all__ = [
     "run_module", "run_path",
@@ -233,6 +232,7 @@
 
 def _get_code_from_file(run_name, fname):
     # Check for a compiled file first
+    from pkgutil import read_code
     decoded_path = os.path.abspath(os.fsdecode(fname))
     with io.open_code(decoded_path) as f:
         code = read_code(f)
@@ -255,6 +255,7 @@
     if run_name is None:
         run_name = "<run_path>"
     pkg_name = run_name.rpartition(".")[0]
+    from pkgutil import get_importer
     importer = get_importer(path_name)
     # Trying to avoid importing imp so as to not consume the deprecation warning.
     is_NullImporter = False
diff --git a/Lib/sched.py b/Lib/sched.py
index ff87874..14613cf 100644
--- a/Lib/sched.py
+++ b/Lib/sched.py
@@ -26,23 +26,19 @@
 import time
 import heapq
 from collections import namedtuple
+from itertools import count
 import threading
 from time import monotonic as _time
 
 __all__ = ["scheduler"]
 
-class Event(namedtuple('Event', 'time, priority, action, argument, kwargs')):
-    __slots__ = []
-    def __eq__(s, o): return (s.time, s.priority) == (o.time, o.priority)
-    def __lt__(s, o): return (s.time, s.priority) <  (o.time, o.priority)
-    def __le__(s, o): return (s.time, s.priority) <= (o.time, o.priority)
-    def __gt__(s, o): return (s.time, s.priority) >  (o.time, o.priority)
-    def __ge__(s, o): return (s.time, s.priority) >= (o.time, o.priority)
-
+Event = namedtuple('Event', 'time, priority, sequence, action, argument, kwargs')
 Event.time.__doc__ = ('''Numeric type compatible with the return value of the
 timefunc function passed to the constructor.''')
 Event.priority.__doc__ = ('''Events scheduled for the same time will be executed
 in the order of their priority.''')
+Event.sequence.__doc__ = ('''A continually increasing sequence number that
+    separates events if time and priority are equal.''')
 Event.action.__doc__ = ('''Executing the event means executing
 action(*argument, **kwargs)''')
 Event.argument.__doc__ = ('''argument is a sequence holding the positional
@@ -61,6 +57,7 @@
         self._lock = threading.RLock()
         self.timefunc = timefunc
         self.delayfunc = delayfunc
+        self._sequence_generator = count()
 
     def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel):
         """Enter a new event in the queue at an absolute time.
@@ -71,8 +68,10 @@
         """
         if kwargs is _sentinel:
             kwargs = {}
-        event = Event(time, priority, action, argument, kwargs)
+
         with self._lock:
+            event = Event(time, priority, next(self._sequence_generator),
+                          action, argument, kwargs)
             heapq.heappush(self._queue, event)
         return event # The ID
 
@@ -136,7 +135,8 @@
             with lock:
                 if not q:
                     break
-                time, priority, action, argument, kwargs = q[0]
+                (time, priority, sequence, action,
+                 argument, kwargs) = q[0]
                 now = timefunc()
                 if time > now:
                     delay = True
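
With Event reduced to a plain namedtuple, tuple comparison would fall through to the
action field whenever time and priority tie; the new sequence field, drawn from
itertools.count() under the scheduler lock, keeps the heap ordering total without ever
comparing (possibly uncomparable) callables:

    >>> import sched, time
    >>> s = sched.scheduler(time.monotonic, time.sleep)
    >>> e1 = s.enterabs(0, 1, print, ('first',))
    >>> e2 = s.enterabs(0, 1, print, ('second',))
    >>> e1.sequence < e2.sequence   # equal-time ties broken by insertion order
    True
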
diff --git a/Lib/shelve.py b/Lib/shelve.py
index 5d443a0..e053c39 100644
--- a/Lib/shelve.py
+++ b/Lib/shelve.py
@@ -56,7 +56,7 @@
 the persistent dictionary on disk, if feasible).
 """
 
-from pickle import Pickler, Unpickler
+from pickle import DEFAULT_PROTOCOL, Pickler, Unpickler
 from io import BytesIO
 
 import collections.abc
@@ -85,7 +85,7 @@
                  keyencoding="utf-8"):
         self.dict = dict
         if protocol is None:
-            protocol = 3
+            protocol = DEFAULT_PROTOCOL
         self._protocol = protocol
         self.writeback = writeback
         self.cache = {}
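
Shelf's protocol=None default now tracks pickle rather than pinning protocol 3, so new
shelves pick up the more compact protocol 5 framing on Python 3.8+:

    >>> import pickle
    >>> pickle.DEFAULT_PROTOCOL   # what Shelf uses when protocol is None
    5
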
diff --git a/Lib/shutil.py b/Lib/shutil.py
index f0e833d..37bf98d 100644
--- a/Lib/shutil.py
+++ b/Lib/shutil.py
@@ -32,16 +32,6 @@
 except ImportError:
     _LZMA_SUPPORTED = False
 
-try:
-    from pwd import getpwnam
-except ImportError:
-    getpwnam = None
-
-try:
-    from grp import getgrnam
-except ImportError:
-    getgrnam = None
-
 _WINDOWS = os.name == 'nt'
 posix = nt = None
 if os.name == 'posix':
@@ -261,28 +251,37 @@
     if not follow_symlinks and _islink(src):
         os.symlink(os.readlink(src), dst)
     else:
-        with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
-            # macOS
-            if _HAS_FCOPYFILE:
-                try:
-                    _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA)
-                    return dst
-                except _GiveupOnFastCopy:
-                    pass
-            # Linux
-            elif _USE_CP_SENDFILE:
-                try:
-                    _fastcopy_sendfile(fsrc, fdst)
-                    return dst
-                except _GiveupOnFastCopy:
-                    pass
-            # Windows, see:
-            # https://github.com/python/cpython/pull/7160#discussion_r195405230
-            elif _WINDOWS and file_size > 0:
-                _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE))
-                return dst
+        with open(src, 'rb') as fsrc:
+            try:
+                with open(dst, 'wb') as fdst:
+                    # macOS
+                    if _HAS_FCOPYFILE:
+                        try:
+                            _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA)
+                            return dst
+                        except _GiveupOnFastCopy:
+                            pass
+                    # Linux
+                    elif _USE_CP_SENDFILE:
+                        try:
+                            _fastcopy_sendfile(fsrc, fdst)
+                            return dst
+                        except _GiveupOnFastCopy:
+                            pass
+                    # Windows, see:
+                    # https://github.com/python/cpython/pull/7160#discussion_r195405230
+                    elif _WINDOWS and file_size > 0:
+                        _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE))
+                        return dst
 
-            copyfileobj(fsrc, fdst)
+                    copyfileobj(fsrc, fdst)
+
+            # Issue 43219, raise a less confusing exception
+            except IsADirectoryError as e:
+                if not os.path.exists(dst):
+                    raise FileNotFoundError(f'Directory does not exist: {dst}') from e
+                else:
+                    raise
 
     return dst
 
@@ -647,6 +646,7 @@
         if is_dir:
             try:
                 dirfd = os.open(entry.name, os.O_RDONLY, dir_fd=topfd)
+                dirfd_closed = False
             except OSError:
                 onerror(os.open, fullname, sys.exc_info())
             else:
@@ -654,6 +654,8 @@
                     if os.path.samestat(orig_st, os.fstat(dirfd)):
                         _rmtree_safe_fd(dirfd, fullname, onerror)
                         try:
+                            os.close(dirfd)
+                            dirfd_closed = True
                             os.rmdir(entry.name, dir_fd=topfd)
                         except OSError:
                             onerror(os.rmdir, fullname, sys.exc_info())
@@ -667,7 +669,8 @@
                         except OSError:
                             onerror(os.path.islink, fullname, sys.exc_info())
                 finally:
-                    os.close(dirfd)
+                    if not dirfd_closed:
+                        os.close(dirfd)
         else:
             try:
                 os.unlink(entry.name, dir_fd=topfd)
@@ -710,6 +713,7 @@
             return
         try:
             fd = os.open(path, os.O_RDONLY)
+            fd_closed = False
         except Exception:
             onerror(os.open, path, sys.exc_info())
             return
@@ -717,6 +721,8 @@
             if os.path.samestat(orig_st, os.fstat(fd)):
                 _rmtree_safe_fd(fd, path, onerror)
                 try:
+                    os.close(fd)
+                    fd_closed = True
                     os.rmdir(path)
                 except OSError:
                     onerror(os.rmdir, path, sys.exc_info())
@@ -727,7 +733,8 @@
                 except OSError:
                     onerror(os.path.islink, path, sys.exc_info())
         finally:
-            os.close(fd)
+            if not fd_closed:
+                os.close(fd)
     else:
         try:
             if _rmtree_islink(path):
@@ -813,6 +820,12 @@
             if _destinsrc(src, dst):
                 raise Error("Cannot move a directory '%s' into itself"
                             " '%s'." % (src, dst))
+            if (_is_immutable(src)
+                    or (not os.access(src, os.W_OK) and os.listdir(src)
+                        and sys.platform == 'darwin')):
+                raise PermissionError("Cannot move the non-empty directory "
+                                      "'%s': Lacking write permission to '%s'."
+                                      % (src, src))
             copytree(src, real_dst, copy_function=copy_function,
                      symlinks=True)
             rmtree(src)
@@ -830,10 +843,21 @@
         dst += os.path.sep
     return dst.startswith(src)
 
+def _is_immutable(src):
+    st = _stat(src)
+    immutable_states = [stat.UF_IMMUTABLE, stat.SF_IMMUTABLE]
+    return hasattr(st, 'st_flags') and st.st_flags in immutable_states
+
 def _get_gid(name):
     """Returns a gid, given a group name."""
-    if getgrnam is None or name is None:
+    if name is None:
         return None
+
+    try:
+        from grp import getgrnam
+    except ImportError:
+        return None
+
     try:
         result = getgrnam(name)
     except KeyError:
@@ -844,8 +868,14 @@
 
 def _get_uid(name):
     """Returns an uid, given a user name."""
-    if getpwnam is None or name is None:
+    if name is None:
         return None
+
+    try:
+        from pwd import getpwnam
+    except ImportError:
+        return None
+
     try:
         result = getpwnam(name)
     except KeyError:
@@ -1148,20 +1178,16 @@
             if name.startswith('/') or '..' in name:
                 continue
 
-            target = os.path.join(extract_dir, *name.split('/'))
-            if not target:
+            targetpath = os.path.join(extract_dir, *name.split('/'))
+            if not targetpath:
                 continue
 
-            _ensure_directory(target)
+            _ensure_directory(targetpath)
             if not name.endswith('/'):
                 # file
-                data = zip.read(info.filename)
-                f = open(target, 'wb')
-                try:
-                    f.write(data)
-                finally:
-                    f.close()
-                    del data
+                with zip.open(name, 'r') as source, \
+                        open(targetpath, 'wb') as target:
+                    copyfileobj(source, target)
     finally:
         zip.close()
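
Two behavior notes from the shutil hunks: _unpack_zipfile() now streams each member
through copyfileobj() instead of buffering it whole in memory, and copyfile() onto a
directory-looking destination that does not exist surfaces as FileNotFoundError rather
than IsADirectoryError. A hedged sketch of the latter (POSIX trailing-slash semantics
assumed; 'src.txt' is a hypothetical existing file):

    >>> import shutil
    >>> try:
    ...     shutil.copyfile('src.txt', 'missing_dir/')   # trailing slash, no such dir
    ... except FileNotFoundError as exc:
    ...     print(exc)
    Directory does not exist: missing_dir/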
 
diff --git a/Lib/signal.py b/Lib/signal.py
index d4a6d6f..50b215b 100644
--- a/Lib/signal.py
+++ b/Lib/signal.py
@@ -1,6 +1,5 @@
 import _signal
 from _signal import *
-from functools import wraps as _wraps
 from enum import IntEnum as _IntEnum
 
 _globals = globals()
@@ -42,6 +41,16 @@
         return value
 
 
+# Similar to functools.wraps(), but only assigns __doc__.
+# __module__ should be preserved,
+# __name__ and __qualname__ are already fine,
+# __annotations__ is not set.
+def _wraps(wrapped):
+    def decorator(wrapper):
+        wrapper.__doc__ = wrapped.__doc__
+        return wrapper
+    return decorator
+
 @_wraps(_signal.signal)
 def signal(signalnum, handler):
     handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))
@@ -59,7 +68,6 @@
     def pthread_sigmask(how, mask):
         sigs_set = _signal.pthread_sigmask(how, mask)
         return set(_int_to_enum(x, Signals) for x in sigs_set)
-    pthread_sigmask.__doc__ = _signal.pthread_sigmask.__doc__
 
 
 if 'sigpending' in _globals:
@@ -73,7 +81,6 @@
     def sigwait(sigset):
         retsig = _signal.sigwait(sigset)
         return _int_to_enum(retsig, Signals)
-    sigwait.__doc__ = _signal.sigwait
 
 
 if 'valid_signals' in _globals:
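
The local _wraps replaces functools.wraps so importing signal no longer drags in
functools, and the decorator also supersedes the deleted line that had assigned
_signal.sigwait itself (not its docstring) to sigwait.__doc__. Only __doc__ is copied,
so the wrappers keep their own __module__:

    >>> import signal, _signal
    >>> signal.signal.__doc__ == _signal.signal.__doc__
    True
    >>> signal.signal.__module__   # functools.wraps would have copied '_signal' here
    'signal'
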
diff --git a/Lib/site.py b/Lib/site.py
index 9e617af..939893e 100644
--- a/Lib/site.py
+++ b/Lib/site.py
@@ -88,6 +88,11 @@
 USER_BASE = None
 
 
+def _trace(message):
+    if sys.flags.verbose:
+        print(message, file=sys.stderr)
+
+
 def makepath(*paths):
     dir = os.path.join(*paths)
     try:
@@ -100,8 +105,15 @@
 def abs_paths():
     """Set all module __file__ and __cached__ attributes to an absolute path"""
     for m in set(sys.modules.values()):
-        if (getattr(getattr(m, '__loader__', None), '__module__', None) not in
-                ('_frozen_importlib', '_frozen_importlib_external')):
+        loader_module = None
+        try:
+            loader_module = m.__loader__.__module__
+        except AttributeError:
+            try:
+                loader_module = m.__spec__.loader.__module__
+            except AttributeError:
+                pass
+        if loader_module not in {'_frozen_importlib', '_frozen_importlib_external'}:
             continue   # don't mess with a PEP 302-supplied __file__
         try:
             m.__file__ = os.path.abspath(m.__file__)
@@ -156,14 +168,19 @@
     else:
         reset = False
     fullname = os.path.join(sitedir, name)
+    _trace(f"Processing .pth file: {fullname!r}")
     try:
-        f = io.TextIOWrapper(io.open_code(fullname))
+        # locale encoding is not ideal especially on Windows. But we have used
+        # it for a long time. setuptools uses the locale encoding too.
+        f = io.TextIOWrapper(io.open_code(fullname), encoding="locale")
     except OSError:
         return
     with f:
         for n, line in enumerate(f):
             if line.startswith("#"):
                 continue
+            if line.strip() == "":
+                continue
             try:
                 if line.startswith(("import ", "import\t")):
                     exec(line)
@@ -190,6 +207,7 @@
 def addsitedir(sitedir, known_paths=None):
     """Add 'sitedir' argument to sys.path if missing and handle .pth files in
     'sitedir'"""
+    _trace(f"Adding directory: {sitedir!r}")
     if known_paths is None:
         known_paths = _init_pathinfo()
         reset = True
@@ -248,6 +266,10 @@
     if env_base:
         return env_base
 
+    # VxWorks has no home directories
+    if sys.platform == "vxworks":
+        return None
+
     def joinuser(*args):
         return os.path.expanduser(os.path.join(*args))
 
@@ -267,7 +289,8 @@
     version = sys.version_info
 
     if os.name == 'nt':
-        return f'{userbase}\\Python{version[0]}{version[1]}\\site-packages'
+        ver_nodot = sys.winver.replace('.', '')
+        return f'{userbase}\\Python{ver_nodot}\\site-packages'
 
     if sys.platform == 'darwin' and sys._framework:
         return f'{userbase}/lib/python/site-packages'
@@ -294,11 +317,14 @@
     If the global variable ``USER_SITE`` is not initialized yet, this
     function will also set it.
     """
-    global USER_SITE
+    global USER_SITE, ENABLE_USER_SITE
     userbase = getuserbase() # this will also set USER_BASE
 
     if USER_SITE is None:
-        USER_SITE = _get_path(userbase)
+        if userbase is None:
+            ENABLE_USER_SITE = False # disable user site and return None
+        else:
+            USER_SITE = _get_path(userbase)
 
     return USER_SITE
 
@@ -310,6 +336,7 @@
     """
     # get the per user site-package path
     # this call will also make sure USER_BASE and USER_SITE are set
+    _trace("Processing user site-packages")
     user_site = getusersitepackages()
 
     if ENABLE_USER_SITE and os.path.isdir(user_site):
@@ -354,6 +381,7 @@
 
 def addsitepackages(known_paths, prefixes=None):
     """Add site-packages to sys.path"""
+    _trace("Processing global site-packages")
     for sitedir in getsitepackages(prefixes):
         if os.path.isdir(sitedir):
             addsitedir(sitedir, known_paths)
@@ -611,11 +639,14 @@
         for dir in sys.path:
             print("    %r," % (dir,))
         print("]")
-        print("USER_BASE: %r (%s)" % (user_base,
-            "exists" if os.path.isdir(user_base) else "doesn't exist"))
-        print("USER_SITE: %r (%s)" % (user_site,
-            "exists" if os.path.isdir(user_site) else "doesn't exist"))
-        print("ENABLE_USER_SITE: %r" %  ENABLE_USER_SITE)
+        def exists(path):
+            if path is not None and os.path.isdir(path):
+                return "exists"
+            else:
+                return "doesn't exist"
+        print(f"USER_BASE: {user_base!r} ({exists(user_base)})")
+        print(f"USER_SITE: {user_site!r} ({exists(user_site)})")
+        print(f"ENABLE_USER_SITE: {ENABLE_USER_SITE!r}")
         sys.exit(0)
 
     buffer = []
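
The new _trace() helper (private) writes to stderr only when the interpreter runs in
verbose mode, so the "Processing .pth file" and "Adding directory" breadcrumbs added
above stay silent by default:

    >>> import sys, site
    >>> sys.flags.verbose                     # 0 unless started with -v / PYTHONVERBOSE
    0
    >>> site._trace("only shown under -v")    # prints nothing here
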
diff --git a/Lib/smtpd.py b/Lib/smtpd.py
index 8f1a22e..bc43331 100644
--- a/Lib/smtpd.py
+++ b/Lib/smtpd.py
@@ -83,8 +83,6 @@
 import getopt
 import time
 import socket
-import asyncore
-import asynchat
 import collections
 from warnings import warn
 from email._header_value_parser import get_addr_spec, get_angle_addr
@@ -94,6 +92,19 @@
     "MailmanProxy",
 ]
 
+warn(
+    'The smtpd module is deprecated and unmaintained.  Please see aiosmtpd '
+    '(https://aiosmtpd.readthedocs.io/) for the recommended replacement.',
+    DeprecationWarning,
+    stacklevel=2)
+
+
+# These are imported after the above warning so that users get the correct
+# deprecation warning.
+import asyncore
+import asynchat
+
+
 program = sys.argv[0]
 __version__ = 'Python SMTP proxy version 0.3'
 
@@ -163,7 +174,7 @@
             # a race condition  may occur if the other end is closing
             # before we can get the peername
             self.close()
-            if err.args[0] != errno.ENOTCONN:
+            if err.errno != errno.ENOTCONN:
                 raise
             return
         print('Peer:', repr(self.peer), file=DEBUGSTREAM)
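
Importing smtpd now raises the module's own DeprecationWarning before asyncore and
asynchat are imported, so the first warning a caller sees names smtpd. A hedged capture
(fresh interpreter assumed, since re-imports are served from sys.modules):

    >>> import warnings
    >>> with warnings.catch_warnings(record=True) as caught:
    ...     warnings.simplefilter("always")
    ...     import smtpd
    >>> caught[0].category
    <class 'DeprecationWarning'>
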
diff --git a/Lib/smtplib.py b/Lib/smtplib.py
index 7808ba0..324a1c1 100644
--- a/Lib/smtplib.py
+++ b/Lib/smtplib.py
@@ -64,6 +64,7 @@
 CRLF = "\r\n"
 bCRLF = b"\r\n"
 _MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3
+_MAXCHALLENGE = 5  # Maximum number of AUTH challenges sent
 
 OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
 
@@ -167,7 +168,7 @@
     """Quote data for email.
 
     Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
-    Internet CRLF end-of-line.
+    internet CRLF end-of-line.
     """
     return re.sub(r'(?m)^\.', '..',
         re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
@@ -222,7 +223,7 @@
     helo_resp = None
     ehlo_msg = "ehlo"
     ehlo_resp = None
-    does_esmtp = 0
+    does_esmtp = False
     default_port = SMTP_PORT
 
     def __init__(self, host='', port=0, local_hostname=None,
@@ -230,8 +231,8 @@
                  source_address=None):
         """Initialize a new instance.
 
-        If specified, `host' is the name of the remote host to which to
-        connect.  If specified, `port' specifies the port to which to connect.
+        If specified, `host` is the name of the remote host to which to
+        connect.  If specified, `port` specifies the port to which to connect.
         By default, smtplib.SMTP_PORT is used.  If a host is specified the
         connect method is called, and if it returns anything other than a
         success code an SMTPConnectError is raised.  If specified,
@@ -248,6 +249,7 @@
         self.esmtp_features = {}
         self.command_encoding = 'ascii'
         self.source_address = source_address
+        self._auth_challenge_count = 0
 
         if host:
             (code, msg) = self.connect(host, port)
@@ -365,10 +367,15 @@
     def putcmd(self, cmd, args=""):
         """Send a command to the server."""
         if args == "":
-            str = '%s%s' % (cmd, CRLF)
+            s = cmd
         else:
-            str = '%s %s%s' % (cmd, args, CRLF)
-        self.send(str)
+            s = f'{cmd} {args}'
+        if '\r' in s or '\n' in s:
+            s = s.replace('\n', '\\n').replace('\r', '\\r')
+            raise ValueError(
+                f'command and arguments contain prohibited newline characters: {s}'
+            )
+        self.send(f'{s}{CRLF}')
 
     def getreply(self):
         """Get a reply from the server.
@@ -452,7 +459,7 @@
         self.ehlo_resp = msg
         if code != 250:
             return (code, msg)
-        self.does_esmtp = 1
+        self.does_esmtp = True
         #parse the ehlo response -ddm
         assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp)
         resp = self.ehlo_resp.decode("latin-1").split('\n')
@@ -633,14 +640,23 @@
         if initial_response is not None:
             response = encode_base64(initial_response.encode('ascii'), eol='')
             (code, resp) = self.docmd("AUTH", mechanism + " " + response)
+            self._auth_challenge_count = 1
         else:
             (code, resp) = self.docmd("AUTH", mechanism)
+            self._auth_challenge_count = 0
         # If server responds with a challenge, send the response.
-        if code == 334:
+        while code == 334:
+            self._auth_challenge_count += 1
             challenge = base64.decodebytes(resp)
             response = encode_base64(
                 authobject(challenge).encode('ascii'), eol='')
             (code, resp) = self.docmd(response)
+            # If server keeps sending challenges, something is wrong.
+            if self._auth_challenge_count > _MAXCHALLENGE:
+                raise SMTPException(
+                    "Server AUTH mechanism infinite loop. Last response: "
+                    + repr((code, resp))
+                )
         if code in (235, 503):
             return (code, resp)
         raise SMTPAuthenticationError(code, resp)
@@ -662,7 +678,7 @@
     def auth_login(self, challenge=None):
         """ Authobject to use with LOGIN authentication. Requires self.user and
         self.password to be set."""
-        if challenge is None:
+        if challenge is None or self._auth_challenge_count < 2:
             return self.user
         else:
             return self.password
@@ -781,7 +797,7 @@
             self.helo_resp = None
             self.ehlo_resp = None
             self.esmtp_features = {}
-            self.does_esmtp = 0
+            self.does_esmtp = False
         else:
             # RFC 3207:
             # 501 Syntax error (no parameters allowed)
@@ -1082,7 +1098,8 @@
         # Handle Unix-domain sockets.
         try:
             self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            self.sock.settimeout(self.timeout)
+            if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+                self.sock.settimeout(self.timeout)
             self.file = None
             self.sock.connect(host)
         except OSError:
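
putcmd() now rejects CR and LF outright, closing the SMTP command-injection vector, and
the AUTH exchange is capped at _MAXCHALLENGE round trips. The putcmd() check fires
before anything touches the socket, so it can be shown without a connection:

    >>> from smtplib import SMTP
    >>> SMTP().putcmd('MAIL', 'FROM:<a@b>\r\nRSET')   # doctest: +ELLIPSIS
    Traceback (most recent call last):
      ...
    ValueError: command and arguments contain prohibited newline characters: ...
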
diff --git a/Lib/socket.py b/Lib/socket.py
index cafa573..63ba0ac 100644
--- a/Lib/socket.py
+++ b/Lib/socket.py
@@ -337,6 +337,7 @@
             buffer = io.BufferedWriter(raw, buffering)
         if binary:
             return buffer
+        encoding = io.text_encoding(encoding)
         text = io.TextIOWrapper(buffer, encoding, errors, newline)
         text.mode = mode
         return text
@@ -377,7 +378,7 @@
             try:
                 while True:
                     if timeout and not selector_select(timeout):
-                        raise _socket.timeout('timed out')
+                        raise TimeoutError('timed out')
                     if count:
                         blocksize = count - total_sent
                         if blocksize <= 0:
@@ -706,7 +707,7 @@
                 self._timeout_occurred = True
                 raise
             except error as e:
-                if e.args[0] in _blocking_errnos:
+                if e.errno in _blocking_errnos:
                     return None
                 raise
 
@@ -722,7 +723,7 @@
             return self._sock.send(b)
         except error as e:
             # XXX what about EINTR?
-            if e.args[0] in _blocking_errnos:
+            if e.errno in _blocking_errnos:
                 return None
             raise
 
@@ -781,8 +782,9 @@
     An empty argument is interpreted as meaning the local host.
 
     First the hostname returned by gethostbyaddr() is checked, then
-    possibly existing aliases. In case no FQDN is available, hostname
-    from gethostname() is returned.
+    possibly existing aliases. In case no FQDN is available and `name`
+    was given, it is returned unchanged. If `name` was empty or '0.0.0.0',
+    hostname from gethostname() is returned.
     """
     name = name.strip()
     if not name or name == '0.0.0.0':
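
The switches from e.args[0] to e.errno and from _socket.timeout to TimeoutError lean on
the Python 3.10 cleanup that made the socket timeout an alias of the builtin exception,
so either name catches it:

    >>> import socket
    >>> socket.timeout is TimeoutError
    True
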
diff --git a/Lib/socketserver.py b/Lib/socketserver.py
index 57c1ae6..0d9583d 100644
--- a/Lib/socketserver.py
+++ b/Lib/socketserver.py
@@ -628,6 +628,39 @@
             self.collect_children(blocking=self.block_on_close)
 
 
+class _Threads(list):
+    """
+    Joinable list of all non-daemon threads.
+    """
+    def append(self, thread):
+        self.reap()
+        if thread.daemon:
+            return
+        super().append(thread)
+
+    def pop_all(self):
+        self[:], result = [], self[:]
+        return result
+
+    def join(self):
+        for thread in self.pop_all():
+            thread.join()
+
+    def reap(self):
+        self[:] = (thread for thread in self if thread.is_alive())
+
+
+class _NoThreads:
+    """
+    Degenerate version of _Threads.
+    """
+    def append(self, thread):
+        pass
+
+    def join(self):
+        pass
+
+
 class ThreadingMixIn:
     """Mix-in class to handle each request in a new thread."""
 
@@ -636,9 +669,9 @@
     daemon_threads = False
     # If true, server_close() waits until all non-daemonic threads terminate.
     block_on_close = True
-    # For non-daemonic threads, list of threading.Threading objects
+    # Threads object
     # used by server_close() to wait for all threads completion.
-    _threads = None
+    _threads = _NoThreads()
 
     def process_request_thread(self, request, client_address):
         """Same as in BaseServer but as a thread.
@@ -655,23 +688,17 @@
 
     def process_request(self, request, client_address):
         """Start a new thread to process the request."""
+        if self.block_on_close:
+            vars(self).setdefault('_threads', _Threads())
         t = threading.Thread(target = self.process_request_thread,
                              args = (request, client_address))
         t.daemon = self.daemon_threads
-        if not t.daemon and self.block_on_close:
-            if self._threads is None:
-                self._threads = []
-            self._threads.append(t)
+        self._threads.append(t)
         t.start()
 
     def server_close(self):
         super().server_close()
-        if self.block_on_close:
-            threads = self._threads
-            self._threads = None
-            if threads:
-                for thread in threads:
-                    thread.join()
+        self._threads.join()
 
 
 if hasattr(os, "fork"):
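
_Threads tracks only live, non-daemon threads, and the _NoThreads class default makes
server_close() a cheap no-op join until process_request() swaps in a real _Threads per
instance (only when block_on_close is true). Behavior of the private helper:

    >>> import threading
    >>> from socketserver import _Threads
    >>> ts = _Threads()
    >>> ts.append(threading.Thread(target=lambda: None, daemon=True))
    >>> len(ts)   # daemon threads are never tracked, so nothing to join
    0
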
diff --git a/Lib/sqlite3/__init__.py b/Lib/sqlite3/__init__.py
index 6c91df2..0dedf18 100644
--- a/Lib/sqlite3/__init__.py
+++ b/Lib/sqlite3/__init__.py
@@ -20,4 +20,52 @@
 #    misrepresented as being the original software.
 # 3. This notice may not be removed or altered from any source distribution.
 
+"""
+The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compliant
+interface to the SQLite library, and requires SQLite 3.7.15 or newer.
+
+To use the module, start by creating a database Connection object:
+
+    import sqlite3
+    cx = sqlite3.connect("test.db")  # test.db will be created or opened
+
+The special path name ":memory:" can be provided to connect to a transient
+in-memory database:
+
+    cx = sqlite3.connect(":memory:")  # connect to a database in RAM
+
+Once a connection has been established, create a Cursor object and call
+its execute() method to perform SQL queries:
+
+    cu = cx.cursor()
+
+    # create a table
+    cu.execute("create table lang(name, first_appeared)")
+
+    # insert values into a table
+    cu.execute("insert into lang values (?, ?)", ("C", 1972))
+
+    # execute a query and iterate over the result
+    for row in cu.execute("select * from lang"):
+        print(row)
+
+    cx.close()
+
+The sqlite3 module is written by Gerhard Häring <gh@ghaering.de>.
+"""
+
 from sqlite3.dbapi2 import *
+
+
+# bpo-42264: OptimizedUnicode was deprecated in Python 3.10.  It's scheduled
+# for removal in Python 3.12.
+def __getattr__(name):
+    if name == "OptimizedUnicode":
+        import warnings
+        msg = ("""
+            OptimizedUnicode is deprecated and will be removed in Python 3.12.
+            Since Python 3.3 it has simply been an alias for 'str'.
+        """)
+        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+        return str
+    raise AttributeError(f"module 'sqlite3' has no attribute '{name}'")
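
The module-level __getattr__ is the PEP 562 pattern: the deprecated alias still
resolves, but only by way of a DeprecationWarning:

    >>> import sqlite3, warnings
    >>> with warnings.catch_warnings():
    ...     warnings.simplefilter("ignore", DeprecationWarning)
    ...     sqlite3.OptimizedUnicode is str
    True
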
diff --git a/Lib/sqlite3/dbapi2.py b/Lib/sqlite3/dbapi2.py
index 991682c..cfe6225 100644
--- a/Lib/sqlite3/dbapi2.py
+++ b/Lib/sqlite3/dbapi2.py
@@ -84,6 +84,20 @@
 
 register_adapters_and_converters()
 
+# bpo-24464: enable_shared_cache was deprecated in Python 3.10.  It's
+# scheduled for removal in Python 3.12.
+def enable_shared_cache(enable):
+    from _sqlite3 import enable_shared_cache as _old_enable_shared_cache
+    import warnings
+    msg = (
+        "enable_shared_cache is deprecated and will be removed in Python 3.12. "
+        "Shared cache is strongly discouraged by the SQLite 3 documentation. "
+        "If shared cache must be used, open the database in URI mode using"
+        "the cache=shared query parameter."
+    )
+    warnings.warn(msg, DeprecationWarning, stacklevel=2)
+    return _old_enable_shared_cache(enable)
+
 # Clean up namespace
 
 del(register_adapters_and_converters)
diff --git a/Lib/ssl.py b/Lib/ssl.py
index 30f4e59..181065d 100644
--- a/Lib/ssl.py
+++ b/Lib/ssl.py
@@ -253,7 +253,7 @@
     from _ssl import enum_certificates, enum_crls
 
 from socket import socket, SOCK_STREAM, create_connection
-from socket import SOL_SOCKET, SO_TYPE
+from socket import SOL_SOCKET, SO_TYPE, _GLOBAL_DEFAULT_TIMEOUT
 import socket as _socket
 import base64        # for DER-to-PEM translation
 import errno
@@ -381,6 +381,11 @@
     CertificateError is raised on failure. On success, the function
     returns nothing.
     """
+    warnings.warn(
+        "ssl.match_hostname() is deprecated",
+        category=DeprecationWarning,
+        stacklevel=2
+    )
     if not cert:
         raise ValueError("empty or no certificate, match_hostname needs a "
                          "SSL socket or SSL context with either "
@@ -479,7 +484,14 @@
     sslsocket_class = None  # SSLSocket is assigned later.
     sslobject_class = None  # SSLObject is assigned later.
 
-    def __new__(cls, protocol=PROTOCOL_TLS, *args, **kwargs):
+    def __new__(cls, protocol=None, *args, **kwargs):
+        if protocol is None:
+            warnings.warn(
+                "ssl.SSLContext() without protocol argument is deprecated.",
+                category=DeprecationWarning,
+                stacklevel=2
+            )
+            protocol = PROTOCOL_TLS
         self = _SSLContext.__new__(cls, protocol)
         return self
 
@@ -518,6 +530,11 @@
         )
 
     def set_npn_protocols(self, npn_protocols):
+        warnings.warn(
+            "ssl NPN is deprecated, use ALPN instead",
+            DeprecationWarning,
+            stacklevel=2
+        )
         protos = bytearray()
         for protocol in npn_protocols:
             b = bytes(protocol, 'ascii')
@@ -734,12 +751,15 @@
     # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
     # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
     # by default.
-    context = SSLContext(PROTOCOL_TLS)
-
     if purpose == Purpose.SERVER_AUTH:
         # verify certs and host name in client mode
+        context = SSLContext(PROTOCOL_TLS_CLIENT)
         context.verify_mode = CERT_REQUIRED
         context.check_hostname = True
+    elif purpose == Purpose.CLIENT_AUTH:
+        context = SSLContext(PROTOCOL_TLS_SERVER)
+    else:
+        raise ValueError(purpose)
 
     if cafile or capath or cadata:
         context.load_verify_locations(cafile, capath, cadata)
@@ -755,7 +775,7 @@
             context.keylog_filename = keylogfile
     return context
 
-def _create_unverified_context(protocol=PROTOCOL_TLS, *, cert_reqs=CERT_NONE,
+def _create_unverified_context(protocol=None, *, cert_reqs=CERT_NONE,
                            check_hostname=False, purpose=Purpose.SERVER_AUTH,
                            certfile=None, keyfile=None,
                            cafile=None, capath=None, cadata=None):
@@ -772,10 +792,18 @@
     # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
     # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
     # by default.
-    context = SSLContext(protocol)
+    if purpose == Purpose.SERVER_AUTH:
+        # verify certs and host name in client mode
+        if protocol is None:
+            protocol = PROTOCOL_TLS_CLIENT
+    elif purpose == Purpose.CLIENT_AUTH:
+        if protocol is None:
+            protocol = PROTOCOL_TLS_SERVER
+    else:
+        raise ValueError(purpose)
 
-    if not check_hostname:
-        context.check_hostname = False
+    context = SSLContext(protocol)
+    context.check_hostname = check_hostname
     if cert_reqs is not None:
         context.verify_mode = cert_reqs
     if check_hostname:
@@ -909,15 +937,17 @@
         """Return the currently selected NPN protocol as a string, or ``None``
         if a next protocol was not negotiated or if NPN is not supported by one
         of the peers."""
-        if _ssl.HAS_NPN:
-            return self._sslobj.selected_npn_protocol()
+        warnings.warn(
+            "ssl NPN is deprecated, use ALPN instead",
+            DeprecationWarning,
+            stacklevel=2
+        )
 
     def selected_alpn_protocol(self):
         """Return the currently selected ALPN protocol as a string, or ``None``
         if a next protocol was not negotiated or if ALPN is not supported by one
         of the peers."""
-        if _ssl.HAS_ALPN:
-            return self._sslobj.selected_alpn_protocol()
+        return self._sslobj.selected_alpn_protocol()
 
     def cipher(self):
         """Return the currently selected cipher as a 3-tuple ``(name,
@@ -1126,10 +1156,12 @@
     @_sslcopydoc
     def selected_npn_protocol(self):
         self._checkClosed()
-        if self._sslobj is None or not _ssl.HAS_NPN:
-            return None
-        else:
-            return self._sslobj.selected_npn_protocol()
+        warnings.warn(
+            "ssl NPN is deprecated, use ALPN instead",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        return None
 
     @_sslcopydoc
     def selected_alpn_protocol(self):
@@ -1388,7 +1420,11 @@
                 do_handshake_on_connect=True,
                 suppress_ragged_eofs=True,
                 ciphers=None):
-
+    warnings.warn(
+        "ssl.wrap_socket() is deprecated, use SSLContext.wrap_socket()",
+        category=DeprecationWarning,
+        stacklevel=2
+    )
     if server_side and not certfile:
         raise ValueError("certfile must be specified for server-side "
                          "operations")
@@ -1466,11 +1502,14 @@
     d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
     return base64.decodebytes(d.encode('ASCII', 'strict'))
 
-def get_server_certificate(addr, ssl_version=PROTOCOL_TLS, ca_certs=None):
+def get_server_certificate(addr, ssl_version=PROTOCOL_TLS_CLIENT,
+                           ca_certs=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
     """Retrieve the certificate from the server at the specified address,
     and return it as a PEM-encoded string.
     If 'ca_certs' is specified, validate the server cert against it.
-    If 'ssl_version' is specified, use it in the connection attempt."""
+    If 'ssl_version' is specified, use it in the connection attempt.
+    If 'timeout' is specified, use it in the connection attempt.
+    """
 
     host, port = addr
     if ca_certs is not None:
@@ -1480,8 +1519,8 @@
     context = _create_stdlib_context(ssl_version,
                                      cert_reqs=cert_reqs,
                                      cafile=ca_certs)
-    with  create_connection(addr) as sock:
-        with context.wrap_socket(sock) as sslsock:
+    with create_connection(addr, timeout=timeout) as sock:
+        with context.wrap_socket(sock, server_hostname=host) as sslsock:
             dercert = sslsock.getpeercert(True)
     return DER_cert_to_PEM_cert(dercert)
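
Beyond get_server_certificate() gaining a timeout and sending the host name for SNI,
several ssl entry points now warn: match_hostname(), wrap_socket(), the NPN APIs, and
SSLContext() without an explicit protocol. The last is observable offline:

    >>> import ssl, warnings
    >>> with warnings.catch_warnings(record=True) as caught:
    ...     warnings.simplefilter("always")
    ...     ctx = ssl.SSLContext()   # protocol=None path
    >>> caught[0].category
    <class 'DeprecationWarning'>
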
 
diff --git a/Lib/statistics.py b/Lib/statistics.py
index f9d3802..f662453 100644
--- a/Lib/statistics.py
+++ b/Lib/statistics.py
@@ -73,6 +73,30 @@
 2.5
 
 
+Statistics for relations between two inputs
+-------------------------------------------
+
+==================  ====================================================
+Function            Description
+==================  ====================================================
+covariance          Sample covariance for two variables.
+correlation         Pearson's correlation coefficient for two variables.
+linear_regression   Intercept and slope for simple linear regression.
+==================  ====================================================
+
+Calculate covariance, Pearson's correlation, and simple linear regression
+for two inputs:
+
+>>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+>>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3]
+>>> covariance(x, y)
+0.75
+>>> correlation(x, y)  #doctest: +ELLIPSIS
+0.31622776601...
+>>> linear_regression(x, y)
+LinearRegression(slope=0.1, intercept=1.5)
+
+
 Exceptions
 ----------
 
@@ -83,9 +107,12 @@
 __all__ = [
     'NormalDist',
     'StatisticsError',
+    'correlation',
+    'covariance',
     'fmean',
     'geometric_mean',
     'harmonic_mean',
+    'linear_regression',
     'mean',
     'median',
     'median_grouped',
@@ -106,11 +133,11 @@
 
 from fractions import Fraction
 from decimal import Decimal
-from itertools import groupby
+from itertools import groupby, repeat
 from bisect import bisect_left, bisect_right
 from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum
 from operator import itemgetter
-from collections import Counter
+from collections import Counter, namedtuple
 
 # === Exceptions ===
 
@@ -120,21 +147,17 @@
 
 # === Private utilities ===
 
-def _sum(data, start=0):
-    """_sum(data [, start]) -> (type, sum, count)
+def _sum(data):
+    """_sum(data) -> (type, sum, count)
 
     Return a high-precision sum of the given numeric data as a fraction,
     together with the type to be converted to and the count of items.
 
-    If optional argument ``start`` is given, it is added to the total.
-    If ``data`` is empty, ``start`` (defaulting to 0) is returned.
-
-
     Examples
     --------
 
-    >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
-    (<class 'float'>, Fraction(11, 1), 5)
+    >>> _sum([3, 2.25, 4.5, -0.5, 0.25])
+    (<class 'float'>, Fraction(19, 2), 5)
 
     Some sources of round-off error will be avoided:
 
@@ -157,10 +180,9 @@
     allowed.
     """
     count = 0
-    n, d = _exact_ratio(start)
-    partials = {d: n}
+    partials = {}
     partials_get = partials.get
-    T = _coerce(int, type(start))
+    T = int
     for typ, values in groupby(data, type):
         T = _coerce(T, typ)  # or raise TypeError
         for n, d in map(_exact_ratio, values):
@@ -173,8 +195,7 @@
         assert not _isfinite(total)
     else:
         # Sum all the partial sums using builtin sum.
-        # FIXME is this faster if we sum them in order of the denominator?
-        total = sum(Fraction(n, d) for d, n in sorted(partials.items()))
+        total = sum(Fraction(n, d) for d, n in partials.items())
     return (T, total, count)
 
 
@@ -225,27 +246,19 @@
     x is expected to be an int, Fraction, Decimal or float.
     """
     try:
-        # Optimise the common case of floats. We expect that the most often
-        # used numeric type will be builtin floats, so try to make this as
-        # fast as possible.
-        if type(x) is float or type(x) is Decimal:
-            return x.as_integer_ratio()
-        try:
-            # x may be an int, Fraction, or Integral ABC.
-            return (x.numerator, x.denominator)
-        except AttributeError:
-            try:
-                # x may be a float or Decimal subclass.
-                return x.as_integer_ratio()
-            except AttributeError:
-                # Just give up?
-                pass
+        return x.as_integer_ratio()
+    except AttributeError:
+        pass
     except (OverflowError, ValueError):
         # float NAN or INF.
         assert not _isfinite(x)
         return (x, None)
-    msg = "can't convert type '{}' to numerator/denominator"
-    raise TypeError(msg.format(type(x).__name__))
+    try:
+        # x may be an Integral ABC.
+        return (x.numerator, x.denominator)
+    except AttributeError:
+        msg = f"can't convert type '{type(x).__name__}' to numerator/denominator"
+        raise TypeError(msg)
 
 
 def _convert(value, T):
@@ -361,40 +374,39 @@
         return exp(fmean(map(log, data)))
     except ValueError:
         raise StatisticsError('geometric mean requires a non-empty dataset '
-                              ' containing positive numbers') from None
+                              'containing positive numbers') from None
 
 
-def harmonic_mean(data):
+def harmonic_mean(data, weights=None):
     """Return the harmonic mean of data.
 
-    The harmonic mean, sometimes called the subcontrary mean, is the
-    reciprocal of the arithmetic mean of the reciprocals of the data,
-    and is often appropriate when averaging quantities which are rates
-    or ratios, for example speeds. Example:
+    The harmonic mean is the reciprocal of the arithmetic mean of the
+    reciprocals of the data.  It can be used for averaging ratios or
+    rates, for example speeds.
 
-    Suppose an investor purchases an equal value of shares in each of
-    three companies, with P/E (price/earning) ratios of 2.5, 3 and 10.
-    What is the average P/E ratio for the investor's portfolio?
+    Suppose a car travels 40 km/hr for 5 km and then speeds up to
+    60 km/hr for another 5 km. What is the average speed?
 
-    >>> harmonic_mean([2.5, 3, 10])  # For an equal investment portfolio.
-    3.6
+        >>> harmonic_mean([40, 60])
+        48.0
 
-    Using the arithmetic mean would give an average of about 5.167, which
-    is too high.
+    Suppose a car travels 40 km/hr for 5 km, and when traffic clears,
+    speeds up to 60 km/hr for the remaining 30 km of the journey. What
+    is the average speed?
+
+        >>> harmonic_mean([40, 60], weights=[5, 30])
+        56.0
 
     If ``data`` is empty, or any element is less than zero,
     ``harmonic_mean`` will raise ``StatisticsError``.
     """
-    # For a justification for using harmonic mean for P/E ratios, see
-    # http://fixthepitch.pellucid.com/comps-analysis-the-missing-harmony-of-summary-statistics/
-    # http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2621087
     if iter(data) is data:
         data = list(data)
     errmsg = 'harmonic mean does not support negative values'
     n = len(data)
     if n < 1:
         raise StatisticsError('harmonic_mean requires at least one data point')
-    elif n == 1:
+    elif n == 1 and weights is None:
         x = data[0]
         if isinstance(x, (numbers.Real, Decimal)):
             if x < 0:
@@ -402,13 +414,23 @@
             return x
         else:
             raise TypeError('unsupported type')
+    if weights is None:
+        weights = repeat(1, n)
+        sum_weights = n
+    else:
+        if iter(weights) is weights:
+            weights = list(weights)
+        if len(weights) != n:
+            raise StatisticsError('Number of weights does not match data size')
+        _, sum_weights, _ = _sum(w for w in _fail_neg(weights, errmsg))
     try:
-        T, total, count = _sum(1 / x for x in _fail_neg(data, errmsg))
+        data = _fail_neg(data, errmsg)
+        T, total, count = _sum(w / x if w else 0 for w, x in zip(weights, data))
     except ZeroDivisionError:
         return 0
-    assert count == n
-    return _convert(n / total, T)
-
+    if total <= 0:
+        raise StatisticsError('Weighted sum must be positive')
+    return _convert(sum_weights / total, T)
 
 # FIXME: investigate ways to calculate medians without sorting? Quickselect?
 def median(data):
@@ -683,14 +705,20 @@
     if c is not None:
         T, total, count = _sum((x-c)**2 for x in data)
         return (T, total)
-    c = mean(data)
-    T, total, count = _sum((x-c)**2 for x in data)
-    # The following sum should mathematically equal zero, but due to rounding
-    # error may not.
-    U, total2, count2 = _sum((x - c) for x in data)
-    assert T == U and count == count2
-    total -= total2 ** 2 / len(data)
-    assert not total < 0, 'negative sum of square deviations: %f' % total
+    T, total, count = _sum(data)
+    mean_n, mean_d = (total / count).as_integer_ratio()
+    partials = Counter()
+    for n, d in map(_exact_ratio, data):
+        diff_n = n * mean_d - d * mean_n
+        diff_d = d * mean_d
+        partials[diff_d * diff_d] += diff_n * diff_n
+    if None in partials:
+        # The sum will be a NAN or INF. We can ignore all the finite
+        # partials, and just look at this special one.
+        total = partials[None]
+        assert not _isfinite(total)
+    else:
+        total = sum(Fraction(n, d) for d, n in partials.items())
     return (T, total)
 
 
@@ -794,6 +822,9 @@
     1.0810874155219827
 
     """
+    # Fixme: Despite the exact sum of squared deviations, some inaccuracy
+    # remains because there are two rounding steps.  The first occurs in
+    # the _convert() step for variance(), the second occurs in math.sqrt().
     var = variance(data, xbar)
     try:
         return var.sqrt()
@@ -810,6 +841,9 @@
     0.986893273527251
 
     """
+    # Fixme: Despite the exact sum of squared deviations, some inaccuracy
+    # remains because there are two rounding steps.  The first occurs in
+    # the _convert() step for pvariance(), the second occurs in math.sqrt().
     var = pvariance(data, mu)
     try:
         return var.sqrt()
@@ -817,6 +851,119 @@
         return math.sqrt(var)
 
 
+# === Statistics for relations between two inputs ===
+
+# See https://en.wikipedia.org/wiki/Covariance
+#     https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
+#     https://en.wikipedia.org/wiki/Simple_linear_regression
+
+
+def covariance(x, y, /):
+    """Covariance
+
+    Return the sample covariance of two inputs *x* and *y*. Covariance
+    is a measure of the joint variability of two inputs.
+
+    >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+    >>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3]
+    >>> covariance(x, y)
+    0.75
+    >>> z = [9, 8, 7, 6, 5, 4, 3, 2, 1]
+    >>> covariance(x, z)
+    -7.5
+    >>> covariance(z, x)
+    -7.5
+
+    """
+    n = len(x)
+    if len(y) != n:
+        raise StatisticsError('covariance requires that both inputs have same number of data points')
+    if n < 2:
+        raise StatisticsError('covariance requires at least two data points')
+    xbar = fsum(x) / n
+    ybar = fsum(y) / n
+    sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
+    return sxy / (n - 1)
+
+
+def correlation(x, y, /):
+    """Pearson's correlation coefficient
+
+    Return the Pearson's correlation coefficient for two inputs. Pearson's
+    correlation coefficient *r* takes values between -1 and +1. It measures the
+    strength and direction of the linear relationship, where +1 means very
+    strong, positive linear relationship, -1 very strong, negative linear
+    relationship, and 0 no linear relationship.
+
+    >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+    >>> y = [9, 8, 7, 6, 5, 4, 3, 2, 1]
+    >>> correlation(x, x)
+    1.0
+    >>> correlation(x, y)
+    -1.0
+
+    """
+    n = len(x)
+    if len(y) != n:
+        raise StatisticsError('correlation requires that both inputs have same number of data points')
+    if n < 2:
+        raise StatisticsError('correlation requires at least two data points')
+    xbar = fsum(x) / n
+    ybar = fsum(y) / n
+    sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
+    sxx = fsum((xi - xbar) ** 2.0 for xi in x)
+    syy = fsum((yi - ybar) ** 2.0 for yi in y)
+    try:
+        return sxy / sqrt(sxx * syy)
+    except ZeroDivisionError:
+        raise StatisticsError('at least one of the inputs is constant')
+
+
+LinearRegression = namedtuple('LinearRegression', ('slope', 'intercept'))
+
+
+def linear_regression(x, y, /):
+    """Slope and intercept for simple linear regression.
+
+    Return the slope and intercept of simple linear regression
+    parameters estimated using ordinary least squares. Simple linear
+    regression describes the relationship between an independent variable
+    *x* and a dependent variable *y* in terms of a linear function:
+
+        y = slope * x + intercept + noise
+
+    where *slope* and *intercept* are the regression parameters that are
+    estimated, and noise represents the variability of the data that was
+    not explained by the linear regression (it is equal to the
+    difference between predicted and actual values of the dependent
+    variable).
+
+    The parameters are returned as a named tuple.
+
+    >>> x = [1, 2, 3, 4, 5]
+    >>> noise = NormalDist().samples(5, seed=42)
+    >>> y = [3 * x[i] + 2 + noise[i] for i in range(5)]
+    >>> linear_regression(x, y)  #doctest: +ELLIPSIS
+    LinearRegression(slope=3.09078914170..., intercept=1.75684970486...)
+
+    """
+    n = len(x)
+    if len(y) != n:
+        raise StatisticsError('linear regression requires that both inputs have same number of data points')
+    if n < 2:
+        raise StatisticsError('linear regression requires at least two data points')
+    xbar = fsum(x) / n
+    ybar = fsum(y) / n
+    sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
+    sxx = fsum((xi - xbar) ** 2.0 for xi in x)
+    try:
+        slope = sxy / sxx   # equivalent to:  covariance(x, y) / variance(x)
+    except ZeroDivisionError:
+        raise StatisticsError('x is constant')
+    intercept = ybar - slope * xbar
+    return LinearRegression(slope=slope, intercept=intercept)
+
+
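
The three estimators above are the textbook ones: sxy / (n - 1), sxy / sqrt(sxx * syy),
and slope = sxy / sxx with intercept = ybar - slope * xbar. A worked check on a tiny,
perfectly linear dataset:

    >>> from statistics import covariance, correlation, linear_regression
    >>> x = [1, 2, 3]
    >>> y = [2, 4, 6]
    >>> covariance(x, y)   # ((1-2)*(2-4) + 0 + (3-2)*(6-4)) / (3 - 1)
    2.0
    >>> correlation(x, y)
    1.0
    >>> linear_regression(x, y)
    LinearRegression(slope=2.0, intercept=0.0)
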
 ## Normal Distribution #####################################################
 
 
diff --git a/Lib/subprocess.py b/Lib/subprocess.py
index f1d829a..ccb46a6 100644
--- a/Lib/subprocess.py
+++ b/Lib/subprocess.py
@@ -5,7 +5,6 @@
 # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
 #
 # Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/2.4/license for licensing details.
 
 r"""Subprocesses with accessible I/O streams
 
@@ -55,13 +54,10 @@
 import types
 
 try:
-    import pwd
+    import fcntl
 except ImportError:
-    pwd = None
-try:
-    import grp
-except ImportError:
-    grp = None
+    fcntl = None
+
 
 __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
            "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL",
@@ -326,7 +322,7 @@
     if dev_mode:
         args.extend(('-X', 'dev'))
     for opt in ('faulthandler', 'tracemalloc', 'importtime',
-                'showrefcount', 'utf8', 'oldparser'):
+                'showrefcount', 'utf8'):
         if opt in xoptions:
             value = xoptions[opt]
             if value is True:
@@ -415,7 +411,11 @@
     if 'input' in kwargs and kwargs['input'] is None:
         # Explicitly passing input=None was previously equivalent to passing an
         # empty string. That is maintained here for backwards compatibility.
-        kwargs['input'] = '' if kwargs.get('universal_newlines', False) else b''
+        if kwargs.get('universal_newlines') or kwargs.get('text'):
+            empty = ''
+        else:
+            empty = b''
+        kwargs['input'] = empty
 
     return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
                **kwargs).stdout
@@ -660,8 +660,9 @@
         # os.posix_spawn() is not available
         return False
 
-    if sys.platform == 'darwin':
-        # posix_spawn() is a syscall on macOS and properly reports errors
+    if sys.platform in ('darwin', 'sunos5'):
+        # posix_spawn() is a syscall on both macOS and Solaris,
+        # and properly reports errors
         return True
 
     # Check libc name and runtime libc version
@@ -693,7 +694,7 @@
 _USE_POSIX_SPAWN = _use_posix_spawn()
 
 
-class Popen(object):
+class Popen:
     """ Execute a child program in a new process.
 
     For a complete description of the arguments see the Python documentation.
@@ -756,7 +757,7 @@
                  startupinfo=None, creationflags=0,
                  restore_signals=True, start_new_session=False,
                  pass_fds=(), *, user=None, group=None, extra_groups=None,
-                 encoding=None, errors=None, text=None, umask=-1):
+                 encoding=None, errors=None, text=None, umask=-1, pipesize=-1):
         """Create new Popen instance."""
         _cleanup()
         # Held while anything is calling waitpid before returncode has been
@@ -773,6 +774,11 @@
         if not isinstance(bufsize, int):
             raise TypeError("bufsize must be an integer")
 
+        if pipesize is None:
+            pipesize = -1  # Restore default
+        if not isinstance(pipesize, int):
+            raise TypeError("pipesize must be an integer")
+
         if _mswindows:
             if preexec_fn is not None:
                 raise ValueError("preexec_fn is not supported on Windows "
@@ -797,6 +803,7 @@
         self.returncode = None
         self.encoding = encoding
         self.errors = errors
+        self.pipesize = pipesize
 
         # Validate the combinations of text and universal_newlines
         if (text is not None and universal_newlines is not None
@@ -838,6 +845,13 @@
 
         self.text_mode = encoding or errors or text or universal_newlines
 
+        # PEP 597: We suppress the EncodingWarning in subprocess module
+        # for now (at Python 3.10), because we focus on files for now.
+        # This will be changed to encoding = io.text_encoding(encoding)
+        # in the future.
+        if self.text_mode and encoding is None:
+            self.encoding = encoding = "locale"
+
         # How long to resume waiting on a child after the first ^C.
         # There is no right value for this.  The purpose is to be polite
         # yet remain good for interactive users trying to exit a tool.
@@ -861,7 +875,9 @@
                                  "current platform")
 
             elif isinstance(group, str):
-                if grp is None:
+                try:
+                    import grp
+                except ImportError:
                     raise ValueError("The group parameter cannot be a string "
                                      "on systems without the grp module")
 
@@ -887,7 +903,9 @@
             gids = []
             for extra_group in extra_groups:
                 if isinstance(extra_group, str):
-                    if grp is None:
+                    try:
+                        import grp
+                    except ImportError:
                         raise ValueError("Items in extra_groups cannot be "
                                          "strings on systems without the "
                                          "grp module")
@@ -913,10 +931,11 @@
                                  "the current platform")
 
             elif isinstance(user, str):
-                if pwd is None:
+                try:
+                    import pwd
+                except ImportError:
                     raise ValueError("The user parameter cannot be a string "
                                      "on systems without the pwd module")
-
                 uid = pwd.getpwnam(user).pw_uid
             elif isinstance(user, int):
                 uid = user
@@ -985,7 +1004,7 @@
     def __repr__(self):
         obj_repr = (
             f"<{self.__class__.__name__}: "
-            f"returncode: {self.returncode} args: {list(self.args)!r}>"
+            f"returncode: {self.returncode} args: {self.args!r}>"
         )
         if len(obj_repr) > 80:
             obj_repr = obj_repr[:76] + "...>"
@@ -1521,10 +1540,8 @@
                 self.stderr.close()
 
             # All data exchanged.  Translate lists into strings.
-            if stdout is not None:
-                stdout = stdout[0]
-            if stderr is not None:
-                stderr = stderr[0]
+            stdout = stdout[0] if stdout else None
+            stderr = stderr[0] if stderr else None
 
             return (stdout, stderr)
 
@@ -1575,6 +1592,8 @@
                 pass
             elif stdin == PIPE:
                 p2cread, p2cwrite = os.pipe()
+                if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
+                    fcntl.fcntl(p2cwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
             elif stdin == DEVNULL:
                 p2cread = self._get_devnull()
             elif isinstance(stdin, int):
@@ -1587,6 +1606,8 @@
                 pass
             elif stdout == PIPE:
                 c2pread, c2pwrite = os.pipe()
+                if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
+                    fcntl.fcntl(c2pwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
             elif stdout == DEVNULL:
                 c2pwrite = self._get_devnull()
             elif isinstance(stdout, int):
@@ -1599,6 +1620,8 @@
                 pass
             elif stderr == PIPE:
                 errread, errwrite = os.pipe()
+                if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
+                    fcntl.fcntl(errwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
             elif stderr == STDOUT:
                 if c2pwrite != -1:
                     errwrite = c2pwrite
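
The subprocess hunks above add a `pipesize` argument that, where the platform exposes fcntl.F_SETPIPE_SZ (Linux), resizes each pipe the Popen creates; elsewhere, including on this Windows prebuilt, the hint is silently ignored. A minimal sketch, with an arbitrary 1 MiB size:

    import subprocess
    import sys

    # Echo stdin back through a child interpreter; pipesize is only a hint
    # and takes effect solely where fcntl.F_SETPIPE_SZ exists.
    proc = subprocess.Popen(
        [sys.executable, "-c",
         "import sys; sys.stdout.write(sys.stdin.read())"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        pipesize=1024 * 1024,  # request a 1 MiB kernel pipe buffer
    )
    out, _ = proc.communicate(b"hello")
    assert out == b"hello"
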
diff --git a/Lib/symbol.py b/Lib/symbol.py
deleted file mode 100644
index aaac8c9..0000000
--- a/Lib/symbol.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""Non-terminal symbols of Python grammar (from "graminit.h")."""
-
-#  This file is automatically generated; please don't muck it up!
-#
-#  To update the symbols in this file, 'cd' to the top directory of
-#  the python source tree after building the interpreter and run:
-#
-#    python3 Tools/scripts/generate_symbol_py.py Include/graminit.h Lib/symbol.py
-#
-# or just
-#
-#    make regen-symbol
-
-import warnings
-
-warnings.warn(
-    "The symbol module is deprecated and will be removed "
-    "in future versions of Python",
-    DeprecationWarning,
-    stacklevel=2,
-)
-
-#--start constants--
-single_input = 256
-file_input = 257
-eval_input = 258
-decorator = 259
-decorators = 260
-decorated = 261
-async_funcdef = 262
-funcdef = 263
-parameters = 264
-typedargslist = 265
-tfpdef = 266
-varargslist = 267
-vfpdef = 268
-stmt = 269
-simple_stmt = 270
-small_stmt = 271
-expr_stmt = 272
-annassign = 273
-testlist_star_expr = 274
-augassign = 275
-del_stmt = 276
-pass_stmt = 277
-flow_stmt = 278
-break_stmt = 279
-continue_stmt = 280
-return_stmt = 281
-yield_stmt = 282
-raise_stmt = 283
-import_stmt = 284
-import_name = 285
-import_from = 286
-import_as_name = 287
-dotted_as_name = 288
-import_as_names = 289
-dotted_as_names = 290
-dotted_name = 291
-global_stmt = 292
-nonlocal_stmt = 293
-assert_stmt = 294
-compound_stmt = 295
-async_stmt = 296
-if_stmt = 297
-while_stmt = 298
-for_stmt = 299
-try_stmt = 300
-with_stmt = 301
-with_item = 302
-except_clause = 303
-suite = 304
-namedexpr_test = 305
-test = 306
-test_nocond = 307
-lambdef = 308
-lambdef_nocond = 309
-or_test = 310
-and_test = 311
-not_test = 312
-comparison = 313
-comp_op = 314
-star_expr = 315
-expr = 316
-xor_expr = 317
-and_expr = 318
-shift_expr = 319
-arith_expr = 320
-term = 321
-factor = 322
-power = 323
-atom_expr = 324
-atom = 325
-testlist_comp = 326
-trailer = 327
-subscriptlist = 328
-subscript = 329
-sliceop = 330
-exprlist = 331
-testlist = 332
-dictorsetmaker = 333
-classdef = 334
-arglist = 335
-argument = 336
-comp_iter = 337
-sync_comp_for = 338
-comp_for = 339
-comp_if = 340
-encoding_decl = 341
-yield_expr = 342
-yield_arg = 343
-func_body_suite = 344
-func_type_input = 345
-func_type = 346
-typelist = 347
-#--end constants--
-
-sym_name = {}
-for _name, _value in list(globals().items()):
-    if type(_value) is type(0):
-        sym_name[_value] = _name
-del _name, _value
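
Lib/symbol.py carried the grammar constants of the old LL(1) parser and had been emitting a DeprecationWarning; this drop removes it outright. Code that walked concrete parse trees through symbol/parser has no direct replacement and should move to the ast module, roughly:

    import ast

    # The numeric non-terminal codes from symbol.py have no successor;
    # the supported interface is the abstract syntax tree.
    tree = ast.parse("x = 1 + 2")
    print(ast.dump(tree, indent=2))
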
diff --git a/Lib/symtable.py b/Lib/symtable.py
index 521540f..98db1e2 100644
--- a/Lib/symtable.py
+++ b/Lib/symtable.py
@@ -10,6 +10,11 @@
 __all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
 
 def symtable(code, filename, compile_type):
+    """ Return the toplevel *SymbolTable* for the source code.
+
+    *filename* is the name of the file with the code
+    and *compile_type* is the *compile()* mode argument.
+    """
     top = _symtable.symtable(code, filename, compile_type)
     return _newSymbolTable(top, filename)
 
@@ -55,6 +60,11 @@
                                                             self._filename)
 
     def get_type(self):
+        """Return the type of the symbol table.
+
+        The values returned are 'class', 'module' and
+        'function'.
+        """
         if self._table.type == _symtable.TYPE_MODULE:
             return "module"
         if self._table.type == _symtable.TYPE_FUNCTION:
@@ -65,27 +75,51 @@
                "unexpected type: {0}".format(self._table.type)
 
     def get_id(self):
+        """Return an identifier for the table.
+        """
         return self._table.id
 
     def get_name(self):
+        """Return the table's name.
+
+        This is the name of the class or function, or
+        'top' if the table is for the module-level
+        (global) scope.
+        """
         return self._table.name
 
     def get_lineno(self):
+        """Return the number of the first line in the
+        block for the table.
+        """
         return self._table.lineno
 
     def is_optimized(self):
+        """Return *True* if the locals in the table
+        are optimizable.
+        """
         return bool(self._table.type == _symtable.TYPE_FUNCTION)
 
     def is_nested(self):
+        """Return *True* if the block is a nested class
+        or function."""
         return bool(self._table.nested)
 
     def has_children(self):
+        """Return *True* if the block has nested namespaces.
+        """
         return bool(self._table.children)
 
     def get_identifiers(self):
+        """Return a list of names of symbols in the table.
+        """
         return self._table.symbols.keys()
 
     def lookup(self, name):
+        """Lookup a *name* in the table.
+
+        Returns a *Symbol* instance.
+        """
         sym = self._symbols.get(name)
         if sym is None:
             flags = self._table.symbols[name]
@@ -96,6 +130,9 @@
         return sym
 
     def get_symbols(self):
+        """Return a list of *Symbol* instances for
+        names in the table.
+        """
         return [self.lookup(ident) for ident in self.get_identifiers()]
 
     def __check_children(self, name):
@@ -104,6 +141,8 @@
                 if st.name == name]
 
     def get_children(self):
+        """Return a list of the nested symbol tables.
+        """
         return [_newSymbolTable(st, self._filename)
                 for st in self._table.children]
 
@@ -122,11 +161,15 @@
                      if test_func(self._table.symbols[ident]))
 
     def get_parameters(self):
+        """Return a tuple of parameters to the function.
+        """
         if self.__params is None:
             self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
         return self.__params
 
     def get_locals(self):
+        """Return a tuple of locals in the function.
+        """
         if self.__locals is None:
             locs = (LOCAL, CELL)
             test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
@@ -134,6 +177,8 @@
         return self.__locals
 
     def get_globals(self):
+        """Return a tuple of globals in the function.
+        """
         if self.__globals is None:
             glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
             test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
@@ -141,11 +186,15 @@
         return self.__globals
 
     def get_nonlocals(self):
+        """Return a tuple of nonlocals in the function.
+        """
         if self.__nonlocals is None:
             self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL)
         return self.__nonlocals
 
     def get_frees(self):
+        """Return a tuple of free variables in the function.
+        """
         if self.__frees is None:
             is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
             self.__frees = self.__idents_matching(is_free)
@@ -157,6 +206,8 @@
     __methods = None
 
     def get_methods(self):
+        """Return a tuple of methods declared in the class.
+        """
         if self.__methods is None:
             d = {}
             for st in self._table.children:
@@ -178,12 +229,19 @@
         return "<symbol {0!r}>".format(self.__name)
 
     def get_name(self):
+        """Return a name of a symbol.
+        """
         return self.__name
 
     def is_referenced(self):
+        """Return *True* if the symbol is used in
+        its block.
+        """
         return bool(self.__flags & _symtable.USE)
 
     def is_parameter(self):
+        """Return *True* if the symbol is a parameter.
+        """
         return bool(self.__flags & DEF_PARAM)
 
     def is_global(self):
@@ -193,9 +251,12 @@
                     or (self.__module_scope and self.__flags & DEF_BOUND))
 
     def is_nonlocal(self):
+        """Return *True* if the symbol is nonlocal."""
         return bool(self.__flags & DEF_NONLOCAL)
 
     def is_declared_global(self):
+        """Return *True* if the symbol is declared global
+        with a global statement."""
         return bool(self.__scope == GLOBAL_EXPLICIT)
 
     def is_local(self):
@@ -205,19 +266,28 @@
                     or (self.__module_scope and self.__flags & DEF_BOUND))
 
     def is_annotated(self):
+        """Return *True* if the symbol is annotated.
+        """
         return bool(self.__flags & DEF_ANNOT)
 
     def is_free(self):
+        """Return *True* if a referenced symbol is
+        not assigned to.
+        """
         return bool(self.__scope == FREE)
 
     def is_imported(self):
+        """Return *True* if the symbol is created from
+        an import statement.
+        """
         return bool(self.__flags & DEF_IMPORT)
 
     def is_assigned(self):
+        """Return *True* if a symbol is assigned to."""
         return bool(self.__flags & DEF_LOCAL)
 
     def is_namespace(self):
-        """Returns true if name binding introduces new namespace.
+        """Returns *True* if name binding introduces new namespace.
 
         If the name is used as the target of a function or class
         statement, this will be true.
@@ -234,7 +304,7 @@
         return self.__namespaces
 
     def get_namespace(self):
-        """Returns the single namespace bound to this name.
+        """Return the single namespace bound to this name.
 
         Raises ValueError if the name is bound to multiple namespaces.
         """
diff --git a/Lib/sysconfig.py b/Lib/sysconfig.py
index bf04ac5..daf9f00 100644
--- a/Lib/sysconfig.py
+++ b/Lib/sysconfig.py
@@ -18,6 +18,11 @@
     'parse_config_h',
 ]
 
+# Keys for get_config_var() that are never converted to Python integers.
+_ALWAYS_STR = {
+    'MACOSX_DEPLOYMENT_TARGET',
+}
+
 _INSTALL_SCHEMES = {
     'posix_prefix': {
         'stdlib': '{installed_base}/{platlibdir}/python{py_version_short}',
@@ -51,42 +56,73 @@
         'scripts': '{base}/Scripts',
         'data': '{base}',
         },
-    # NOTE: When modifying "purelib" scheme, update site._get_path() too.
-    'nt_user': {
-        'stdlib': '{userbase}/Python{py_version_nodot}',
-        'platstdlib': '{userbase}/Python{py_version_nodot}',
-        'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
-        'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
-        'include': '{userbase}/Python{py_version_nodot}/Include',
-        'scripts': '{userbase}/Python{py_version_nodot}/Scripts',
-        'data': '{userbase}',
-        },
-    'posix_user': {
-        'stdlib': '{userbase}/{platlibdir}/python{py_version_short}',
-        'platstdlib': '{userbase}/{platlibdir}/python{py_version_short}',
-        'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
-        'platlib': '{userbase}/{platlibdir}/python{py_version_short}/site-packages',
-        'include': '{userbase}/include/python{py_version_short}',
-        'scripts': '{userbase}/bin',
-        'data': '{userbase}',
-        },
-    'osx_framework_user': {
-        'stdlib': '{userbase}/lib/python',
-        'platstdlib': '{userbase}/lib/python',
-        'purelib': '{userbase}/lib/python/site-packages',
-        'platlib': '{userbase}/lib/python/site-packages',
-        'include': '{userbase}/include',
-        'scripts': '{userbase}/bin',
-        'data': '{userbase}',
-        },
+    }
+
+
+# NOTE: site.py has copy of this function.
+# Sync it when modify this function.
+def _getuserbase():
+    env_base = os.environ.get("PYTHONUSERBASE", None)
+    if env_base:
+        return env_base
+
+    # VxWorks has no home directories
+    if sys.platform == "vxworks":
+        return None
+
+    def joinuser(*args):
+        return os.path.expanduser(os.path.join(*args))
+
+    if os.name == "nt":
+        base = os.environ.get("APPDATA") or "~"
+        return joinuser(base, "Python")
+
+    if sys.platform == "darwin" and sys._framework:
+        return joinuser("~", "Library", sys._framework,
+                        f"{sys.version_info[0]}.{sys.version_info[1]}")
+
+    return joinuser("~", ".local")
+
+_HAS_USER_BASE = (_getuserbase() is not None)
+
+if _HAS_USER_BASE:
+    _INSTALL_SCHEMES |= {
+        # NOTE: When modifying "purelib" scheme, update site._get_path() too.
+        'nt_user': {
+            'stdlib': '{userbase}/Python{py_version_nodot_plat}',
+            'platstdlib': '{userbase}/Python{py_version_nodot_plat}',
+            'purelib': '{userbase}/Python{py_version_nodot_plat}/site-packages',
+            'platlib': '{userbase}/Python{py_version_nodot_plat}/site-packages',
+            'include': '{userbase}/Python{py_version_nodot_plat}/Include',
+            'scripts': '{userbase}/Python{py_version_nodot_plat}/Scripts',
+            'data': '{userbase}',
+            },
+        'posix_user': {
+            'stdlib': '{userbase}/{platlibdir}/python{py_version_short}',
+            'platstdlib': '{userbase}/{platlibdir}/python{py_version_short}',
+            'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
+            'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
+            'include': '{userbase}/include/python{py_version_short}',
+            'scripts': '{userbase}/bin',
+            'data': '{userbase}',
+            },
+        'osx_framework_user': {
+            'stdlib': '{userbase}/lib/python',
+            'platstdlib': '{userbase}/lib/python',
+            'purelib': '{userbase}/lib/python/site-packages',
+            'platlib': '{userbase}/lib/python/site-packages',
+            'include': '{userbase}/include/python{py_version_short}',
+            'scripts': '{userbase}/bin',
+            'data': '{userbase}',
+            },
     }
 
 _SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
                 'scripts', 'data')
 
 _PY_VERSION = sys.version.split()[0]
-_PY_VERSION_SHORT = '%d.%d' % sys.version_info[:2]
-_PY_VERSION_SHORT_NO_DOT = '%d%d' % sys.version_info[:2]
+_PY_VERSION_SHORT = f'{sys.version_info[0]}.{sys.version_info[1]}'
+_PY_VERSION_SHORT_NO_DOT = f'{sys.version_info[0]}{sys.version_info[1]}'
 _PREFIX = os.path.normpath(sys.prefix)
 _BASE_PREFIX = os.path.normpath(sys.base_prefix)
 _EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
@@ -94,6 +130,12 @@
 _CONFIG_VARS = None
 _USER_BASE = None
 
+# Regexes needed for parsing Makefile (and similar syntaxes,
+# like old-style Setup files).
+_variable_rx = r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)"
+_findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)"
+_findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}"
+
 
 def _safe_realpath(path):
     try:
@@ -142,18 +184,24 @@
 
 if _PYTHON_BUILD:
     for scheme in ('posix_prefix', 'posix_home'):
-        _INSTALL_SCHEMES[scheme]['include'] = '{srcdir}/Include'
-        _INSTALL_SCHEMES[scheme]['platinclude'] = '{projectbase}/.'
+        # On POSIX-y platforms, Python will:
+        # - Build from .h files in 'headers' (which is only added to the
+        #   scheme when building CPython)
+        # - Install .h files to 'include'
+        scheme = _INSTALL_SCHEMES[scheme]
+        scheme['headers'] = scheme['include']
+        scheme['include'] = '{srcdir}/Include'
+        scheme['platinclude'] = '{projectbase}/.'
 
 
 def _subst_vars(s, local_vars):
     try:
         return s.format(**local_vars)
-    except KeyError:
+    except KeyError as var:
         try:
             return s.format(**os.environ)
-        except KeyError as var:
-            raise AttributeError('{%s}' % var) from None
+        except KeyError:
+            raise AttributeError(f'{var}') from None
 
 def _extend_dict(target_dict, other_dict):
     target_keys = target_dict.keys()
@@ -176,60 +224,62 @@
     return res
 
 
-def _get_default_scheme():
-    if os.name == 'posix':
-        # the default scheme for posix is posix_prefix
-        return 'posix_prefix'
-    return os.name
+def _get_preferred_schemes():
+    if os.name == 'nt':
+        return {
+            'prefix': 'nt',
+            'home': 'posix_home',
+            'user': 'nt_user',
+        }
+    if sys.platform == 'darwin' and sys._framework:
+        return {
+            'prefix': 'posix_prefix',
+            'home': 'posix_home',
+            'user': 'osx_framework_user',
+        }
+    return {
+        'prefix': 'posix_prefix',
+        'home': 'posix_home',
+        'user': 'posix_user',
+    }
 
 
-# NOTE: site.py has copy of this function.
-# Sync it when modify this function.
-def _getuserbase():
-    env_base = os.environ.get("PYTHONUSERBASE", None)
-    if env_base:
-        return env_base
-
-    def joinuser(*args):
-        return os.path.expanduser(os.path.join(*args))
-
-    if os.name == "nt":
-        base = os.environ.get("APPDATA") or "~"
-        return joinuser(base, "Python")
-
-    if sys.platform == "darwin" and sys._framework:
-        return joinuser("~", "Library", sys._framework,
-                        "%d.%d" % sys.version_info[:2])
-
-    return joinuser("~", ".local")
+def get_preferred_scheme(key):
+    scheme = _get_preferred_schemes()[key]
+    if scheme not in _INSTALL_SCHEMES:
+        raise ValueError(
+            f"{key!r} returned {scheme!r}, which is not a valid scheme "
+            f"on this platform"
+        )
+    return scheme
 
 
-def _parse_makefile(filename, vars=None):
+def get_default_scheme():
+    return get_preferred_scheme('prefix')
+
+
+def _parse_makefile(filename, vars=None, keep_unresolved=True):
     """Parse a Makefile-style file.
 
     A dictionary containing name/value pairs is returned.  If an
     optional dictionary is passed in as the second argument, it is
     used instead of a new dictionary.
     """
-    # Regexes needed for parsing Makefile (and similar syntaxes,
-    # like old-style Setup files).
     import re
-    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
-    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
-    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
 
     if vars is None:
         vars = {}
     done = {}
     notdone = {}
 
-    with open(filename, errors="surrogateescape") as f:
+    with open(filename, encoding=sys.getfilesystemencoding(),
+              errors="surrogateescape") as f:
         lines = f.readlines()
 
     for line in lines:
         if line.startswith('#') or line.strip() == '':
             continue
-        m = _variable_rx.match(line)
+        m = re.match(_variable_rx, line)
         if m:
             n, v = m.group(1, 2)
             v = v.strip()
@@ -240,6 +290,9 @@
                 notdone[n] = v
             else:
                 try:
+                    if n in _ALWAYS_STR:
+                        raise ValueError
+
                     v = int(v)
                 except ValueError:
                     # insert literal `$'
@@ -259,8 +312,8 @@
     while len(variables) > 0:
         for name in tuple(variables):
             value = notdone[name]
-            m1 = _findvar1_rx.search(value)
-            m2 = _findvar2_rx.search(value)
+            m1 = re.search(_findvar1_rx, value)
+            m2 = re.search(_findvar2_rx, value)
             if m1 and m2:
                 m = m1 if m1.start() < m2.start() else m2
             else:
@@ -298,6 +351,8 @@
                         notdone[name] = value
                     else:
                         try:
+                            if name in _ALWAYS_STR:
+                                raise ValueError
                             value = int(value)
                         except ValueError:
                             done[name] = value.strip()
@@ -313,9 +368,12 @@
                                 done[name] = value
 
             else:
+                # Add unresolved variables to the done dict.
+                # This is disabled when called from distutils.sysconfig.
+                if keep_unresolved:
+                    done[name] = value
                 # bogus variable reference (e.g. "prefix=$/opt/python");
                 # just drop it since we can't deal
-                done[name] = value
                 variables.remove(name)
 
     # strip spurious spaces
@@ -333,21 +391,20 @@
     if _PYTHON_BUILD:
         return os.path.join(_sys_home or _PROJECT_BASE, "Makefile")
     if hasattr(sys, 'abiflags'):
-        config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
+        config_dir_name = f'config-{_PY_VERSION_SHORT}{sys.abiflags}'
     else:
         config_dir_name = 'config'
     if hasattr(sys.implementation, '_multiarch'):
-        config_dir_name += '-%s' % sys.implementation._multiarch
+        config_dir_name += f'-{sys.implementation._multiarch}'
     return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
 
 
 def _get_sysconfigdata_name():
-    return os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
-        '_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
-        abi=sys.abiflags,
-        platform=sys.platform,
-        multiarch=getattr(sys.implementation, '_multiarch', ''),
-    ))
+    multiarch = getattr(sys.implementation, '_multiarch', '')
+    return os.environ.get(
+        '_PYTHON_SYSCONFIGDATA_NAME',
+        f'_sysconfigdata_{sys.abiflags}_{sys.platform}_{multiarch}',
+    )
 
 
 def _generate_posix_vars():
@@ -359,19 +416,19 @@
     try:
         _parse_makefile(makefile, vars)
     except OSError as e:
-        msg = "invalid Python installation: unable to open %s" % makefile
+        msg = f"invalid Python installation: unable to open {makefile}"
         if hasattr(e, "strerror"):
-            msg = msg + " (%s)" % e.strerror
+            msg = f"{msg} ({e.strerror})"
         raise OSError(msg)
     # load the installed pyconfig.h:
     config_h = get_config_h_filename()
     try:
-        with open(config_h) as f:
+        with open(config_h, encoding="utf-8") as f:
             parse_config_h(f, vars)
     except OSError as e:
-        msg = "invalid Python installation: unable to open %s" % config_h
+        msg = f"invalid Python installation: unable to open {config_h}"
         if hasattr(e, "strerror"):
-            msg = msg + " (%s)" % e.strerror
+            msg = f"{msg} ({e.strerror})"
         raise OSError(msg)
     # On AIX, there are wrong paths to the linker scripts in the Makefile
     # -- these paths are relative to the Python source, but when installed
@@ -397,7 +454,7 @@
         module.build_time_vars = vars
         sys.modules[name] = module
 
-    pybuilddir = 'build/lib.%s-%s' % (get_platform(), _PY_VERSION_SHORT)
+    pybuilddir = f'build/lib.{get_platform()}-{_PY_VERSION_SHORT}'
     if hasattr(sys, "gettotalrefcount"):
         pybuilddir += '-pydebug'
     os.makedirs(pybuilddir, exist_ok=True)
@@ -424,13 +481,15 @@
 def _init_non_posix(vars):
     """Initialize the module as appropriate for NT"""
     # set basic install directories
+    import _imp
     vars['LIBDEST'] = get_path('stdlib')
     vars['BINLIBDEST'] = get_path('platstdlib')
     vars['INCLUDEPY'] = get_path('include')
-    vars['EXT_SUFFIX'] = '.pyd'
+    vars['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
     vars['EXE'] = '.exe'
     vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
     vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
+    vars['TZPATH'] = ''
 
 #
 # public APIs
@@ -458,6 +517,8 @@
         if m:
             n, v = m.group(1, 2)
             try:
+                if n in _ALWAYS_STR:
+                    raise ValueError
                 v = int(v)
             except ValueError:
                 pass
@@ -491,7 +552,7 @@
     return _SCHEME_KEYS
 
 
-def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
+def get_paths(scheme=get_default_scheme(), vars=None, expand=True):
     """Return a mapping containing an install scheme.
 
     ``scheme`` is the install scheme name. If not provided, it will
@@ -503,7 +564,7 @@
         return _INSTALL_SCHEMES[scheme]
 
 
-def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
+def get_path(name, scheme=get_default_scheme(), vars=None, expand=True):
     """Return a path corresponding to the scheme.
 
     ``scheme`` is the install scheme name.
@@ -543,20 +604,24 @@
         except AttributeError:
             # sys.abiflags may not be defined on all platforms.
             _CONFIG_VARS['abiflags'] = ''
+        try:
+            _CONFIG_VARS['py_version_nodot_plat'] = sys.winver.replace('.', '')
+        except AttributeError:
+            _CONFIG_VARS['py_version_nodot_plat'] = ''
 
         if os.name == 'nt':
             _init_non_posix(_CONFIG_VARS)
-            _CONFIG_VARS['TZPATH'] = ''
         if os.name == 'posix':
             _init_posix(_CONFIG_VARS)
         # For backward compatibility, see issue19555
         SO = _CONFIG_VARS.get('EXT_SUFFIX')
         if SO is not None:
             _CONFIG_VARS['SO'] = SO
-        # Setting 'userbase' is done below the call to the
-        # init function to enable using 'get_config_var' in
-        # the init-function.
-        _CONFIG_VARS['userbase'] = _getuserbase()
+        if _HAS_USER_BASE:
+            # Setting 'userbase' is done below the call to the
+            # init function to enable using 'get_config_var' in
+            # the init-function.
+            _CONFIG_VARS['userbase'] = _getuserbase()
 
         # Always convert srcdir to an absolute path
         srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE)
@@ -653,16 +718,16 @@
         # At least on Linux/Intel, 'machine' is the processor --
         # i386, etc.
         # XXX what about Alpha, SPARC, etc?
-        return  "%s-%s" % (osname, machine)
+        return  f"{osname}-{machine}"
     elif osname[:5] == "sunos":
         if release[0] >= "5":           # SunOS 5 == Solaris 2
             osname = "solaris"
-            release = "%d.%s" % (int(release[0]) - 3, release[2:])
+            release = f"{int(release[0]) - 3}.{release[2:]}"
             # We can't use "platform.architecture()[0]" because of a
             # bootstrap problem. We use a dict to get an error
             # if something suspicious happens.
             bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
-            machine += ".%s" % bitness[sys.maxsize]
+            machine += f".{bitness[sys.maxsize]}"
         # fall through to standard osname-release-machine representation
     elif osname[:3] == "aix":
         from _aix_support import aix_platform
@@ -680,18 +745,44 @@
                                             get_config_vars(),
                                             osname, release, machine)
 
-    return "%s-%s-%s" % (osname, release, machine)
+    return f"{osname}-{release}-{machine}"
 
 
 def get_python_version():
     return _PY_VERSION_SHORT
 
 
+def expand_makefile_vars(s, vars):
+    """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
+    'string' according to 'vars' (a dictionary mapping variable names to
+    values).  Variables not present in 'vars' are silently expanded to the
+    empty string.  The variable values in 'vars' should not contain further
+    variable expansions; if 'vars' is the output of 'parse_makefile()',
+    you're fine.  Returns a variable-expanded version of 's'.
+    """
+    import re
+
+    # This algorithm does multiple expansion, so if vars['foo'] contains
+    # "${bar}", it will expand ${foo} to ${bar}, and then expand
+    # ${bar}... and so forth.  This is fine as long as 'vars' comes from
+    # 'parse_makefile()', which takes care of such expansions eagerly,
+    # according to make's variable expansion semantics.
+
+    while True:
+        m = re.search(_findvar1_rx, s) or re.search(_findvar2_rx, s)
+        if m:
+            (beg, end) = m.span()
+            s = s[0:beg] + vars.get(m.group(1)) + s[end:]
+        else:
+            break
+    return s
+
+
 def _print_dict(title, data):
     for index, (key, value) in enumerate(sorted(data.items())):
         if index == 0:
-            print('%s: ' % (title))
-        print('\t%s = "%s"' % (key, value))
+            print(f'{title}: ')
+        print(f'\t{key} = "{value}"')
 
 
 def _main():
@@ -699,9 +790,9 @@
     if '--generate-posix-vars' in sys.argv:
         _generate_posix_vars()
         return
-    print('Platform: "%s"' % get_platform())
-    print('Python version: "%s"' % get_python_version())
-    print('Current installation scheme: "%s"' % _get_default_scheme())
+    print(f'Platform: "{get_platform()}"')
+    print(f'Python version: "{get_python_version()}"')
+    print(f'Current installation scheme: "{get_default_scheme()}"')
     print()
     _print_dict('Paths', get_paths())
     print()
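
The sysconfig rework replaces the private _get_default_scheme() with public helpers (get_default_scheme(), get_preferred_scheme()), gates the user schemes on _HAS_USER_BASE, and adds a 'headers' path when building CPython itself. A quick sketch; the outputs shown are what one would expect on this Windows prebuilt:

    import sysconfig

    print(sysconfig.get_default_scheme())          # e.g. 'nt'
    print(sysconfig.get_preferred_scheme("user"))  # e.g. 'nt_user'
    print(sysconfig.get_paths()["purelib"])        # site-packages of that scheme
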
diff --git a/Lib/tarfile.py b/Lib/tarfile.py
index 1d15612..6ada9a0 100644
--- a/Lib/tarfile.py
+++ b/Lib/tarfile.py
@@ -200,6 +200,7 @@
     # base-256 representation. This allows values up to (256**(digits-1))-1.
     # A 0o200 byte indicates a positive number, a 0o377 byte a negative
     # number.
+    original_n = n
     n = int(n)
     if 0 <= n < 8 ** (digits - 1):
         s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL
@@ -363,7 +364,7 @@
                 try:
                     import zlib
                 except ImportError:
-                    raise CompressionError("zlib module is not available")
+                    raise CompressionError("zlib module is not available") from None
                 self.zlib = zlib
                 self.crc = zlib.crc32(b"")
                 if mode == "r":
@@ -376,7 +377,7 @@
                 try:
                     import bz2
                 except ImportError:
-                    raise CompressionError("bz2 module is not available")
+                    raise CompressionError("bz2 module is not available") from None
                 if mode == "r":
                     self.dbuf = b""
                     self.cmp = bz2.BZ2Decompressor()
@@ -388,7 +389,7 @@
                 try:
                     import lzma
                 except ImportError:
-                    raise CompressionError("lzma module is not available")
+                    raise CompressionError("lzma module is not available") from None
                 if mode == "r":
                     self.dbuf = b""
                     self.cmp = lzma.LZMADecompressor()
@@ -541,8 +542,8 @@
                     break
             try:
                 buf = self.cmp.decompress(buf)
-            except self.exception:
-                raise ReadError("invalid compressed data")
+            except self.exception as e:
+                raise ReadError("invalid compressed data") from e
             t.append(buf)
             c += len(buf)
         t = b"".join(t)
@@ -887,15 +888,24 @@
         # Test number fields for values that exceed the field limit or values
         # that like to be stored as float.
         for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
-            if name in pax_headers:
-                # The pax header has priority. Avoid overflow.
-                info[name] = 0
-                continue
+            needs_pax = False
 
             val = info[name]
-            if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
-                pax_headers[name] = str(val)
+            val_is_float = isinstance(val, float)
+            val_int = round(val) if val_is_float else val
+            if not 0 <= val_int < 8 ** (digits - 1):
+                # Avoid overflow.
                 info[name] = 0
+                needs_pax = True
+            elif val_is_float:
+                # Put rounded value in ustar header, and full
+                # precision value in pax header.
+                info[name] = val_int
+                needs_pax = True
+
+            # The existing pax header has priority.
+            if needs_pax and name not in pax_headers:
+                pax_headers[name] = str(val)
 
         # Create a pax extended header if necessary.
         if pax_headers:
@@ -1164,8 +1174,8 @@
         # Fetch the next header and process it.
         try:
             next = self.fromtarfile(tarfile)
-        except HeaderError:
-            raise SubsequentHeaderError("missing or bad subsequent header")
+        except HeaderError as e:
+            raise SubsequentHeaderError(str(e)) from None
 
         # Patch the TarInfo object from the next header with
         # the longname information.
@@ -1277,8 +1287,8 @@
         # Fetch the next header.
         try:
             next = self.fromtarfile(tarfile)
-        except HeaderError:
-            raise SubsequentHeaderError("missing or bad subsequent header")
+        except HeaderError as e:
+            raise SubsequentHeaderError(str(e)) from None
 
         # Process GNU sparse information.
         if "GNU.sparse.map" in pax_headers:
@@ -1533,7 +1543,7 @@
                         self.fileobj.seek(self.offset)
                         break
                     except HeaderError as e:
-                        raise ReadError(str(e))
+                        raise ReadError(str(e)) from None
 
             if self.mode in ("a", "w", "x"):
                 self._loaded = True
@@ -1603,17 +1613,20 @@
             # Find out which *open() is appropriate for opening the file.
             def not_compressed(comptype):
                 return cls.OPEN_METH[comptype] == 'taropen'
+            error_msgs = []
             for comptype in sorted(cls.OPEN_METH, key=not_compressed):
                 func = getattr(cls, cls.OPEN_METH[comptype])
                 if fileobj is not None:
                     saved_pos = fileobj.tell()
                 try:
                     return func(name, "r", fileobj, **kwargs)
-                except (ReadError, CompressionError):
+                except (ReadError, CompressionError) as e:
+                    error_msgs.append(f'- method {comptype}: {e!r}')
                     if fileobj is not None:
                         fileobj.seek(saved_pos)
                     continue
-            raise ReadError("file could not be opened successfully")
+            error_msgs_summary = '\n'.join(error_msgs)
+            raise ReadError(f"file could not be opened successfully:\n{error_msgs_summary}")
 
         elif ":" in mode:
             filemode, comptype = mode.split(":", 1)
@@ -1669,21 +1682,21 @@
         try:
             from gzip import GzipFile
         except ImportError:
-            raise CompressionError("gzip module is not available")
+            raise CompressionError("gzip module is not available") from None
 
         try:
             fileobj = GzipFile(name, mode + "b", compresslevel, fileobj)
-        except OSError:
+        except OSError as e:
             if fileobj is not None and mode == 'r':
-                raise ReadError("not a gzip file")
+                raise ReadError("not a gzip file") from e
             raise
 
         try:
             t = cls.taropen(name, mode, fileobj, **kwargs)
-        except OSError:
+        except OSError as e:
             fileobj.close()
             if mode == 'r':
-                raise ReadError("not a gzip file")
+                raise ReadError("not a gzip file") from e
             raise
         except:
             fileobj.close()
@@ -1702,16 +1715,16 @@
         try:
             from bz2 import BZ2File
         except ImportError:
-            raise CompressionError("bz2 module is not available")
+            raise CompressionError("bz2 module is not available") from None
 
         fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel)
 
         try:
             t = cls.taropen(name, mode, fileobj, **kwargs)
-        except (OSError, EOFError):
+        except (OSError, EOFError) as e:
             fileobj.close()
             if mode == 'r':
-                raise ReadError("not a bzip2 file")
+                raise ReadError("not a bzip2 file") from e
             raise
         except:
             fileobj.close()
@@ -1730,16 +1743,16 @@
         try:
             from lzma import LZMAFile, LZMAError
         except ImportError:
-            raise CompressionError("lzma module is not available")
+            raise CompressionError("lzma module is not available") from None
 
         fileobj = LZMAFile(fileobj or name, mode, preset=preset)
 
         try:
             t = cls.taropen(name, mode, fileobj, **kwargs)
-        except (LZMAError, EOFError):
+        except (LZMAError, EOFError) as e:
             fileobj.close()
             if mode == 'r':
-                raise ReadError("not an lzma file")
+                raise ReadError("not an lzma file") from e
             raise
         except:
             fileobj.close()
@@ -1785,7 +1798,7 @@
            than once in the archive, its last occurrence is assumed to be the
            most up-to-date version.
         """
-        tarinfo = self._getmember(name)
+        tarinfo = self._getmember(name.rstrip('/'))
         if tarinfo is None:
             raise KeyError("filename %r not found" % name)
         return tarinfo
@@ -2253,7 +2266,7 @@
                 self._extract_member(self._find_link_target(tarinfo),
                                      targetpath)
             except KeyError:
-                raise ExtractError("unable to resolve link inside archive")
+                raise ExtractError("unable to resolve link inside archive") from None
 
     def chown(self, tarinfo, targetpath, numeric_owner):
         """Set owner of targetpath according to tarinfo. If numeric_owner
@@ -2281,16 +2294,16 @@
                     os.lchown(targetpath, u, g)
                 else:
                     os.chown(targetpath, u, g)
-            except OSError:
-                raise ExtractError("could not change owner")
+            except OSError as e:
+                raise ExtractError("could not change owner") from e
 
     def chmod(self, tarinfo, targetpath):
         """Set file permissions of targetpath according to tarinfo.
         """
         try:
             os.chmod(targetpath, tarinfo.mode)
-        except OSError:
-            raise ExtractError("could not change mode")
+        except OSError as e:
+            raise ExtractError("could not change mode") from e
 
     def utime(self, tarinfo, targetpath):
         """Set modification time of targetpath according to tarinfo.
@@ -2299,8 +2312,8 @@
             return
         try:
             os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
-        except OSError:
-            raise ExtractError("could not change modification time")
+        except OSError as e:
+            raise ExtractError("could not change modification time") from e
 
     #--------------------------------------------------------------------------
     def next(self):
@@ -2336,15 +2349,24 @@
                     self.offset += BLOCKSIZE
                     continue
                 elif self.offset == 0:
-                    raise ReadError(str(e))
+                    raise ReadError(str(e)) from None
             except EmptyHeaderError:
                 if self.offset == 0:
-                    raise ReadError("empty file")
+                    raise ReadError("empty file") from None
             except TruncatedHeaderError as e:
                 if self.offset == 0:
-                    raise ReadError(str(e))
+                    raise ReadError(str(e)) from None
             except SubsequentHeaderError as e:
-                raise ReadError(str(e))
+                raise ReadError(str(e)) from None
+            except Exception as e:
+                try:
+                    import zlib
+                    if isinstance(e, zlib.error):
+                        raise ReadError(f'zlib error: {e}') from None
+                    else:
+                        raise e
+                except ImportError:
+                    raise e
             break
 
         if tarinfo is not None:
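
With the tarfile changes, a failed open now reports why each candidate opener (tar, gzip, bz2, xz) rejected the file instead of the bare "file could not be opened successfully", and exception chaining is set explicitly (`from e` / `from None`) throughout. For example:

    import tarfile

    with open("junk.bin", "wb") as f:  # clearly not a tar archive
        f.write(b"\x00" * 64)
    try:
        tarfile.open("junk.bin")
    except tarfile.ReadError as exc:
        print(exc)  # one "- method ...: ..." line per attempted opener
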
diff --git a/Lib/tempfile.py b/Lib/tempfile.py
index 770f72c..7b68212 100644
--- a/Lib/tempfile.py
+++ b/Lib/tempfile.py
@@ -88,6 +88,10 @@
     for arg in args:
         if arg is None:
             continue
+
+        if isinstance(arg, _os.PathLike):
+            arg = _os.fspath(arg)
+
         if isinstance(arg, bytes):
             if return_type is str:
                 raise TypeError("Can't mix bytes and non-bytes in "
@@ -99,7 +103,11 @@
                                 "path components.")
             return_type = str
     if return_type is None:
-        return str  # tempfile APIs return a str by default.
+        if tempdir is None or isinstance(tempdir, str):
+            return str  # tempfile APIs return a str by default.
+        else:
+            # we could check for bytes but it'll fail later on anyway
+            return bytes
     return return_type
 
 
@@ -143,10 +151,7 @@
         return self
 
     def __next__(self):
-        c = self.characters
-        choose = self.rng.choice
-        letters = [choose(c) for dummy in range(8)]
-        return ''.join(letters)
+        return ''.join(self.rng.choices(self.characters, k=8))
 
 def _candidate_tempdir_list():
     """Generate a list of candidate temporary directories which
@@ -268,17 +273,17 @@
 # User visible interfaces.
 
 def gettempprefix():
-    """The default prefix for temporary directories."""
-    return template
+    """The default prefix for temporary directories as string."""
+    return _os.fsdecode(template)
 
 def gettempprefixb():
     """The default prefix for temporary directories as bytes."""
-    return _os.fsencode(gettempprefix())
+    return _os.fsencode(template)
 
 tempdir = None
 
-def gettempdir():
-    """Accessor for tempfile.tempdir."""
+def _gettempdir():
+    """Private accessor for tempfile.tempdir."""
     global tempdir
     if tempdir is None:
         _once_lock.acquire()
@@ -289,9 +294,13 @@
             _once_lock.release()
     return tempdir
 
+def gettempdir():
+    """Returns tempfile.tempdir as str."""
+    return _os.fsdecode(_gettempdir())
+
 def gettempdirb():
-    """A bytes version of tempfile.gettempdir()."""
-    return _os.fsencode(gettempdir())
+    """Returns tempfile.tempdir as bytes."""
+    return _os.fsencode(_gettempdir())
 
 def mkstemp(suffix=None, prefix=None, dir=None, text=False):
     """User-callable function to create and return a unique temporary
@@ -538,6 +547,9 @@
     if _os.name == 'nt' and delete:
         flags |= _os.O_TEMPORARY
 
+    if "b" not in mode:
+        encoding = _io.text_encoding(encoding)
+
     (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
     try:
         file = _io.open(fd, mode, buffering=buffering,
@@ -578,6 +590,9 @@
         """
         global _O_TMPFILE_WORKS
 
+        if "b" not in mode:
+            encoding = _io.text_encoding(encoding)
+
         prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
 
         flags = _bin_openflags
@@ -633,6 +648,7 @@
         if 'b' in mode:
             self._file = _io.BytesIO()
         else:
+            encoding = _io.text_encoding(encoding)
             self._file = _io.TextIOWrapper(_io.BytesIO(),
                             encoding=encoding, errors=errors,
                             newline=newline)
@@ -763,7 +779,7 @@
         return rv
 
 
-class TemporaryDirectory(object):
+class TemporaryDirectory:
     """Create and return a temporary directory.  This has the same
     behavior as mkdtemp but can be used as a context manager.  For
     example:
@@ -775,14 +791,17 @@
     in it are removed.
     """
 
-    def __init__(self, suffix=None, prefix=None, dir=None):
+    def __init__(self, suffix=None, prefix=None, dir=None,
+                 ignore_cleanup_errors=False):
         self.name = mkdtemp(suffix, prefix, dir)
+        self._ignore_cleanup_errors = ignore_cleanup_errors
         self._finalizer = _weakref.finalize(
             self, self._cleanup, self.name,
-            warn_message="Implicitly cleaning up {!r}".format(self))
+            warn_message="Implicitly cleaning up {!r}".format(self),
+            ignore_errors=self._ignore_cleanup_errors)
 
     @classmethod
-    def _rmtree(cls, name):
+    def _rmtree(cls, name, ignore_errors=False):
         def onerror(func, path, exc_info):
             if issubclass(exc_info[0], PermissionError):
                 def resetperms(path):
@@ -801,19 +820,20 @@
                         _os.unlink(path)
                     # PermissionError is raised on FreeBSD for directories
                     except (IsADirectoryError, PermissionError):
-                        cls._rmtree(path)
+                        cls._rmtree(path, ignore_errors=ignore_errors)
                 except FileNotFoundError:
                     pass
             elif issubclass(exc_info[0], FileNotFoundError):
                 pass
             else:
-                raise
+                if not ignore_errors:
+                    raise
 
         _shutil.rmtree(name, onerror=onerror)
 
     @classmethod
-    def _cleanup(cls, name, warn_message):
-        cls._rmtree(name)
+    def _cleanup(cls, name, warn_message, ignore_errors=False):
+        cls._rmtree(name, ignore_errors=ignore_errors)
         _warnings.warn(warn_message, ResourceWarning)
 
     def __repr__(self):
@@ -826,7 +846,7 @@
         self.cleanup()
 
     def cleanup(self):
-        if self._finalizer.detach():
-            self._rmtree(self.name)
+        if self._finalizer.detach() or _os.path.exists(self.name):
+            self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors)
 
     __class_getitem__ = classmethod(_types.GenericAlias)
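
TemporaryDirectory gains ignore_cleanup_errors, most useful on Windows (this prebuilt's platform), where a still-open file cannot be deleted and cleanup would otherwise raise. A sketch:

    import os
    import tempfile

    with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as d:
        f = open(os.path.join(d, "scratch.txt"), "w")
        f.write("data")
        # Even if f were leaked here, exiting the context would not raise;
        # the directory is removed as far as the OS allows.
        f.close()
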
diff --git a/Lib/textwrap.py b/Lib/textwrap.py
index 30e693c..841de9b 100644
--- a/Lib/textwrap.py
+++ b/Lib/textwrap.py
@@ -215,8 +215,16 @@
         # If we're allowed to break long words, then do so: put as much
         # of the next chunk onto the current line as will fit.
         if self.break_long_words:
-            cur_line.append(reversed_chunks[-1][:space_left])
-            reversed_chunks[-1] = reversed_chunks[-1][space_left:]
+            end = space_left
+            chunk = reversed_chunks[-1]
+            if self.break_on_hyphens and len(chunk) > space_left:
+                # break after last hyphen, but only if there are
+                # non-hyphens before it
+                hyphen = chunk.rfind('-', 0, space_left)
+                if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]):
+                    end = hyphen + 1
+            cur_line.append(chunk[:end])
+            reversed_chunks[-1] = chunk[end:]
 
         # Otherwise, we have to preserve the long word intact.  Only add
         # it to the current line if there's nothing already there --
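
The textwrap change makes _handle_long_word prefer breaking an over-long chunk just after its last hyphen that fits, rather than at an arbitrary column. It only shows for chunks the word-splitting regex leaves intact, e.g. hyphens adjoining digits:

    import textwrap

    # "A1-B2-C3-D4-E5-F6" survives chunking because its hyphens follow
    # digits, so the long-word breaker sees the whole chunk.
    print(textwrap.wrap("serial A1-B2-C3-D4-E5-F6", width=10))
    # ['serial', 'A1-B2-C3-', 'D4-E5-F6']   (was: 'A1-B2-C3-D', '4-E5-F6')
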
diff --git a/Lib/threading.py b/Lib/threading.py
index d96d99a..2d89742 100644
--- a/Lib/threading.py
+++ b/Lib/threading.py
@@ -28,7 +28,7 @@
            'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
            'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError',
            'setprofile', 'settrace', 'local', 'stack_size',
-           'excepthook', 'ExceptHookArgs']
+           'excepthook', 'ExceptHookArgs', 'gettrace', 'getprofile']
 
 # Rename some stuff so "from threading import *" is safe
 _start_new_thread = _thread.start_new_thread
@@ -65,6 +65,10 @@
     global _profile_hook
     _profile_hook = func
 
+def getprofile():
+    """Get the profiler function as set by threading.setprofile()."""
+    return _profile_hook
+
 def settrace(func):
     """Set a trace function for all threads started from the threading module.
 
@@ -75,6 +79,10 @@
     global _trace_hook
     _trace_hook = func
 
+def gettrace():
+    """Get the trace function as set by threading.settrace()."""
+    return _trace_hook
+
 # Synchronization classes
 
 Lock = _allocate_lock
@@ -380,7 +388,16 @@
         """
         self.notify(len(self._waiters))
 
-    notifyAll = notify_all
+    def notifyAll(self):
+        """Wake up all threads waiting on this condition.
+
+        This method is deprecated, use notify_all() instead.
+
+        """
+        import warnings
+        warnings.warn('notifyAll() is deprecated, use notify_all() instead',
+                      DeprecationWarning, stacklevel=2)
+        self.notify_all()
 
 
 class Semaphore:
@@ -530,7 +547,16 @@
         """Return true if and only if the internal flag is true."""
         return self._flag
 
-    isSet = is_set
+    def isSet(self):
+        """Return true if and only if the internal flag is true.
+
+        This method is deprecated, use is_set() instead.
+
+        """
+        import warnings
+        warnings.warn('isSet() is deprecated, use is_set() instead',
+                      DeprecationWarning, stacklevel=2)
+        return self.is_set()
 
     def set(self):
         """Set the internal flag to true.
@@ -608,7 +634,7 @@
         self._action = action
         self._timeout = timeout
         self._parties = parties
-        self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken
+        self._state = 0  # 0 filling, 1 draining, -1 resetting, -2 broken
         self._count = 0
 
     def wait(self, timeout=None):
@@ -745,22 +771,39 @@
 
 
 # Helper to generate new thread names
-_counter = _count().__next__
-_counter() # Consume 0 so first non-main thread has id 1.
-def _newname(template="Thread-%d"):
-    return template % _counter()
+_counter = _count(1).__next__
+def _newname(name_template):
+    return name_template % _counter()
 
-# Active thread administration
-_active_limbo_lock = _allocate_lock()
+# Active thread administration.
+#
+# bpo-44422: Use a reentrant lock to allow reentrant calls to functions like
+# threading.enumerate().
+_active_limbo_lock = RLock()
 _active = {}    # maps thread id to Thread object
 _limbo = {}
 _dangling = WeakSet()
+
 # Set of Thread._tstate_lock locks of non-daemon threads used by _shutdown()
 # to wait until all Python thread states get deleted:
 # see Thread._set_tstate_lock().
 _shutdown_locks_lock = _allocate_lock()
 _shutdown_locks = set()
 
+def _maintain_shutdown_locks():
+    """
+    Drop any shutdown locks that don't correspond to running threads anymore.
+
+    Calling this from time to time avoids an ever-growing _shutdown_locks
+    set when Thread objects are not joined explicitly. See bpo-37788.
+
+    This must be called with _shutdown_locks_lock acquired.
+    """
+    # If a lock was released, the corresponding thread has exited
+    to_remove = [lock for lock in _shutdown_locks if not lock.locked()]
+    _shutdown_locks.difference_update(to_remove)
+
+
 # Main class for threads
 
 class Thread:
@@ -800,8 +843,19 @@
         assert group is None, "group argument must be None for now"
         if kwargs is None:
             kwargs = {}
+        if name:
+            name = str(name)
+        else:
+            name = _newname("Thread-%d")
+            if target is not None:
+                try:
+                    target_name = target.__name__
+                    name += f" ({target_name})"
+                except AttributeError:
+                    pass
+
         self._target = target
-        self._name = str(name or _newname())
+        self._name = name
         self._args = args
         self._kwargs = kwargs
         if daemon is not None:
@@ -888,7 +942,7 @@
 
         """
         try:
-            if self._target:
+            if self._target is not None:
                 self._target(*self._args, **self._kwargs)
         finally:
             # Avoid a refcycle if the thread is running a function with
@@ -932,6 +986,7 @@
 
         if not self.daemon:
             with _shutdown_locks_lock:
+                _maintain_shutdown_locks()
                 _shutdown_locks.add(self._tstate_lock)
 
     def _bootstrap_inner(self):
@@ -987,7 +1042,8 @@
         self._tstate_lock = None
         if not self.daemon:
             with _shutdown_locks_lock:
-                _shutdown_locks.discard(lock)
+                # Remove our lock and other released locks from _shutdown_locks
+                _maintain_shutdown_locks()
 
     def _delete(self):
         "Remove current thread from the dict of currently running threads."
@@ -1044,11 +1100,24 @@
         # If the lock is acquired, the C code is done, and self._stop() is
         # called.  That sets ._is_stopped to True, and ._tstate_lock to None.
         lock = self._tstate_lock
-        if lock is None:  # already determined that the C code is done
+        if lock is None:
+            # already determined that the C code is done
             assert self._is_stopped
-        elif lock.acquire(block, timeout):
-            lock.release()
-            self._stop()
+            return
+
+        try:
+            if lock.acquire(block, timeout):
+                lock.release()
+                self._stop()
+        except:
+            if lock.locked():
+                # bpo-45274: lock.acquire() acquired the lock, but the function
+                # was interrupted with an exception before reaching the
+                # lock.release(). It can happen if a signal handler raises an
+                # exception, like CTRL+C which raises KeyboardInterrupt.
+                lock.release()
+                self._stop()
+            raise
 
     @property
     def name(self):
@@ -1094,8 +1163,8 @@
         """Return whether the thread is alive.
 
         This method returns True just before the run() method starts until just
-        after the run() method terminates. The module function enumerate()
-        returns a list of all alive threads.
+        after the run() method terminates. See also the module function
+        enumerate().
 
         """
         assert self._initialized, "Thread.__init__() not called"
@@ -1128,15 +1197,47 @@
         self._daemonic = daemonic
 
     def isDaemon(self):
+        """Return whether this thread is a daemon.
+
+        This method is deprecated, use the daemon attribute instead.
+
+        """
+        import warnings
+        warnings.warn('isDaemon() is deprecated, get the daemon attribute instead',
+                      DeprecationWarning, stacklevel=2)
         return self.daemon
 
     def setDaemon(self, daemonic):
+        """Set whether this thread is a daemon.
+
+        This method is deprecated, set the daemon attribute instead.
+
+        """
+        import warnings
+        warnings.warn('setDaemon() is deprecated, set the daemon attribute instead',
+                      DeprecationWarning, stacklevel=2)
         self.daemon = daemonic
 
     def getName(self):
+        """Return a string used for identification purposes only.
+
+        This method is deprecated, use the name attribute instead.
+
+        """
+        import warnings
+        warnings.warn('getName() is deprecated, get the name attribute instead',
+                      DeprecationWarning, stacklevel=2)
         return self.name
 
     def setName(self, name):
+        """Set the name string for this thread.
+
+        This method is deprecated, use the name attribute instead.
+
+        """
+        import warnings
+        warnings.warn('setName() is deprecated, set the name attribute instead',
+                      DeprecationWarning, stacklevel=2)
         self.name = name
 
 
@@ -1186,6 +1287,10 @@
         stderr.flush()
 
 
+# Original value of threading.excepthook
+__excepthook__ = excepthook
+
+
 def _make_invoke_excepthook():
     # Create a local namespace to ensure that variables remain alive
     # when _invoke_excepthook() is called, even if it is called late during
@@ -1327,7 +1432,16 @@
     except KeyError:
         return _DummyThread()
 
-currentThread = current_thread
+def currentThread():
+    """Return the current Thread object, corresponding to the caller's thread of control.
+
+    This function is deprecated, use current_thread() instead.
+
+    """
+    import warnings
+    warnings.warn('currentThread() is deprecated, use current_thread() instead',
+                  DeprecationWarning, stacklevel=2)
+    return current_thread()
 
 def active_count():
     """Return the number of Thread objects currently alive.
@@ -1339,7 +1453,16 @@
     with _active_limbo_lock:
         return len(_active) + len(_limbo)
 
-activeCount = active_count
+def activeCount():
+    """Return the number of Thread objects currently alive.
+
+    This function is deprecated, use active_count() instead.
+
+    """
+    import warnings
+    warnings.warn('activeCount() is deprecated, use active_count() instead',
+                  DeprecationWarning, stacklevel=2)
+    return active_count()
 
 def _enumerate():
     # Same as enumerate(), but without the lock. Internal use only.
@@ -1400,20 +1523,29 @@
 
     global _SHUTTING_DOWN
     _SHUTTING_DOWN = True
-    # Main thread
-    tlock = _main_thread._tstate_lock
-    # The main thread isn't finished yet, so its thread state lock can't have
-    # been released.
-    assert tlock is not None
-    assert tlock.locked()
-    tlock.release()
-    _main_thread._stop()
 
     # Call registered threading atexit functions before threads are joined.
     # Order is reversed, similar to atexit.
     for atexit_call in reversed(_threading_atexits):
         atexit_call()
 
+    # Main thread
+    if _main_thread.ident == get_ident():
+        tlock = _main_thread._tstate_lock
+        # The main thread isn't finished yet, so its thread state lock can't
+        # have been released.
+        assert tlock is not None
+        assert tlock.locked()
+        tlock.release()
+        _main_thread._stop()
+    else:
+        # bpo-1596321: _shutdown() must be called in the main thread.
+        # If the threading module was not imported by the main thread,
+        # _main_thread is the thread which imported the threading module.
+        # In this case, ignore _main_thread, matching the behavior for threads
+        # spawned by C libraries or created with _thread.start_new_thread().
+        pass
+
     # Join all non-daemon threads
     while True:
         with _shutdown_locks_lock:
@@ -1424,7 +1556,7 @@
             break
 
         for lock in locks:
-            # mimick Thread.join()
+            # mimic Thread.join()
             lock.acquire()
             lock.release()
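
The acquire/release pair above waits on a thread the same way Thread.join()
does: the lock is held while the thread runs and released when it finishes.
The same pattern in isolation (illustrative only, not the module's internals):

    import threading

    running = threading.Lock()
    running.acquire()              # held for the worker's whole lifetime

    def worker():
        try:
            pass                   # ... real work ...
        finally:
            running.release()      # like _tstate_lock: released on exit

    threading.Thread(target=worker).start()
    running.acquire()              # blocks until worker() has finished
    running.release()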
 
@@ -1457,7 +1589,7 @@
     # by another (non-forked) thread.  http://bugs.python.org/issue874900
     global _active_limbo_lock, _main_thread
     global _shutdown_locks_lock, _shutdown_locks
-    _active_limbo_lock = _allocate_lock()
+    _active_limbo_lock = RLock()
 
     # fork() only copied the current thread; clear references to others.
     new_active = {}
diff --git a/Lib/timeit.py b/Lib/timeit.py
index 6c3ec01..9dfd454 100644
--- a/Lib/timeit.py
+++ b/Lib/timeit.py
@@ -72,6 +72,7 @@
     _t0 = _timer()
     for _i in _it:
         {stmt}
+        pass
     _t1 = _timer()
     return _t1 - _t0
 """
diff --git a/Lib/token.py b/Lib/token.py
index 493bf04..9d0c0bf 100644
--- a/Lib/token.py
+++ b/Lib/token.py
@@ -62,12 +62,13 @@
 ASYNC = 56
 TYPE_IGNORE = 57
 TYPE_COMMENT = 58
+SOFT_KEYWORD = 59
 # These aren't used by the C tokenizer but are needed for tokenize.py
-ERRORTOKEN = 59
-COMMENT = 60
-NL = 61
-ENCODING = 62
-N_TOKENS = 63
+ERRORTOKEN = 60
+COMMENT = 61
+NL = 62
+ENCODING = 63
+N_TOKENS = 64
 # Special definitions for cooperation with parser
 NT_OFFSET = 256
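
Because SOFT_KEYWORD shifts ERRORTOKEN and everything after it by one, numeric
token values are not stable across releases; code should go through the named
constants and token.tok_name rather than hard-coded numbers:

    import token

    assert token.tok_name[token.ERRORTOKEN] == "ERRORTOKEN"
    print(token.ERRORTOKEN)  # 59 before this change, 60 after it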
 
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 1aee21b..7d7736f 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -27,6 +27,7 @@
 from builtins import open as _builtin_open
 from codecs import lookup, BOM_UTF8
 import collections
+import functools
 from io import TextIOWrapper
 import itertools as _itertools
 import re
@@ -95,6 +96,7 @@
                 result.add(''.join(u))
     return result
 
+@functools.lru_cache
 def _compile(expr):
     return re.compile(expr, re.UNICODE)
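
The unbounded functools.lru_cache memoizes one compiled pattern per expression
string, so repeated tokenization stops recompiling identical regexes. The same
idiom in isolation (compile_cached is an illustrative name):

    import functools
    import re

    @functools.lru_cache        # bare decorator form, valid since 3.8
    def compile_cached(expr):
        return re.compile(expr, re.UNICODE)

    # Same string -> same cached pattern object, compiled only once.
    assert compile_cached(r"\d+") is compile_cached(r"\d+")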
 
@@ -602,7 +604,7 @@
                 pos += 1
 
     # Add an implicit NEWLINE if the input doesn't end in one
-    if last_line and last_line[-1] not in '\r\n':
+    if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
         yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
     for indent in indents[1:]:                 # pop remaining indent levels
         yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
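
With the extra guard, input whose last line is a comment and lacks a trailing
newline no longer gets a synthesized NEWLINE token. A quick check against the
patched module:

    import io
    import tokenize

    src = "x = 1\n# trailing comment, no newline"
    toks = tokenize.generate_tokens(io.StringIO(src).readline)
    names = [tokenize.tok_name[t.type] for t in toks]
    # The final COMMENT is followed by NL and ENDMARKER, with no implicit
    # NEWLINE inserted for the comment-only last line.
    print(names)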
diff --git a/Lib/trace.py b/Lib/trace.py
index c505d8b..2cf3643 100644
--- a/Lib/trace.py
+++ b/Lib/trace.py
@@ -116,7 +116,7 @@
         return 0
 
 def _modname(path):
-    """Return a plausible module name for the patch."""
+    """Return a plausible module name for the path."""
 
     base = os.path.basename(path)
     filename, ext = os.path.splitext(base)
diff --git a/Lib/traceback.py b/Lib/traceback.py
index fb34de9..d6a010f 100644
--- a/Lib/traceback.py
+++ b/Lib/traceback.py
@@ -84,7 +84,25 @@
     "another exception occurred:\n\n")
 
 
-def print_exception(etype, value, tb, limit=None, file=None, chain=True):
+class _Sentinel:
+    def __repr__(self):
+        return "<implicit>"
+
+_sentinel = _Sentinel()
+
+def _parse_value_tb(exc, value, tb):
+    if (value is _sentinel) != (tb is _sentinel):
+        raise ValueError("Both or neither of value and tb must be given")
+    if value is tb is _sentinel:
+        if exc is not None:
+            return exc, exc.__traceback__
+        else:
+            return None, None
+    return value, tb
+
+
+def print_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None,
+                    file=None, chain=True):
     """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
 
     This differs from print_tb() in the following ways: (1) if
@@ -95,17 +113,16 @@
     occurred with a caret on the next line indicating the approximate
     position of the error.
     """
-    # format_exception has ignored etype for some time, and code such as cgitb
-    # passes in bogus values as a result. For compatibility with such code we
-    # ignore it here (rather than in the new TracebackException API).
+    value, tb = _parse_value_tb(exc, value, tb)
     if file is None:
         file = sys.stderr
-    for line in TracebackException(
-            type(value), value, tb, limit=limit).format(chain=chain):
+    te = TracebackException(type(value), value, tb, limit=limit, compact=True)
+    for line in te.format(chain=chain):
         print(line, file=file, end="")
 
 
-def format_exception(etype, value, tb, limit=None, chain=True):
+def format_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None,
+                     chain=True):
     """Format a stack trace and the exception information.
 
     The arguments have the same meaning as the corresponding arguments
@@ -114,19 +131,15 @@
     these lines are concatenated and printed, exactly the same text is
     printed as does print_exception().
     """
-    # format_exception has ignored etype for some time, and code such as cgitb
-    # passes in bogus values as a result. For compatibility with such code we
-    # ignore it here (rather than in the new TracebackException API).
-    return list(TracebackException(
-        type(value), value, tb, limit=limit).format(chain=chain))
+    value, tb = _parse_value_tb(exc, value, tb)
+    te = TracebackException(type(value), value, tb, limit=limit, compact=True)
+    return list(te.format(chain=chain))
 
 
-def format_exception_only(etype, value):
+def format_exception_only(exc, /, value=_sentinel):
     """Format the exception part of a traceback.
 
-    The arguments are the exception type and value such as given by
-    sys.last_type and sys.last_value. The return value is a list of
-    strings, each ending in a newline.
+    The return value is a list of strings, each ending in a newline.
 
     Normally, the list contains a single string; however, for
     SyntaxError exceptions, it contains several lines that (when
@@ -137,7 +150,10 @@
     string in the list.
 
     """
-    return list(TracebackException(etype, value, None).format_exception_only())
+    if value is _sentinel:
+        value = exc
+    te = TracebackException(type(value), value, None, compact=True)
+    return list(te.format_exception_only())
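
All three functions now accept just the exception object and take the type and
traceback from it; the legacy (etype, value, tb) triple still works
positionally. The new calling convention:

    import traceback

    try:
        1 / 0
    except ZeroDivisionError as e:
        traceback.print_exception(e)           # new: single argument
        lines = traceback.format_exception(e)  # likewise
        only = traceback.format_exception_only(e)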
 
 
 # -- not official API but folk probably use these two functions.
@@ -285,9 +301,10 @@
     @property
     def line(self):
         if self._line is None:
-            self._line = linecache.getline(self.filename, self.lineno).strip()
-        return self._line
-
+            if self.lineno is None:
+                return None
+            self._line = linecache.getline(self.filename, self.lineno)
+        return self._line.strip()
 
 def walk_stack(f):
     """Walk a stack yielding the frame and line number for each frame.
@@ -458,52 +475,29 @@
       occurred.
     - :attr:`lineno` For syntax errors - the line number where the error
       occurred.
+    - :attr:`end_lineno` For syntax errors - the end line number where the error
+      occurred. Can be `None` if not present.
     - :attr:`text` For syntax errors - the text where the error
       occurred.
     - :attr:`offset` For syntax errors - the offset into the text where the
       error occurred.
+    - :attr:`end_offset` For syntax errors - the end offset into the text where
+      the error occurred. Can be `None` if not present.
     - :attr:`msg` For syntax errors - the compiler error message.
     """
 
     def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None,
-            lookup_lines=True, capture_locals=False, _seen=None):
+            lookup_lines=True, capture_locals=False, compact=False,
+            _seen=None):
         # NB: we need to accept exc_traceback, exc_value, exc_traceback to
         # permit backwards compat with the existing API, otherwise we
         # need stub thunk objects just to glue it together.
         # Handle loops in __cause__ or __context__.
+        is_recursive_call = _seen is not None
         if _seen is None:
             _seen = set()
         _seen.add(id(exc_value))
-        # Gracefully handle (the way Python 2.4 and earlier did) the case of
-        # being called with no type or value (None, None, None).
-        if (exc_value and exc_value.__cause__ is not None
-            and id(exc_value.__cause__) not in _seen):
-            cause = TracebackException(
-                type(exc_value.__cause__),
-                exc_value.__cause__,
-                exc_value.__cause__.__traceback__,
-                limit=limit,
-                lookup_lines=False,
-                capture_locals=capture_locals,
-                _seen=_seen)
-        else:
-            cause = None
-        if (exc_value and exc_value.__context__ is not None
-            and id(exc_value.__context__) not in _seen):
-            context = TracebackException(
-                type(exc_value.__context__),
-                exc_value.__context__,
-                exc_value.__context__.__traceback__,
-                limit=limit,
-                lookup_lines=False,
-                capture_locals=capture_locals,
-                _seen=_seen)
-        else:
-            context = None
-        self.__cause__ = cause
-        self.__context__ = context
-        self.__suppress_context__ = \
-            exc_value.__suppress_context__ if exc_value else False
+
         # TODO: locals.
         self.stack = StackSummary.extract(
             walk_tb(exc_traceback), limit=limit, lookup_lines=lookup_lines,
@@ -515,12 +509,62 @@
         if exc_type and issubclass(exc_type, SyntaxError):
             # Handle SyntaxError's specially
             self.filename = exc_value.filename
-            self.lineno = str(exc_value.lineno)
+            lno = exc_value.lineno
+            self.lineno = str(lno) if lno is not None else None
+            end_lno = exc_value.end_lineno
+            self.end_lineno = str(end_lno) if end_lno is not None else None
             self.text = exc_value.text
             self.offset = exc_value.offset
+            self.end_offset = exc_value.end_offset
             self.msg = exc_value.msg
         if lookup_lines:
             self._load_lines()
+        self.__suppress_context__ = \
+            exc_value.__suppress_context__ if exc_value is not None else False
+
+        # Convert __cause__ and __context__ to `TracebackException`s; use a
+        # queue to avoid recursion (only the top-level call gets _seen=None).
+        if not is_recursive_call:
+            queue = [(self, exc_value)]
+            while queue:
+                te, e = queue.pop()
+                if (e and e.__cause__ is not None
+                    and id(e.__cause__) not in _seen):
+                    cause = TracebackException(
+                        type(e.__cause__),
+                        e.__cause__,
+                        e.__cause__.__traceback__,
+                        limit=limit,
+                        lookup_lines=lookup_lines,
+                        capture_locals=capture_locals,
+                        _seen=_seen)
+                else:
+                    cause = None
+
+                if compact:
+                    need_context = (cause is None and
+                                    e is not None and
+                                    not e.__suppress_context__)
+                else:
+                    need_context = True
+                if (e and e.__context__ is not None
+                    and need_context and id(e.__context__) not in _seen):
+                    context = TracebackException(
+                        type(e.__context__),
+                        e.__context__,
+                        e.__context__.__traceback__,
+                        limit=limit,
+                        lookup_lines=lookup_lines,
+                        capture_locals=capture_locals,
+                        _seen=_seen)
+                else:
+                    context = None
+                te.__cause__ = cause
+                te.__context__ = context
+                if cause:
+                    queue.append((te.__cause__, e.__cause__))
+                if context:
+                    queue.append((te.__context__, e.__context__))
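
compact=True skips capturing a __context__ that format() would not print
anyway, and the explicit queue keeps long chains from recursing. A sketch of
the new keyword (via from_exception, which forwards it to __init__):

    import traceback

    try:
        try:
            raise KeyError("root cause")
        except KeyError as inner:
            raise ValueError("boom") from inner
    except ValueError as e:
        te = traceback.TracebackException.from_exception(e, compact=True)
        print("".join(te.format()))  # the explicit __cause__ still prints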
 
     @classmethod
     def from_exception(cls, exc, *args, **kwargs):
@@ -531,10 +575,6 @@
         """Private API. force all lines in the stack to be loaded."""
         for frame in self.stack:
             frame.line
-        if self.__context__:
-            self.__context__._load_lines()
-        if self.__cause__:
-            self.__cause__._load_lines()
 
     def __eq__(self, other):
         if isinstance(other, TracebackException):
@@ -564,6 +604,8 @@
         stype = self.exc_type.__qualname__
         smod = self.exc_type.__module__
         if smod not in ("__main__", "builtins"):
+            if not isinstance(smod, str):
+                smod = "<unknown>"
             stype = smod + '.' + stype
 
         if not issubclass(self.exc_type, SyntaxError):
@@ -574,9 +616,12 @@
     def _format_syntax_error(self, stype):
         """Format SyntaxError exceptions (internal helper)."""
         # Show exactly where the problem was found.
-        filename = self.filename or "<string>"
-        lineno = str(self.lineno) or '?'
-        yield '  File "{}", line {}\n'.format(filename, lineno)
+        filename_suffix = ''
+        if self.lineno is not None:
+            yield '  File "{}", line {}\n'.format(
+                self.filename or "<string>", self.lineno)
+        elif self.filename is not None:
+            filename_suffix = ' ({})'.format(self.filename)
 
         text = self.text
         if text is not None:
@@ -587,14 +632,22 @@
             ltext = rtext.lstrip(' \n\f')
             spaces = len(rtext) - len(ltext)
             yield '    {}\n'.format(ltext)
-            # Convert 1-based column offset to 0-based index into stripped text
-            caret = (self.offset or 0) - 1 - spaces
-            if caret >= 0:
-                # non-space whitespace (likes tabs) must be kept for alignment
-                caretspace = ((c if c.isspace() else ' ') for c in ltext[:caret])
-                yield '    {}^\n'.format(''.join(caretspace))
+
+            if self.offset is not None:
+                offset = self.offset
+                end_offset = self.end_offset if self.end_offset not in {None, 0} else offset
+                if offset == end_offset or end_offset == -1:
+                    end_offset = offset + 1
+
+                # Convert 1-based column offset to 0-based index into stripped text
+                colno = offset - 1 - spaces
+                end_colno = end_offset - 1 - spaces
+                if colno >= 0:
+                    # non-space whitespace (likes tabs) must be kept for alignment
+                    caretspace = ((c if c.isspace() else ' ') for c in ltext[:colno])
+                    yield '    {}{}'.format("".join(caretspace), ('^' * (end_colno - colno) + "\n"))
         msg = self.msg or "<no detail available>"
-        yield "{}: {}\n".format(stype, msg)
+        yield "{}: {}{}\n".format(stype, msg, filename_suffix)
 
     def format(self, *, chain=True):
         """Format the exception.
@@ -608,15 +661,32 @@
         The message indicating which exception occurred is always the last
         string in the output.
         """
-        if chain:
-            if self.__cause__ is not None:
-                yield from self.__cause__.format(chain=chain)
-                yield _cause_message
-            elif (self.__context__ is not None and
-                not self.__suppress_context__):
-                yield from self.__context__.format(chain=chain)
-                yield _context_message
-        if self.stack:
-            yield 'Traceback (most recent call last):\n'
-            yield from self.stack.format()
-        yield from self.format_exception_only()
+
+        output = []
+        exc = self
+        while exc:
+            if chain:
+                if exc.__cause__ is not None:
+                    chained_msg = _cause_message
+                    chained_exc = exc.__cause__
+                elif (exc.__context__ is not None and
+                      not exc.__suppress_context__):
+                    chained_msg = _context_message
+                    chained_exc = exc.__context__
+                else:
+                    chained_msg = None
+                    chained_exc = None
+
+                output.append((chained_msg, exc))
+                exc = chained_exc
+            else:
+                output.append((None, exc))
+                exc = None
+
+        for msg, exc in reversed(output):
+            if msg is not None:
+                yield msg
+            if exc.stack:
+                yield 'Traceback (most recent call last):\n'
+                yield from exc.stack.format()
+            yield from exc.format_exception_only()
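
Because format() now walks the chain iteratively (and __init__ builds it with
a queue), arbitrarily long __cause__ chains no longer risk RecursionError:

    import traceback

    exc = None
    for i in range(2000):  # far past the default recursion limit
        try:
            raise ValueError(i) from exc
        except ValueError as e:
            exc = e

    te = traceback.TracebackException.from_exception(exc)
    text = "".join(te.format())
    assert text.count("direct cause") == 1999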
diff --git a/Lib/tracemalloc.py b/Lib/tracemalloc.py
index 69b4170..cec99c5 100644
--- a/Lib/tracemalloc.py
+++ b/Lib/tracemalloc.py
@@ -226,7 +226,7 @@
         return str(self[0])
 
     def __repr__(self):
-        s = "<Traceback %r" % tuple(self)
+        s = f"<Traceback {tuple(self)}"
         if self._total_nframe is None:
             s += ">"
         else:
diff --git a/Lib/types.py b/Lib/types.py
index ad2020e..62122a9 100644
--- a/Lib/types.py
+++ b/Lib/types.py
@@ -82,7 +82,7 @@
     updated = False
     shift = 0
     for i, base in enumerate(bases):
-        if isinstance(base, type):
+        if isinstance(base, type) and not isinstance(base, GenericAlias):
             continue
         if not hasattr(base, "__mro_entries__"):
             continue
@@ -155,7 +155,12 @@
     class's __getattr__ method; this is done by raising AttributeError.
 
     This allows one to have properties active on an instance, and have virtual
-    attributes on the class with the same name (see Enum for an example).
+    attributes on the class with the same name.  (Enum used this between Python
+    versions 3.4 and 3.9.)
+
+    Subclass from this to use a different method of accessing virtual attributes
+    and still be treated properly by the inspect module.  (Enum uses this since
+    Python 3.10.)
 
     """
     def __init__(self, fget=None, fset=None, fdel=None, doc=None):
@@ -292,8 +297,11 @@
 
     return wrapped
 
-
 GenericAlias = type(list[int])
+UnionType = type(int | str)
 
+EllipsisType = type(Ellipsis)
+NoneType = type(None)
+NotImplementedType = type(NotImplemented)
 
 __all__ = [n for n in globals() if n[:1] != '_']
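
The new aliases give importable names to singleton types that previously had
to be spelled type(None) and the like:

    import types

    assert isinstance(None, types.NoneType)
    assert isinstance(..., types.EllipsisType)
    assert isinstance(NotImplemented, types.NotImplementedType)
    assert types.UnionType is type(int | str)  # PEP 604 unions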
diff --git a/Lib/typing.py b/Lib/typing.py
index f5316ab..086d0f3 100644
--- a/Lib/typing.py
+++ b/Lib/typing.py
@@ -4,8 +4,10 @@
 At large scale, the structure of the module is as follows:
 * Imports and exports, all public names should be explicitly added to __all__.
 * Internal helper functions: these should never be used in code outside this module.
-* _SpecialForm and its instances (special forms): Any, NoReturn, ClassVar, Union, Optional
-* Two classes whose instances can be type arguments in addition to types: ForwardRef and TypeVar
+* _SpecialForm and its instances (special forms):
+  Any, NoReturn, ClassVar, Union, Optional, Concatenate
+* Classes whose instances can be type arguments in addition to types:
+  ForwardRef, TypeVar and ParamSpec
 * The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is
   currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str],
   etc., are instances of either of these classes.
@@ -35,11 +37,13 @@
     'Any',
     'Callable',
     'ClassVar',
+    'Concatenate',
     'Final',
     'ForwardRef',
     'Generic',
     'Literal',
     'Optional',
+    'ParamSpec',
     'Protocol',
     'Tuple',
     'Type',
@@ -96,6 +100,13 @@
     'TypedDict',  # Not really a type.
     'Generator',
 
+    # Other concrete types.
+    'BinaryIO',
+    'IO',
+    'Match',
+    'Pattern',
+    'TextIO',
+
     # One-off things.
     'AnyStr',
     'cast',
@@ -103,14 +114,19 @@
     'get_args',
     'get_origin',
     'get_type_hints',
+    'is_typeddict',
     'NewType',
     'no_type_check',
     'no_type_check_decorator',
     'NoReturn',
     'overload',
+    'ParamSpecArgs',
+    'ParamSpecKwargs',
     'runtime_checkable',
     'Text',
     'TYPE_CHECKING',
+    'TypeAlias',
+    'TypeGuard',
 ]
 
 # The pseudo-submodules 're' and 'io' are part of the public
@@ -118,7 +134,16 @@
 # legitimate imports of those modules.
 
 
-def _type_check(arg, msg, is_argument=True):
+def _type_convert(arg, module=None, *, allow_special_forms=False):
+    """For converting None to type(None), and strings to ForwardRef."""
+    if arg is None:
+        return type(None)
+    if isinstance(arg, str):
+        return ForwardRef(arg, module=module, is_class=allow_special_forms)
+    return arg
+
+
+def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False):
     """Check that the argument is a type, and return it (internal helper).
 
     As a special case, accept None and return type(None) instead. Also wrap strings
@@ -131,27 +156,32 @@
     We append the repr() of the actual value (truncated to 100 chars).
     """
     invalid_generic_forms = (Generic, Protocol)
-    if is_argument:
-        invalid_generic_forms = invalid_generic_forms + (ClassVar, Final)
+    if not allow_special_forms:
+        invalid_generic_forms += (ClassVar,)
+        if is_argument:
+            invalid_generic_forms += (Final,)
 
-    if arg is None:
-        return type(None)
-    if isinstance(arg, str):
-        return ForwardRef(arg)
+    arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms)
     if (isinstance(arg, _GenericAlias) and
             arg.__origin__ in invalid_generic_forms):
         raise TypeError(f"{arg} is not valid as type argument")
-    if arg in (Any, NoReturn):
+    if arg in (Any, NoReturn, Final, TypeAlias):
         return arg
     if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
         raise TypeError(f"Plain {arg} is not valid as type argument")
-    if isinstance(arg, (type, TypeVar, ForwardRef)):
+    if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec,
+                        ParamSpecArgs, ParamSpecKwargs)):
         return arg
     if not callable(arg):
         raise TypeError(f"{msg} Got {arg!r:.100}.")
     return arg
 
 
+def _is_param_expr(arg):
+    return arg is ... or isinstance(arg,
+            (tuple, list, ParamSpec, _ConcatenateGenericAlias))
+
+
 def _type_repr(obj):
     """Return the repr() of an object, special-casing types (internal helper).
 
@@ -173,17 +203,19 @@
     return repr(obj)
 
 
-def _collect_type_vars(types):
-    """Collect all type variable contained in types in order of
-    first appearance (lexicographic order). For example::
+def _collect_type_vars(types_, typevar_types=None):
+    """Collect all type variable contained
+    in types in order of first appearance (lexicographic order). For example::
 
         _collect_type_vars((T, List[S, T])) == (T, S)
     """
+    if typevar_types is None:
+        typevar_types = TypeVar
     tvars = []
-    for t in types:
-        if isinstance(t, TypeVar) and t not in tvars:
+    for t in types_:
+        if isinstance(t, typevar_types) and t not in tvars:
             tvars.append(t)
-        if isinstance(t, (_GenericAlias, GenericAlias)):
+        if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
             tvars.extend([t for t in t.__parameters__ if t not in tvars])
     return tuple(tvars)
 
@@ -196,9 +228,27 @@
         raise TypeError(f"{cls} is not a generic class")
     alen = len(parameters)
     if alen != elen:
-        raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
+        raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
                         f" actual {alen}, expected {elen}")
 
+def _prepare_paramspec_params(cls, params):
+    """Prepares the parameters for a Generic containing ParamSpec
+    variables (internal helper).
+    """
+    # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
+    if (len(cls.__parameters__) == 1
+            and params and not _is_param_expr(params[0])):
+        assert isinstance(cls.__parameters__[0], ParamSpec)
+        return (params,)
+    else:
+        _check_generic(cls, params, len(cls.__parameters__))
+        _params = []
+        # Convert lists to tuples to help other libraries cache the results.
+        for p, tvar in zip(params, cls.__parameters__):
+            if isinstance(tvar, ParamSpec) and isinstance(p, list):
+                p = tuple(p)
+            _params.append(p)
+        return tuple(_params)
 
 def _deduplicate(params):
     # Weed out strict duplicates, preserving the first of each occurrence.
@@ -221,7 +271,7 @@
     # Flatten out Union[Union[...], ...].
     params = []
     for p in parameters:
-        if isinstance(p, _UnionGenericAlias):
+        if isinstance(p, (_UnionGenericAlias, types.UnionType)):
             params.extend(p.__args__)
         elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
             params.extend(p[1:])
@@ -270,17 +320,19 @@
 def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
     """Evaluate all forward references in the given type t.
     For use of globalns and localns see the docstring for get_type_hints().
-    recursive_guard is used to prevent prevent infinite recursion
-    with recursive ForwardRef.
+    recursive_guard is used to prevent infinite recursion with a recursive
+    ForwardRef.
     """
     if isinstance(t, ForwardRef):
         return t._evaluate(globalns, localns, recursive_guard)
-    if isinstance(t, (_GenericAlias, GenericAlias)):
+    if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
         ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
         if ev_args == t.__args__:
             return t
         if isinstance(t, GenericAlias):
             return GenericAlias(t.__origin__, ev_args)
+        if isinstance(t, types.UnionType):
+            return functools.reduce(operator.or_, ev_args)
         else:
             return t.copy_with(ev_args)
     return t
@@ -316,6 +368,12 @@
         self._name = getitem.__name__
         self.__doc__ = getitem.__doc__
 
+    def __getattr__(self, item):
+        if item in {'__name__', '__qualname__'}:
+            return self._name
+
+        raise AttributeError(item)
+
     def __mro_entries__(self, bases):
         raise TypeError(f"Cannot subclass {self!r}")
 
@@ -328,6 +386,12 @@
     def __call__(self, *args, **kwds):
         raise TypeError(f"Cannot instantiate {self!r}")
 
+    def __or__(self, other):
+        return Union[self, other]
+
+    def __ror__(self, other):
+        return Union[other, self]
+
     def __instancecheck__(self, obj):
         raise TypeError(f"{self} cannot be used with isinstance()")
 
@@ -340,9 +404,10 @@
 
 
 class _LiteralSpecialForm(_SpecialForm, _root=True):
-    @_tp_cache(typed=True)
     def __getitem__(self, parameters):
-        return self._getitem(self, parameters)
+        if not isinstance(parameters, tuple):
+            parameters = (parameters,)
+        return self._getitem(self, *parameters)
 
 
 @_SpecialForm
@@ -451,6 +516,8 @@
     parameters = _remove_dups_flatten(parameters)
     if len(parameters) == 1:
         return parameters[0]
+    if len(parameters) == 2 and type(None) in parameters:
+        return _UnionGenericAlias(self, parameters, name="Optional")
     return _UnionGenericAlias(self, parameters)
 
 @_SpecialForm
@@ -463,7 +530,8 @@
     return Union[arg, type(None)]
 
 @_LiteralSpecialForm
-def Literal(self, parameters):
+@_tp_cache(typed=True)
+def Literal(self, *parameters):
     """Special typing form to define literal types (a.k.a. value types).
 
     This form can be used to indicate to type checkers that the corresponding
@@ -486,9 +554,6 @@
     """
     # There is no '_type_check' call because arguments to Literal[...] are
     # values, not types.
-    if not isinstance(parameters, tuple):
-        parameters = (parameters,)
-
     parameters = _flatten_literal_params(parameters)
 
     try:
@@ -499,14 +564,104 @@
     return _LiteralGenericAlias(self, parameters)
 
 
+@_SpecialForm
+def TypeAlias(self, parameters):
+    """Special marker indicating that an assignment should
+    be recognized as a proper type alias definition by type
+    checkers.
+
+    For example::
+
+        Predicate: TypeAlias = Callable[..., bool]
+
+    It's invalid when used anywhere except as in the example above.
+    """
+    raise TypeError(f"{self} is not subscriptable")
+
+
+@_SpecialForm
+def Concatenate(self, parameters):
+    """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+    higher order function which adds, removes or transforms parameters of a
+    callable.
+
+    For example::
+
+       Callable[Concatenate[int, P], int]
+
+    See PEP 612 for detailed information.
+    """
+    if parameters == ():
+        raise TypeError("Cannot take a Concatenate of no types.")
+    if not isinstance(parameters, tuple):
+        parameters = (parameters,)
+    if not isinstance(parameters[-1], ParamSpec):
+        raise TypeError("The last parameter to Concatenate should be a "
+                        "ParamSpec variable.")
+    msg = "Concatenate[arg, ...]: each arg must be a type."
+    parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1])
+    return _ConcatenateGenericAlias(self, parameters,
+                                    _typevar_types=(TypeVar, ParamSpec),
+                                    _paramspec_tvars=True)
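
Concatenate prepends fixed positional argument types to a ParamSpec -- the
typed shape of decorators that supply some arguments themselves. A sketch
(with_prefix and greet are illustrative names):

    from typing import Callable, Concatenate, ParamSpec, TypeVar

    P = ParamSpec("P")
    R = TypeVar("R")

    def with_prefix(f: Callable[Concatenate[str, P], R]) -> Callable[P, R]:
        # Supplies the leading str itself; callers pass the remaining args.
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            return f("prefix", *args, **kwargs)
        return wrapper

    @with_prefix
    def greet(prefix: str, name: str) -> str:
        return f"{prefix}: {name}"

    print(greet("world"))  # "prefix: world"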
+
+
+@_SpecialForm
+def TypeGuard(self, parameters):
+    """Special typing form used to annotate the return type of a user-defined
+    type guard function.  ``TypeGuard`` only accepts a single type argument.
+    At runtime, functions marked this way should return a boolean.
+
+    ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+    type checkers to determine a more precise type of an expression within a
+    program's code flow.  Usually type narrowing is done by analyzing
+    conditional code flow and applying the narrowing to a block of code.  The
+    conditional expression here is sometimes referred to as a "type guard".
+
+    Sometimes it would be convenient to use a user-defined boolean function
+    as a type guard.  Such a function should use ``TypeGuard[...]`` as its
+    return type to alert static type checkers to this intention.
+
+    Using ``-> TypeGuard`` tells the static type checker that for a given
+    function:
+
+    1. The return value is a boolean.
+    2. If the return value is ``True``, the type of its argument
+       is the type inside ``TypeGuard``.
+
+       For example::
+
+          def is_str(val: Union[str, float]):
+              # "isinstance" type guard
+              if isinstance(val, str):
+                  # Type of ``val`` is narrowed to ``str``
+                  ...
+              else:
+                  # Else, type of ``val`` is narrowed to ``float``.
+                  ...
+
+    Strict type narrowing is not enforced -- the type inside ``TypeGuard``
+    need not be a narrower form of the argument's type (it can even be a
+    wider form) and this may lead to
+    type-unsafe results.  The main reason is to allow for things like
+    narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+    a subtype of the former, since ``List`` is invariant.  The responsibility of
+    writing type-safe type guards is left to the user.
+
+    ``TypeGuard`` also works with type variables.  For more information, see
+    PEP 647 (User-Defined Type Guards).
+    """
+    item = _type_check(parameters, f'{self} accepts only single type.')
+    return _GenericAlias(self, (item,))
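
At runtime a TypeGuard function is just a predicate returning bool; the
annotation only informs static checkers. A minimal sketch:

    from typing import List, TypeGuard

    def is_str_list(xs: List[object]) -> TypeGuard[List[str]]:
        return all(isinstance(x, str) for x in xs)

    vals: List[object] = ["a", "b"]
    if is_str_list(vals):
        # A static checker narrows vals to List[str] in this branch.
        print(",".join(vals))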
+
+
 class ForwardRef(_Final, _root=True):
     """Internal wrapper to hold a forward reference."""
 
     __slots__ = ('__forward_arg__', '__forward_code__',
                  '__forward_evaluated__', '__forward_value__',
-                 '__forward_is_argument__')
+                 '__forward_is_argument__', '__forward_is_class__',
+                 '__forward_module__')
 
-    def __init__(self, arg, is_argument=True):
+    def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
         if not isinstance(arg, str):
             raise TypeError(f"Forward reference must be a string -- got {arg!r}")
         try:
@@ -518,6 +673,8 @@
         self.__forward_evaluated__ = False
         self.__forward_value__ = None
         self.__forward_is_argument__ = is_argument
+        self.__forward_is_class__ = is_class
+        self.__forward_module__ = module
 
     def _evaluate(self, globalns, localns, recursive_guard):
         if self.__forward_arg__ in recursive_guard:
@@ -529,10 +686,15 @@
                 globalns = localns
             elif localns is None:
                 localns = globalns
-            type_ =_type_check(
+            if self.__forward_module__ is not None:
+                globalns = getattr(
+                    sys.modules.get(self.__forward_module__, None), '__dict__', globalns
+                )
+            type_ = _type_check(
                 eval(self.__forward_code__, globalns, localns),
                 "Forward references must evaluate to types.",
                 is_argument=self.__forward_is_argument__,
+                allow_special_forms=self.__forward_is_class__,
             )
             self.__forward_value__ = _eval_type(
                 type_, globalns, localns, recursive_guard | {self.__forward_arg__}
@@ -546,16 +708,50 @@
         if self.__forward_evaluated__ and other.__forward_evaluated__:
             return (self.__forward_arg__ == other.__forward_arg__ and
                     self.__forward_value__ == other.__forward_value__)
-        return self.__forward_arg__ == other.__forward_arg__
+        return (self.__forward_arg__ == other.__forward_arg__ and
+                self.__forward_module__ == other.__forward_module__)
 
     def __hash__(self):
-        return hash(self.__forward_arg__)
+        return hash((self.__forward_arg__, self.__forward_module__))
 
     def __repr__(self):
         return f'ForwardRef({self.__forward_arg__!r})'
 
+class _TypeVarLike:
+    """Mixin for TypeVar-like types (TypeVar and ParamSpec)."""
+    def __init__(self, bound, covariant, contravariant):
+        """Used to setup TypeVars and ParamSpec's bound, covariant and
+        contravariant attributes.
+        """
+        if covariant and contravariant:
+            raise ValueError("Bivariant types are not supported.")
+        self.__covariant__ = bool(covariant)
+        self.__contravariant__ = bool(contravariant)
+        if bound:
+            self.__bound__ = _type_check(bound, "Bound must be a type.")
+        else:
+            self.__bound__ = None
 
-class TypeVar(_Final, _Immutable, _root=True):
+    def __or__(self, right):
+        return Union[self, right]
+
+    def __ror__(self, left):
+        return Union[left, self]
+
+    def __repr__(self):
+        if self.__covariant__:
+            prefix = '+'
+        elif self.__contravariant__:
+            prefix = '-'
+        else:
+            prefix = '~'
+        return prefix + self.__name__
+
+    def __reduce__(self):
+        return self.__name__
+
+
+class TypeVar(_Final, _Immutable, _TypeVarLike, _root=True):
     """Type variable.
 
     Usage::
@@ -605,20 +801,13 @@
     def __init__(self, name, *constraints, bound=None,
                  covariant=False, contravariant=False):
         self.__name__ = name
-        if covariant and contravariant:
-            raise ValueError("Bivariant types are not supported.")
-        self.__covariant__ = bool(covariant)
-        self.__contravariant__ = bool(contravariant)
+        super().__init__(bound, covariant, contravariant)
         if constraints and bound is not None:
             raise TypeError("Constraints cannot be combined with bound=...")
         if constraints and len(constraints) == 1:
             raise TypeError("A single constraint is not allowed")
         msg = "TypeVar(name, constraint, ...): constraints must be types."
         self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
-        if bound:
-            self.__bound__ = _type_check(bound, "Bound must be a type.")
-        else:
-            self.__bound__ = None
         try:
             def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')  # for pickling
         except (AttributeError, ValueError):
@@ -626,17 +815,121 @@
         if def_mod != 'typing':
             self.__module__ = def_mod
 
-    def __repr__(self):
-        if self.__covariant__:
-            prefix = '+'
-        elif self.__contravariant__:
-            prefix = '-'
-        else:
-            prefix = '~'
-        return prefix + self.__name__
 
-    def __reduce__(self):
-        return self.__name__
+class ParamSpecArgs(_Final, _Immutable, _root=True):
+    """The args for a ParamSpec object.
+
+    Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
+
+    ParamSpecArgs objects have a reference back to their ParamSpec:
+
+       P.args.__origin__ is P
+
+    This type is meant for runtime introspection and has no special meaning to
+    static type checkers.
+    """
+    def __init__(self, origin):
+        self.__origin__ = origin
+
+    def __repr__(self):
+        return f"{self.__origin__.__name__}.args"
+
+    def __eq__(self, other):
+        if not isinstance(other, ParamSpecArgs):
+            return NotImplemented
+        return self.__origin__ == other.__origin__
+
+
+class ParamSpecKwargs(_Final, _Immutable, _root=True):
+    """The kwargs for a ParamSpec object.
+
+    Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
+
+    ParamSpecKwargs objects have a reference back to their ParamSpec:
+
+       P.kwargs.__origin__ is P
+
+    This type is meant for runtime introspection and has no special meaning to
+    static type checkers.
+    """
+    def __init__(self, origin):
+        self.__origin__ = origin
+
+    def __repr__(self):
+        return f"{self.__origin__.__name__}.kwargs"
+
+    def __eq__(self, other):
+        if not isinstance(other, ParamSpecKwargs):
+            return NotImplemented
+        return self.__origin__ == other.__origin__
+
+
+class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
+    """Parameter specification variable.
+
+    Usage::
+
+       P = ParamSpec('P')
+
+    Parameter specification variables exist primarily for the benefit of static
+    type checkers.  They are used to forward the parameter types of one
+    callable to another callable, a pattern commonly found in higher order
+    functions and decorators.  They are only valid when used in ``Concatenate``,
+    or as the first argument to ``Callable``, or as parameters for user-defined
+    Generics.  See class Generic for more information on generic types.  An
+    example for annotating a decorator::
+
+       T = TypeVar('T')
+       P = ParamSpec('P')
+
+       def add_logging(f: Callable[P, T]) -> Callable[P, T]:
+           '''A type-safe decorator to add logging to a function.'''
+           def inner(*args: P.args, **kwargs: P.kwargs) -> T:
+               logging.info(f'{f.__name__} was called')
+               return f(*args, **kwargs)
+           return inner
+
+       @add_logging
+       def add_two(x: float, y: float) -> float:
+           '''Add two numbers together.'''
+           return x + y
+
+    Parameter specification variables defined with covariant=True or
+    contravariant=True can be used to declare covariant or contravariant
+    generic types.  These keyword arguments are valid, but their actual semantics
+    are yet to be decided.  See PEP 612 for details.
+
+    Parameter specification variables can be introspected. e.g.:
+
+       P.__name__ == 'P'
+       P.__bound__ == None
+       P.__covariant__ == False
+       P.__contravariant__ == False
+
+    Note that only parameter specification variables defined in global scope can
+    be pickled.
+    """
+
+    __slots__ = ('__name__', '__bound__', '__covariant__', '__contravariant__',
+                 '__dict__')
+
+    @property
+    def args(self):
+        return ParamSpecArgs(self)
+
+    @property
+    def kwargs(self):
+        return ParamSpecKwargs(self)
+
+    def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
+        self.__name__ = name
+        super().__init__(bound, covariant, contravariant)
+        try:
+            def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+        except (AttributeError, ValueError):
+            def_mod = None
+        if def_mod != 'typing':
+            self.__module__ = def_mod
 
 
 def _is_dunder(attr):
@@ -681,14 +974,18 @@
         return tuple(res)
 
     def __getattr__(self, attr):
+        if attr in {'__name__', '__qualname__'}:
+            return self._name or self.__origin__.__name__
+
         # We are careful for copy and pickle.
-        # Also for simplicity we just don't relay all dunder names
+        # Also for simplicity we don't relay any dunder names
         if '__origin__' in self.__dict__ and not _is_dunder(attr):
             return getattr(self.__origin__, attr)
         raise AttributeError(attr)
 
     def __setattr__(self, attr, val):
-        if _is_dunder(attr) or attr in ('_name', '_inst', '_nparams'):
+        if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
+                                        '_typevar_types', '_paramspec_tvars'}:
             super().__setattr__(attr, val)
         else:
             setattr(self.__origin__, attr, val)
@@ -700,6 +997,9 @@
         raise TypeError("Subscripted generics cannot be used with"
                         " class and instance checks")
 
+    def __dir__(self):
+        return list(set(super().__dir__()
+                + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)]))
 
 # Special typing constructs Union, Optional, Generic, Callable and Tuple
 # use three special attributes for internal bookkeeping of generic types:
@@ -713,14 +1013,18 @@
 
 
 class _GenericAlias(_BaseGenericAlias, _root=True):
-    def __init__(self, origin, params, *, inst=True, name=None):
+    def __init__(self, origin, params, *, inst=True, name=None,
+                 _typevar_types=TypeVar,
+                 _paramspec_tvars=False):
         super().__init__(origin, inst=inst, name=name)
         if not isinstance(params, tuple):
             params = (params,)
         self.__args__ = tuple(... if a is _TypingEllipsis else
                               () if a is _TypingEmpty else
                               a for a in params)
-        self.__parameters__ = _collect_type_vars(params)
+        self.__parameters__ = _collect_type_vars(params, typevar_types=_typevar_types)
+        self._typevar_types = _typevar_types
+        self._paramspec_tvars = _paramspec_tvars
         if not name:
             self.__module__ = origin.__module__
 
@@ -733,6 +1037,12 @@
     def __hash__(self):
         return hash((self.__origin__, self.__args__))
 
+    def __or__(self, right):
+        return Union[self, right]
+
+    def __ror__(self, left):
+        return Union[left, self]
+
     @_tp_cache
     def __getitem__(self, params):
         if self.__origin__ in (Generic, Protocol):
@@ -740,25 +1050,40 @@
             raise TypeError(f"Cannot subscript already-subscripted {self}")
         if not isinstance(params, tuple):
             params = (params,)
-        msg = "Parameters to generic types must be types."
-        params = tuple(_type_check(p, msg) for p in params)
-        _check_generic(self, params, len(self.__parameters__))
+        params = tuple(_type_convert(p) for p in params)
+        if (self._paramspec_tvars
+                and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
+            params = _prepare_paramspec_params(self, params)
+        else:
+            _check_generic(self, params, len(self.__parameters__))
 
         subst = dict(zip(self.__parameters__, params))
         new_args = []
         for arg in self.__args__:
-            if isinstance(arg, TypeVar):
-                arg = subst[arg]
-            elif isinstance(arg, (_GenericAlias, GenericAlias)):
+            if isinstance(arg, self._typevar_types):
+                if isinstance(arg, ParamSpec):
+                    arg = subst[arg]
+                    if not _is_param_expr(arg):
+                        raise TypeError(f"Expected a list of types, an ellipsis, "
+                                        f"ParamSpec, or Concatenate. Got {arg}")
+                else:
+                    arg = subst[arg]
+            elif isinstance(arg, (_GenericAlias, GenericAlias, types.UnionType)):
                 subparams = arg.__parameters__
                 if subparams:
                     subargs = tuple(subst[x] for x in subparams)
                     arg = arg[subargs]
-            new_args.append(arg)
+            # Required to flatten out the args for CallableGenericAlias
+            if self.__origin__ == collections.abc.Callable and isinstance(arg, tuple):
+                new_args.extend(arg)
+            else:
+                new_args.append(arg)
         return self.copy_with(tuple(new_args))
 
     def copy_with(self, params):
-        return self.__class__(self.__origin__, params, name=self._name, inst=self._inst)
+        return self.__class__(self.__origin__, params, name=self._name, inst=self._inst,
+                              _typevar_types=self._typevar_types,
+                              _paramspec_tvars=self._paramspec_tvars)
 
     def __repr__(self):
         if self._name:
@@ -779,6 +1104,9 @@
         return operator.getitem, (origin, args)
 
     def __mro_entries__(self, bases):
+        if isinstance(self.__origin__, _SpecialForm):
+            raise TypeError(f"Cannot subclass {self!r}")
+
         if self._name:  # generic version of an ABC or built-in class
             return super().__mro_entries__(bases)
         if self.__origin__ is Generic:
@@ -832,19 +1160,25 @@
     def __reduce__(self):
         return self._name
 
+    def __or__(self, right):
+        return Union[self, right]
+
+    def __ror__(self, left):
+        return Union[left, self]
 
 class _CallableGenericAlias(_GenericAlias, _root=True):
     def __repr__(self):
         assert self._name == 'Callable'
-        if len(self.__args__) == 2 and self.__args__[0] is Ellipsis:
+        args = self.__args__
+        if len(args) == 2 and _is_param_expr(args[0]):
             return super().__repr__()
         return (f'typing.Callable'
-                f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], '
-                f'{_type_repr(self.__args__[-1])}]')
+                f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
+                f'{_type_repr(args[-1])}]')
 
     def __reduce__(self):
         args = self.__args__
-        if not (len(args) == 2 and args[0] is ...):
+        if not (len(args) == 2 and _is_param_expr(args[0])):
             args = list(args[:-1]), args[-1]
         return operator.getitem, (Callable, args)
 
@@ -852,20 +1186,22 @@
 class _CallableType(_SpecialGenericAlias, _root=True):
     def copy_with(self, params):
         return _CallableGenericAlias(self.__origin__, params,
-                                     name=self._name, inst=self._inst)
+                                     name=self._name, inst=self._inst,
+                                     _typevar_types=(TypeVar, ParamSpec),
+                                     _paramspec_tvars=True)
 
     def __getitem__(self, params):
         if not isinstance(params, tuple) or len(params) != 2:
             raise TypeError("Callable must be used as "
                             "Callable[[arg, ...], result].")
         args, result = params
-        if args is Ellipsis:
-            params = (Ellipsis, result)
-        else:
-            if not isinstance(args, list):
-                raise TypeError(f"Callable[args, result]: args must be a list."
-                                f" Got {args}")
+        # This relaxes what args can be on purpose to allow things like
+        # PEP 612 ParamSpec.  Responsibility for whether a user is using
+        # Callable[...] properly is deferred to static type checkers.
+        if isinstance(args, list):
             params = (tuple(args), result)
+        else:
+            params = (args, result)
         return self.__getitem_inner__(params)
 
     @_tp_cache
@@ -875,8 +1211,9 @@
         result = _type_check(result, msg)
         if args is Ellipsis:
             return self.copy_with((_TypingEllipsis, result))
-        msg = "Callable[[arg, ...], result]: each arg must be a type."
-        args = tuple(_type_check(arg, msg) for arg in args)
+        if not isinstance(args, tuple):
+            args = (args,)
+        args = tuple(_type_convert(arg) for arg in args)
         params = args + (result,)
         return self.copy_with(params)
 
@@ -902,7 +1239,7 @@
         return Union[params]
 
     def __eq__(self, other):
-        if not isinstance(other, _UnionGenericAlias):
+        if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
             return NotImplemented
         return set(self.__args__) == set(other.__args__)
 
@@ -918,6 +1255,18 @@
                 return f'typing.Optional[{_type_repr(args[0])}]'
         return super().__repr__()
 
+    def __instancecheck__(self, obj):
+        return self.__subclasscheck__(type(obj))
+
+    def __subclasscheck__(self, cls):
+        for arg in self.__args__:
+            if issubclass(cls, arg):
+                return True
+
+    def __reduce__(self):
+        func, (origin, args) = super().__reduce__()
+        return func, (Union, args)
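
Together these make typing.Union values interoperate with PEP 604 unions:
equality, isinstance() and issubclass() now work directly on them:

    from typing import Optional, Union

    assert Union[int, str] == (int | str)   # __eq__ accepts types.UnionType
    assert Optional[int] == Union[int, None]
    assert isinstance(3, Union[int, str])   # via the new __instancecheck__
    assert issubclass(bool, Union[int, str])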
+
 
 def _value_and_type_iter(parameters):
     return ((p, type(p)) for p in parameters)
@@ -935,6 +1284,18 @@
         return hash(frozenset(_value_and_type_iter(self.__args__)))
 
 
+class _ConcatenateGenericAlias(_GenericAlias, _root=True):
+    def copy_with(self, params):
+        if isinstance(params[-1], (list, tuple)):
+            return (*params[:-1], *params[-1])
+        if isinstance(params[-1], _ConcatenateGenericAlias):
+            params = (*params[:-1], *params[-1].__args__)
+        elif not isinstance(params[-1], ParamSpec):
+            raise TypeError("The last parameter to Concatenate should be a "
+                            "ParamSpec variable.")
+        return super().copy_with(params)
+
+
 class Generic:
     """Abstract base class for generic types.
 
@@ -965,20 +1326,25 @@
         if not params and cls is not Tuple:
             raise TypeError(
                 f"Parameter list to {cls.__qualname__}[...] cannot be empty")
-        msg = "Parameters to generic types must be types."
-        params = tuple(_type_check(p, msg) for p in params)
+        params = tuple(_type_convert(p) for p in params)
         if cls in (Generic, Protocol):
             # Generic and Protocol can only be subscripted with unique type variables.
-            if not all(isinstance(p, TypeVar) for p in params):
+            if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
                 raise TypeError(
-                    f"Parameters to {cls.__name__}[...] must all be type variables")
+                    f"Parameters to {cls.__name__}[...] must all be type variables "
+                    f"or parameter specification variables.")
             if len(set(params)) != len(params):
                 raise TypeError(
                     f"Parameters to {cls.__name__}[...] must all be unique")
         else:
             # Subscripting a regular Generic subclass.
-            _check_generic(cls, params, len(cls.__parameters__))
-        return _GenericAlias(cls, params)
+            if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
+                params = _prepare_paramspec_params(cls, params)
+            else:
+                _check_generic(cls, params, len(cls.__parameters__))
+        return _GenericAlias(cls, params,
+                             _typevar_types=(TypeVar, ParamSpec),
+                             _paramspec_tvars=True)
 
     def __init_subclass__(cls, *args, **kwargs):
         super().__init_subclass__(*args, **kwargs)
@@ -990,7 +1356,7 @@
         if error:
             raise TypeError("Cannot inherit from plain Generic")
         if '__orig_bases__' in cls.__dict__:
-            tvars = _collect_type_vars(cls.__orig_bases__)
+            tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
             # Look for Generic[T1, ..., Tn].
             # If found, tvars must be a subset of it.
             # If not found, tvars is it.
@@ -1060,24 +1426,55 @@
     return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
 
 
-def _no_init(self, *args, **kwargs):
-    if type(self)._is_protocol:
+def _no_init_or_replace_init(self, *args, **kwargs):
+    cls = type(self)
+
+    if cls._is_protocol:
         raise TypeError('Protocols cannot be instantiated')
 
+    # Already using a custom `__init__`. No need to calculate correct
+    # `__init__` to call. This can lead to RecursionError. See bpo-45121.
+    if cls.__init__ is not _no_init_or_replace_init:
+        return
 
-def _allow_reckless_class_cheks():
+    # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
+    # The first instantiation of the subclass will call `_no_init_or_replace_init` which
+    # searches for a proper new `__init__` in the MRO. The new `__init__`
+    # replaces the subclass' old `__init__` (i.e. `_no_init_or_replace_init`). Subsequent
+    # instantiation of the protocol subclass will thus use the new
+    # `__init__` and no longer call `_no_init_or_replace_init`.
+    for base in cls.__mro__:
+        init = base.__dict__.get('__init__', _no_init_or_replace_init)
+        if init is not _no_init_or_replace_init:
+            cls.__init__ = init
+            break
+    else:
+        # should not happen
+        cls.__init__ = object.__init__
+
+    cls.__init__(self, *args, **kwargs)
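
The replacement dance fixes bpo-45121: a concrete class that mixes in a
Protocol keeps its inherited __init__ instead of a do-nothing placeholder
(and repeated instantiation cannot recurse). A sketch:

    from typing import Protocol

    class Named(Protocol):
        name: str

    class Base:
        def __init__(self):
            self.name = "base"

    class Impl(Named, Base):  # concrete: Protocol is not a direct base
        pass

    # The first Impl() swaps the placeholder for Base.__init__ found in
    # the MRO, so the attribute is set as expected.
    assert Impl().name == "base"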
+
+
+def _caller(depth=1, default='__main__'):
+    try:
+        return sys._getframe(depth + 1).f_globals.get('__name__', default)
+    except (AttributeError, ValueError):  # For platforms without _getframe()
+        return None
+
+
+def _allow_reckless_class_checks(depth=3):
     """Allow instance and class checks for special stdlib modules.
 
     The abc and functools modules indiscriminately call isinstance() and
     issubclass() on the whole MRO of a user class, which may contain protocols.
     """
     try:
-        return sys._getframe(3).f_globals['__name__'] in ['abc', 'functools']
+        return sys._getframe(depth).f_globals['__name__'] in ['abc', 'functools']
     except (AttributeError, ValueError):  # For platforms without _getframe().
         return True
 
 
-_PROTO_WHITELIST = {
+_PROTO_ALLOWLIST = {
     'collections.abc': [
         'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
         'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
@@ -1092,6 +1489,14 @@
     def __instancecheck__(cls, instance):
         # We need this method for situations where attributes are
         # assigned in __init__.
+        if (
+            getattr(cls, '_is_protocol', False) and
+            not getattr(cls, '_is_runtime_protocol', False) and
+            not _allow_reckless_class_checks(depth=2)
+        ):
+            raise TypeError("Instance and class checks can only be used with"
+                            " @runtime_checkable protocols")
+
         if ((not getattr(cls, '_is_protocol', False) or
                 _is_callable_members_only(cls)) and
                 issubclass(instance.__class__, cls)):
@@ -1154,12 +1559,12 @@
 
             # First, perform various sanity checks.
             if not getattr(cls, '_is_runtime_protocol', False):
-                if _allow_reckless_class_cheks():
+                if _allow_reckless_class_checks():
                     return NotImplemented
                 raise TypeError("Instance and class checks can only be used with"
                                 " @runtime_checkable protocols")
             if not _is_callable_members_only(cls):
-                if _allow_reckless_class_cheks():
+                if _allow_reckless_class_checks():
                     return NotImplemented
                 raise TypeError("Protocols with non-method members"
                                 " don't support issubclass()")
@@ -1196,12 +1601,12 @@
         # ... otherwise check consistency of bases, and prohibit instantiation.
         for base in cls.__bases__:
             if not (base in (object, Generic) or
-                    base.__module__ in _PROTO_WHITELIST and
-                    base.__name__ in _PROTO_WHITELIST[base.__module__] or
+                    base.__module__ in _PROTO_ALLOWLIST and
+                    base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
                     issubclass(base, Generic) and base._is_protocol):
                 raise TypeError('Protocols can only inherit from other'
                                 ' protocols, got %r' % base)
-        cls.__init__ = _no_init
+        cls.__init__ = _no_init_or_replace_init
 
 
 class _AnnotatedAlias(_GenericAlias, _root=True):
@@ -1244,6 +1649,11 @@
     def __hash__(self):
         return hash((self.__origin__, self.__metadata__))
 
+    def __getattr__(self, attr):
+        if attr in {'__name__', '__qualname__'}:
+            return 'Annotated'
+        return super().__getattr__(attr)
+
 
 class Annotated:
     """Add context specific metadata to a type.
@@ -1288,7 +1698,7 @@
                             "with at least two arguments (a type and an "
                             "annotation).")
         msg = "Annotated[t, ...]: t must be a type."
-        origin = _type_check(params[0], msg)
+        origin = _type_check(params[0], msg, allow_special_forms=True)
         metadata = tuple(params[1:])
         return _AnnotatedAlias(origin, metadata)
 
@@ -1382,7 +1792,8 @@
     - If no dict arguments are passed, an attempt is made to use the
       globals from obj (or the respective module's globals for classes),
       and these are also used as the locals.  If the object does not appear
-      to have globals, an empty dictionary is used.
+      to have globals, an empty dictionary is used.  For classes, the search
+      order is globals first then locals.
 
     - If one dict argument is passed, it is used for both globals and
       locals.
@@ -1398,16 +1809,27 @@
         hints = {}
         for base in reversed(obj.__mro__):
             if globalns is None:
-                base_globals = sys.modules[base.__module__].__dict__
+                base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
             else:
                 base_globals = globalns
             ann = base.__dict__.get('__annotations__', {})
+            if isinstance(ann, types.GetSetDescriptorType):
+                ann = {}
+            base_locals = dict(vars(base)) if localns is None else localns
+            if localns is None and globalns is None:
+                # This is surprising, but required.  Before Python 3.10,
+                # get_type_hints only evaluated the globalns of
+                # a class.  To maintain backwards compatibility, we reverse
+                # the globalns and localns order so that eval() looks into
+                # *base_globals* first rather than *base_locals*.
+                # This only affects ForwardRefs.
+                base_globals, base_locals = base_locals, base_globals
             for name, value in ann.items():
                 if value is None:
                     value = type(None)
                 if isinstance(value, str):
-                    value = ForwardRef(value, is_argument=False)
-                value = _eval_type(value, base_globals, localns)
+                    value = ForwardRef(value, is_argument=False, is_class=True)
+                value = _eval_type(value, base_globals, base_locals)
                 hints[name] = value
         return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
 
@@ -1438,7 +1860,13 @@
         if value is None:
             value = type(None)
         if isinstance(value, str):
-            value = ForwardRef(value)
+            # class-level forward refs were handled above, this must be either
+            # a module-level annotation or a function argument annotation
+            value = ForwardRef(
+                value,
+                is_argument=not isinstance(obj, types.ModuleType),
+                is_class=False,
+            )
         value = _eval_type(value, globalns, localns)
         if name in defaults and defaults[name] is None:
             value = Optional[value]
@@ -1461,6 +1889,12 @@
         if stripped_args == t.__args__:
             return t
         return GenericAlias(t.__origin__, stripped_args)
+    if isinstance(t, types.UnionType):
+        stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
+        if stripped_args == t.__args__:
+            return t
+        return functools.reduce(operator.or_, stripped_args)
+
     return t
 
 
@@ -1477,13 +1911,17 @@
         get_origin(Generic[T]) is Generic
         get_origin(Union[T, int]) is Union
         get_origin(List[Tuple[T, T]][int]) == list
+        get_origin(P.args) is P
     """
     if isinstance(tp, _AnnotatedAlias):
         return Annotated
-    if isinstance(tp, (_BaseGenericAlias, GenericAlias)):
+    if isinstance(tp, (_BaseGenericAlias, GenericAlias,
+                       ParamSpecArgs, ParamSpecKwargs)):
         return tp.__origin__
     if tp is Generic:
         return Generic
+    if isinstance(tp, types.UnionType):
+        return types.UnionType
     return None
 
 
@@ -1500,16 +1938,31 @@
     """
     if isinstance(tp, _AnnotatedAlias):
         return (tp.__origin__,) + tp.__metadata__
-    if isinstance(tp, _GenericAlias):
+    if isinstance(tp, (_GenericAlias, GenericAlias)):
         res = tp.__args__
-        if tp.__origin__ is collections.abc.Callable and res[0] is not Ellipsis:
+        if (tp.__origin__ is collections.abc.Callable
+                and not (len(res) == 2 and _is_param_expr(res[0]))):
             res = (list(res[:-1]), res[-1])
         return res
-    if isinstance(tp, GenericAlias):
+    if isinstance(tp, types.UnionType):
         return tp.__args__
     return ()
 
 
+def is_typeddict(tp):
+    """Check if an annotation is a TypedDict class
+
+    For example::
+        class Film(TypedDict):
+            title: str
+            year: int
+
+        is_typeddict(Film)  # => True
+        is_typeddict(Union[list, str])  # => False
+    """
+    return isinstance(tp, _TypedDictMeta)
+
+
 def no_type_check(arg):
     """Decorator to indicate that annotations are not type hints.
 
@@ -1900,7 +2353,8 @@
         own_annotation_keys = set(own_annotations.keys())
         msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
         own_annotations = {
-            n: _type_check(tp, msg) for n, tp in own_annotations.items()
+            n: _type_check(tp, msg, module=tp_dict.__module__)
+            for n, tp in own_annotations.items()
         }
         required_keys = set()
         optional_keys = set()
@@ -1980,24 +2434,24 @@
         raise TypeError("TypedDict takes either a dict or keyword arguments,"
                         " but not both")
 
-    ns = {'__annotations__': dict(fields), '__total__': total}
+    ns = {'__annotations__': dict(fields)}
     try:
         # Setting correct module is necessary to make typed dict classes pickleable.
         ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
     except (AttributeError, ValueError):
         pass
 
-    return _TypedDictMeta(typename, (), ns)
+    return _TypedDictMeta(typename, (), ns, total=total)
 
 _TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
 TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
 
 
-def NewType(name, tp):
+class NewType:
     """NewType creates simple unique types with almost zero
     runtime overhead. NewType(name, tp) is considered a subtype of tp
     by static type checkers. At runtime, NewType(name, tp) returns
-    a dummy function that simply returns its argument. Usage::
+    a dummy callable that simply returns its argument. Usage::
 
         UserId = NewType('UserId', int)
 
@@ -2012,12 +2466,30 @@
         num = UserId(5) + 1     # type: int
     """
 
-    def new_type(x):
+    def __init__(self, name, tp):
+        self.__qualname__ = name
+        if '.' in name:
+            name = name.rpartition('.')[-1]
+        self.__name__ = name
+        self.__supertype__ = tp
+        def_mod = _caller()
+        if def_mod != 'typing':
+            self.__module__ = def_mod
+
+    def __repr__(self):
+        return f'{self.__module__}.{self.__qualname__}'
+
+    def __call__(self, x):
         return x
 
-    new_type.__name__ = name
-    new_type.__supertype__ = tp
-    return new_type
+    def __reduce__(self):
+        return self.__qualname__
+
+    def __or__(self, other):
+        return Union[self, other]
+
+    def __ror__(self, other):
+        return Union[other, self]
 
 
 # Python-version-specific alias (Python 2: unicode; Python 3: str)
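
Taken together, the remaining typing.py hunks follow the 3.10 API: ParamSpec-aware Generic subscription, get_origin()/get_args() support for types.UnionType and ParamSpecArgs/ParamSpecKwargs, the new is_typeddict() introspection helper, and NewType reimplemented as a class so instances gain a repr, pickle support, and the PEP 604 | operator. A small sketch of the user-visible parts:

    from typing import NewType, TypedDict, is_typeddict

    UserId = NewType('UserId', int)

    class Film(TypedDict):
        title: str
        year: int

    print(repr(UserId))        # '__main__.UserId' -- NewType now has a repr
    print(UserId(5) + 1)       # 6: still an identity callable at runtime
    MaybeId = UserId | None    # __or__/__ror__ delegate to Union
    print(is_typeddict(Film))  # True
    print(is_typeddict(int))   # False
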
diff --git a/Lib/unittest/_log.py b/Lib/unittest/_log.py
index 94e7e75..94868e5 100644
--- a/Lib/unittest/_log.py
+++ b/Lib/unittest/_log.py
@@ -26,11 +26,11 @@
 
 
 class _AssertLogsContext(_BaseTestCaseContext):
-    """A context manager used to implement TestCase.assertLogs()."""
+    """A context manager for assertLogs() and assertNoLogs() """
 
     LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"
 
-    def __init__(self, test_case, logger_name, level):
+    def __init__(self, test_case, logger_name, level, no_logs):
         _BaseTestCaseContext.__init__(self, test_case)
         self.logger_name = logger_name
         if level:
@@ -38,6 +38,7 @@
         else:
             self.level = logging.INFO
         self.msg = None
+        self.no_logs = no_logs
 
     def __enter__(self):
         if isinstance(self.logger_name, logging.Logger):
@@ -46,6 +47,7 @@
             logger = self.logger = logging.getLogger(self.logger_name)
         formatter = logging.Formatter(self.LOGGING_FORMAT)
         handler = _CapturingHandler()
+        handler.setLevel(self.level)
         handler.setFormatter(formatter)
         self.watcher = handler.watcher
         self.old_handlers = logger.handlers[:]
@@ -54,16 +56,31 @@
         logger.handlers = [handler]
         logger.setLevel(self.level)
         logger.propagate = False
+        if self.no_logs:
+            return
         return handler.watcher
 
     def __exit__(self, exc_type, exc_value, tb):
         self.logger.handlers = self.old_handlers
         self.logger.propagate = self.old_propagate
         self.logger.setLevel(self.old_level)
+
         if exc_type is not None:
             # let unexpected exceptions pass through
             return False
-        if len(self.watcher.records) == 0:
-            self._raiseFailure(
-                "no logs of level {} or higher triggered on {}"
-                .format(logging.getLevelName(self.level), self.logger.name))
+
+        if self.no_logs:
+            # assertNoLogs
+            if len(self.watcher.records) > 0:
+                self._raiseFailure(
+                    "Unexpected logs found: {!r}".format(
+                        self.watcher.output
+                    )
+                )
+
+        else:
+            # assertLogs
+            if len(self.watcher.records) == 0:
+                self._raiseFailure(
+                    "no logs of level {} or higher triggered on {}"
+                    .format(logging.getLevelName(self.level), self.logger.name))
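
These _log.py changes let one context manager back both assertLogs() and the new assertNoLogs(); note that the capturing handler now filters at self.level, so records below the requested level no longer count toward either assertion. A hedged usage sketch (the public TestCase methods are wired up in the case.py hunks below; the logger name is illustrative):

    import logging
    import unittest

    class LogTests(unittest.TestCase):
        def test_no_warnings(self):
            with self.assertNoLogs('myapp', level='WARNING'):
                # INFO is below WARNING, so this does not fail the assertion
                logging.getLogger('myapp').info('routine message')
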
diff --git a/Lib/unittest/async_case.py b/Lib/unittest/async_case.py
index 1bc1312..2323119 100644
--- a/Lib/unittest/async_case.py
+++ b/Lib/unittest/async_case.py
@@ -4,7 +4,6 @@
 from .case import TestCase
 
 
-
 class IsolatedAsyncioTestCase(TestCase):
     # Names intentionally have a long prefix
     # to reduce a chance of clashing with user-defined attributes
@@ -52,7 +51,7 @@
         # We intentionally don't add inspect.iscoroutinefunction() check
         # for func argument because there is no way
         # to check for async function reliably:
-        # 1. It can be "async def func()" iself
+        # 1. It can be "async def func()" itself
         # 2. Class can implement "async def __call__()" method
         # 3. Regular "def func()" that returns awaitable object
         self.addCleanup(*(func, *args), **kwargs)
@@ -72,15 +71,15 @@
         self._callMaybeAsync(function, *args, **kwargs)
 
     def _callAsync(self, func, /, *args, **kwargs):
-        assert self._asyncioTestLoop is not None
+        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
         ret = func(*args, **kwargs)
-        assert inspect.isawaitable(ret)
+        assert inspect.isawaitable(ret), f'{func!r} returned non-awaitable'
         fut = self._asyncioTestLoop.create_future()
         self._asyncioCallsQueue.put_nowait((fut, ret))
         return self._asyncioTestLoop.run_until_complete(fut)
 
     def _callMaybeAsync(self, func, /, *args, **kwargs):
-        assert self._asyncioTestLoop is not None
+        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
         ret = func(*args, **kwargs)
         if inspect.isawaitable(ret):
             fut = self._asyncioTestLoop.create_future()
@@ -102,14 +101,14 @@
                 ret = await awaitable
                 if not fut.cancelled():
                     fut.set_result(ret)
-            except asyncio.CancelledError:
+            except (SystemExit, KeyboardInterrupt):
                 raise
-            except Exception as ex:
+            except (BaseException, asyncio.CancelledError) as ex:
                 if not fut.cancelled():
                     fut.set_exception(ex)
 
     def _setupAsyncioLoop(self):
-        assert self._asyncioTestLoop is None
+        assert self._asyncioTestLoop is None, 'asyncio test loop already initialized'
         loop = asyncio.new_event_loop()
         asyncio.set_event_loop(loop)
         loop.set_debug(True)
@@ -119,7 +118,7 @@
         loop.run_until_complete(fut)
 
     def _tearDownAsyncioLoop(self):
-        assert self._asyncioTestLoop is not None
+        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
         loop = self._asyncioTestLoop
         self._asyncioTestLoop = None
         self._asyncioCallsQueue.put_nowait(None)
@@ -135,7 +134,7 @@
                 task.cancel()
 
             loop.run_until_complete(
-                asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
+                asyncio.gather(*to_cancel, return_exceptions=True))
 
             for task in to_cancel:
                 if task.cancelled():
@@ -158,3 +157,12 @@
             return super().run(result)
         finally:
             self._tearDownAsyncioLoop()
+
+    def debug(self):
+        self._setupAsyncioLoop()
+        super().debug()
+        self._tearDownAsyncioLoop()
+
+    def __del__(self):
+        if self._asyncioTestLoop is not None:
+            self._tearDownAsyncioLoop()
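
The async_case.py hunks above add messages to the loop-state assertions, stop swallowing BaseException subclasses raised by the awaited test body, drop the loop= argument that asyncio.gather() deprecated, and give IsolatedAsyncioTestCase working debug() and __del__ hooks that manage the event loop themselves. A minimal sketch:

    import unittest

    class PingTests(unittest.IsolatedAsyncioTestCase):
        async def test_ping(self):
            self.assertTrue(True)

    # debug() now sets up and tears down the asyncio test loop on its own,
    # running the coroutine outside of result collection:
    PingTests('test_ping').debug()
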
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index f8bc865..61003d0 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -252,7 +252,7 @@
     def __enter__(self):
         # The __warningregistry__'s need to be in a pristine state for tests
         # to work properly.
-        for v in sys.modules.values():
+        for v in list(sys.modules.values()):
             if getattr(v, '__warningregistry__', None):
                 v.__warningregistry__ = {}
         self.warnings_manager = warnings.catch_warnings(record=True)
@@ -295,7 +295,6 @@
             self._raiseFailure("{} not triggered".format(exc_name))
 
 
-
 class _OrderedChainMap(collections.ChainMap):
     def __iter__(self):
         seen = set()
@@ -556,73 +555,71 @@
         function(*args, **kwargs)
 
     def run(self, result=None):
-        orig_result = result
         if result is None:
             result = self.defaultTestResult()
             startTestRun = getattr(result, 'startTestRun', None)
+            stopTestRun = getattr(result, 'stopTestRun', None)
             if startTestRun is not None:
                 startTestRun()
+        else:
+            stopTestRun = None
 
         result.startTest(self)
-
-        testMethod = getattr(self, self._testMethodName)
-        if (getattr(self.__class__, "__unittest_skip__", False) or
-            getattr(testMethod, "__unittest_skip__", False)):
-            # If the class or method was skipped.
-            try:
+        try:
+            testMethod = getattr(self, self._testMethodName)
+            if (getattr(self.__class__, "__unittest_skip__", False) or
+                getattr(testMethod, "__unittest_skip__", False)):
+                # If the class or method was skipped.
                 skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                             or getattr(testMethod, '__unittest_skip_why__', ''))
                 self._addSkip(result, self, skip_why)
-            finally:
-                result.stopTest(self)
-            return
-        expecting_failure_method = getattr(testMethod,
-                                           "__unittest_expecting_failure__", False)
-        expecting_failure_class = getattr(self,
-                                          "__unittest_expecting_failure__", False)
-        expecting_failure = expecting_failure_class or expecting_failure_method
-        outcome = _Outcome(result)
-        try:
-            self._outcome = outcome
+                return result
 
-            with outcome.testPartExecutor(self):
-                self._callSetUp()
-            if outcome.success:
-                outcome.expecting_failure = expecting_failure
-                with outcome.testPartExecutor(self, isTest=True):
-                    self._callTestMethod(testMethod)
-                outcome.expecting_failure = False
+            expecting_failure = (
+                getattr(self, "__unittest_expecting_failure__", False) or
+                getattr(testMethod, "__unittest_expecting_failure__", False)
+            )
+            outcome = _Outcome(result)
+            try:
+                self._outcome = outcome
+
                 with outcome.testPartExecutor(self):
-                    self._callTearDown()
+                    self._callSetUp()
+                if outcome.success:
+                    outcome.expecting_failure = expecting_failure
+                    with outcome.testPartExecutor(self, isTest=True):
+                        self._callTestMethod(testMethod)
+                    outcome.expecting_failure = False
+                    with outcome.testPartExecutor(self):
+                        self._callTearDown()
 
-            self.doCleanups()
-            for test, reason in outcome.skipped:
-                self._addSkip(result, test, reason)
-            self._feedErrorsToResult(result, outcome.errors)
-            if outcome.success:
-                if expecting_failure:
-                    if outcome.expectedFailure:
-                        self._addExpectedFailure(result, outcome.expectedFailure)
+                self.doCleanups()
+                for test, reason in outcome.skipped:
+                    self._addSkip(result, test, reason)
+                self._feedErrorsToResult(result, outcome.errors)
+                if outcome.success:
+                    if expecting_failure:
+                        if outcome.expectedFailure:
+                            self._addExpectedFailure(result, outcome.expectedFailure)
+                        else:
+                            self._addUnexpectedSuccess(result)
                     else:
-                        self._addUnexpectedSuccess(result)
-                else:
-                    result.addSuccess(self)
-            return result
+                        result.addSuccess(self)
+                return result
+            finally:
+                # explicitly break reference cycles:
+                # outcome.errors -> frame -> outcome -> outcome.errors
+                # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
+                outcome.errors.clear()
+                outcome.expectedFailure = None
+
+                # clear the outcome, no more needed
+                self._outcome = None
+
         finally:
             result.stopTest(self)
-            if orig_result is None:
-                stopTestRun = getattr(result, 'stopTestRun', None)
-                if stopTestRun is not None:
-                    stopTestRun()
-
-            # explicitly break reference cycles:
-            # outcome.errors -> frame -> outcome -> outcome.errors
-            # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
-            outcome.errors.clear()
-            outcome.expectedFailure = None
-
-            # clear the outcome, no more needed
-            self._outcome = None
+            if stopTestRun is not None:
+                stopTestRun()
 
     def doCleanups(self):
         """Execute all cleanup functions. Normally called for you after
@@ -654,12 +651,20 @@
 
     def debug(self):
         """Run the test without collecting errors in a TestResult"""
-        self.setUp()
-        getattr(self, self._testMethodName)()
-        self.tearDown()
+        testMethod = getattr(self, self._testMethodName)
+        if (getattr(self.__class__, "__unittest_skip__", False) or
+            getattr(testMethod, "__unittest_skip__", False)):
+            # If the class or method was skipped.
+            skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
+                        or getattr(testMethod, '__unittest_skip_why__', ''))
+            raise SkipTest(skip_why)
+
+        self._callSetUp()
+        self._callTestMethod(testMethod)
+        self._callTearDown()
         while self._cleanups:
-            function, args, kwargs = self._cleanups.pop(-1)
-            function(*args, **kwargs)
+            function, args, kwargs = self._cleanups.pop()
+            self._callCleanup(function, *args, **kwargs)
 
     def skipTest(self, reason):
         """Skip this test."""
@@ -788,7 +793,16 @@
         """
         # Lazy import to avoid importing logging if it is not needed.
         from ._log import _AssertLogsContext
-        return _AssertLogsContext(self, logger, level)
+        return _AssertLogsContext(self, logger, level, no_logs=False)
+
+    def assertNoLogs(self, logger=None, level=None):
+        """ Fail unless no log messages of level *level* or higher are emitted
+        on *logger_name* or its children.
+
+        This method must be used as a context manager.
+        """
+        from ._log import _AssertLogsContext
+        return _AssertLogsContext(self, logger, level, no_logs=True)
 
     def _getAssertEqualityFunc(self, first, second):
         """Get a detailed comparison function for the types of the two args.
@@ -1132,7 +1146,8 @@
     def assertDictContainsSubset(self, subset, dictionary, msg=None):
         """Checks whether dictionary is a superset of subset."""
         warnings.warn('assertDictContainsSubset is deprecated',
-                      DeprecationWarning)
+                      DeprecationWarning,
+                      stacklevel=2)
         missing = []
         mismatched = []
         for key, value in subset.items():
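
In case.py, run() is restructured so result.stopTest() and stopTestRun() are reached through try/finally even when skip handling returns early, and debug() now honors skip decorators (raising SkipTest) and routes cleanups through _callCleanup rather than invoking them directly. For example:

    import unittest

    class SkippedInDebug(unittest.TestCase):
        @unittest.skip('not ready')
        def test_later(self):
            self.fail('never runs')

    # Previously debug() ignored the decorator and executed the body;
    # it now raises unittest.SkipTest('not ready') instead:
    # SkippedInDebug('test_later').debug()
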
diff --git a/Lib/unittest/mock.py b/Lib/unittest/mock.py
index b495a5f..7152f86 100644
--- a/Lib/unittest/mock.py
+++ b/Lib/unittest/mock.py
@@ -36,6 +36,10 @@
 from functools import wraps, partial
 
 
+class InvalidSpecError(Exception):
+    """Indicates that an invalid value was used as a mock spec."""
+
+
 _builtins = {name for name in dir(builtins) if not name.startswith('_')}
 
 FILTER_DIR = True
@@ -406,7 +410,7 @@
             # Check if spec is an async object or function
             bound_args = _MOCK_SIG.bind_partial(cls, *args, **kw).arguments
             spec_arg = bound_args.get('spec_set', bound_args.get('spec'))
-            if spec_arg and _is_async_obj(spec_arg):
+            if spec_arg is not None and _is_async_obj(spec_arg):
                 bases = (AsyncMockMixin, cls)
         new = type(cls.__name__, bases, {'__doc__': cls.__doc__})
         instance = _safe_super(NonCallableMock, cls).__new__(new)
@@ -631,9 +635,10 @@
         elif _is_magic(name):
             raise AttributeError(name)
         if not self._mock_unsafe:
-            if name.startswith(('assert', 'assret')):
-                raise AttributeError("Attributes cannot start with 'assert' "
-                                     "or 'assret'")
+            if name.startswith(('assert', 'assret', 'asert', 'aseert', 'assrt')):
+                raise AttributeError(
+                    f"{name!r} is not a valid assertion. Use a spec "
+                    f"for the mock if {name!r} is meant to be an attribute.")
 
         result = self._mock_children.get(name)
         if result is _deleted:
@@ -652,10 +657,17 @@
             self._mock_children[name]  = result
 
         elif isinstance(result, _SpecState):
-            result = create_autospec(
-                result.spec, result.spec_set, result.instance,
-                result.parent, result.name
-            )
+            try:
+                result = create_autospec(
+                    result.spec, result.spec_set, result.instance,
+                    result.parent, result.name
+                )
+            except InvalidSpecError:
+                target_name = self.__dict__['_mock_name'] or self
+                raise InvalidSpecError(
+                    f'Cannot autospec attr {name!r} from target '
+                    f'{target_name!r} as it has already been mocked out. '
+                    f'[target={self!r}, attr={result.spec!r}]')
             self._mock_children[name]  = result
 
         return result
@@ -992,6 +1004,11 @@
         if _new_name in self.__dict__['_spec_asyncs']:
             return AsyncMock(**kw)
 
+        if self._mock_sealed:
+            attribute = f".{kw['name']}" if "name" in kw else "()"
+            mock_name = self._extract_mock_name() + attribute
+            raise AttributeError(mock_name)
+
         _type = type(self)
         if issubclass(_type, MagicMock) and _new_name in _async_method_magics:
             # Any asynchronous magic becomes an AsyncMock
@@ -1010,12 +1027,6 @@
                 klass = Mock
         else:
             klass = _type.__mro__[1]
-
-        if self._mock_sealed:
-            attribute = "." + kw["name"] if "name" in kw else "()"
-            mock_name = self._extract_mock_name() + attribute
-            raise AttributeError(mock_name)
-
         return klass(**kw)
 
 
@@ -1241,6 +1252,17 @@
     return thing
 
 
+# _check_spec_arg_typos takes kwargs from commands like patch and checks that
+# they don't contain common misspellings of arguments related to autospeccing.
+def _check_spec_arg_typos(kwargs_to_check):
+    typos = ("autospect", "auto_spec", "set_spec")
+    for typo in typos:
+        if typo in kwargs_to_check:
+            raise RuntimeError(
+                f"{typo!r} might be a typo; use unsafe=True if this is intended"
+            )
+
+
 class _patch(object):
 
     attribute_name = None
@@ -1248,7 +1270,7 @@
 
     def __init__(
             self, getter, attribute, new, spec, create,
-            spec_set, autospec, new_callable, kwargs
+            spec_set, autospec, new_callable, kwargs, *, unsafe=False
         ):
         if new_callable is not None:
             if new is not DEFAULT:
@@ -1259,6 +1281,16 @@
                 raise ValueError(
                     "Cannot use 'autospec' and 'new_callable' together"
                 )
+        if not unsafe:
+            _check_spec_arg_typos(kwargs)
+        if _is_instance_mock(spec):
+            raise InvalidSpecError(
+                f'Cannot spec attr {attribute!r} as the spec '
+                f'has already been mocked out. [spec={spec!r}]')
+        if _is_instance_mock(spec_set):
+            raise InvalidSpecError(
+                f'Cannot spec attr {attribute!r} as the spec_set '
+                f'target has already been mocked out. [spec_set={spec_set!r}]')
 
         self.getter = getter
         self.attribute = attribute
@@ -1486,6 +1518,18 @@
             if autospec is True:
                 autospec = original
 
+            if _is_instance_mock(self.target):
+                raise InvalidSpecError(
+                    f'Cannot autospec attr {self.attribute!r} as the patch '
+                    f'target has already been mocked out. '
+                    f'[target={self.target!r}, attr={autospec!r}]')
+            if _is_instance_mock(autospec):
+                target_name = getattr(self.target, '__name__', self.target)
+                raise InvalidSpecError(
+                    f'Cannot autospec attr {self.attribute!r} from target '
+                    f'{target_name!r} as it has already been mocked out. '
+                    f'[target={self.target!r}, attr={autospec!r}]')
+
             new = create_autospec(autospec, spec_set=spec_set,
                                   _name=self.attribute, **kwargs)
         elif kwargs:
@@ -1558,9 +1602,9 @@
 def _get_target(target):
     try:
         target, attribute = target.rsplit('.', 1)
-    except (TypeError, ValueError):
-        raise TypeError("Need a valid target to patch. You supplied: %r" %
-                        (target,))
+    except (TypeError, ValueError, AttributeError):
+        raise TypeError(
+            f"Need a valid target to patch. You supplied: {target!r}")
     getter = lambda: _importer(target)
     return getter, attribute
 
@@ -1568,7 +1612,7 @@
 def _patch_object(
         target, attribute, new=DEFAULT, spec=None,
         create=False, spec_set=None, autospec=None,
-        new_callable=None, **kwargs
+        new_callable=None, *, unsafe=False, **kwargs
     ):
     """
     patch the named member (`attribute`) on an object (`target`) with a mock
@@ -1590,7 +1634,7 @@
     getter = lambda: target
     return _patch(
         getter, attribute, new, spec, create,
-        spec_set, autospec, new_callable, kwargs
+        spec_set, autospec, new_callable, kwargs, unsafe=unsafe
     )
 
 
@@ -1645,7 +1689,7 @@
 
 def patch(
         target, new=DEFAULT, spec=None, create=False,
-        spec_set=None, autospec=None, new_callable=None, **kwargs
+        spec_set=None, autospec=None, new_callable=None, *, unsafe=False, **kwargs
     ):
     """
     `patch` acts as a function decorator, class decorator or a context
@@ -1707,6 +1751,10 @@
     use "as" then the patched object will be bound to the name after the
     "as"; very useful if `patch` is creating a mock object for you.
 
+    Patch will raise a `RuntimeError` if passed some common misspellings of
+    the arguments autospec and spec_set. Pass the argument `unsafe` with the
+    value True to disable that check.
+
     `patch` takes arbitrary keyword arguments. These will be passed to
     `AsyncMock` if the patched object is asynchronous, to `MagicMock`
     otherwise or to `new_callable` if specified.
@@ -1717,7 +1765,7 @@
     getter, attribute = _get_target(target)
     return _patch(
         getter, attribute, new, spec, create,
-        spec_set, autospec, new_callable, kwargs
+        spec_set, autospec, new_callable, kwargs, unsafe=unsafe
     )
 
 
@@ -2567,7 +2615,7 @@
 
 
 def create_autospec(spec, spec_set=False, instance=False, _parent=None,
-                    _name=None, **kwargs):
+                    _name=None, *, unsafe=False, **kwargs):
     """Create a mock object using another object as a spec. Attributes on the
     mock will use the corresponding attribute on the `spec` object as their
     spec.
@@ -2583,6 +2631,10 @@
     spec for an instance object by passing `instance=True`. The returned mock
     will only be callable if instances of the mock are callable.
 
+    `create_autospec` will raise a `RuntimeError` if passed some common
+    misspellings of the arguments autospec and spec_set. Pass the argument
+    `unsafe` with the value True to disable that check.
+
     `create_autospec` also takes arbitrary keyword arguments that are passed to
     the constructor of the created mock."""
     if _is_list(spec):
@@ -2591,6 +2643,9 @@
         spec = type(spec)
 
     is_type = isinstance(spec, type)
+    if _is_instance_mock(spec):
+        raise InvalidSpecError(f'Cannot autospec a Mock object. '
+                               f'[object={spec!r}]')
     is_async_func = _is_async_func(spec)
     _kwargs = {'spec': spec}
     if spec_set:
@@ -2600,6 +2655,8 @@
         _kwargs = {}
     if _kwargs and instance:
         _kwargs['_spec_as_instance'] = True
+    if not unsafe:
+        _check_spec_arg_typos(kwargs)
 
     _kwargs.update(kwargs)
 
@@ -2869,6 +2926,8 @@
             continue
         if not isinstance(m, NonCallableMock):
             continue
+        if isinstance(m._mock_children.get(attr), _SpecState):
+            continue
         if m._mock_new_parent is mock:
             seal(m)
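
The mock.py hunks introduce InvalidSpecError for spec'ing or autospec'ing targets that are already Mock instances, extend the misspelled-assertion guard, and make patch()/create_autospec() reject common typos of their spec arguments unless unsafe=True is passed. Both guards are easy to see (the patch target here is arbitrary):

    from unittest import mock

    try:
        mock.patch('os.getcwd', autospect=True)   # typo for 'autospec'
    except RuntimeError as e:
        print(e)   # 'autospect' might be a typo; use unsafe=True ...

    m = mock.Mock()
    try:
        m.assret_called_once()                    # typo for assert_called_once
    except AttributeError as e:
        print(e)
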
 
diff --git a/Lib/unittest/result.py b/Lib/unittest/result.py
index 111317b..3da7005 100644
--- a/Lib/unittest/result.py
+++ b/Lib/unittest/result.py
@@ -173,17 +173,10 @@
     def _exc_info_to_string(self, err, test):
         """Converts a sys.exc_info()-style tuple of values into a string."""
         exctype, value, tb = err
-        # Skip test runner traceback levels
-        while tb and self._is_relevant_tb_level(tb):
-            tb = tb.tb_next
-
-        if exctype is test.failureException:
-            # Skip assert*() traceback levels
-            length = self._count_relevant_tb_levels(tb)
-        else:
-            length = None
+        tb = self._clean_tracebacks(exctype, value, tb, test)
         tb_e = traceback.TracebackException(
-            exctype, value, tb, limit=length, capture_locals=self.tb_locals)
+            exctype, value, tb,
+            capture_locals=self.tb_locals, compact=True)
         msgLines = list(tb_e.format())
 
         if self.buffer:
@@ -199,16 +192,49 @@
                 msgLines.append(STDERR_LINE % error)
         return ''.join(msgLines)
 
+    def _clean_tracebacks(self, exctype, value, tb, test):
+        ret = None
+        first = True
+        excs = [(exctype, value, tb)]
+        while excs:
+            (exctype, value, tb) = excs.pop()
+            # Skip test runner traceback levels
+            while tb and self._is_relevant_tb_level(tb):
+                tb = tb.tb_next
+
+            # Skip assert*() traceback levels
+            if exctype is test.failureException:
+                self._remove_unittest_tb_frames(tb)
+
+            if first:
+                ret = tb
+                first = False
+            else:
+                value.__traceback__ = tb
+
+            if value is not None:
+                for c in (value.__cause__, value.__context__):
+                    if c is not None:
+                        excs.append((type(c), c, c.__traceback__))
+        return ret
 
     def _is_relevant_tb_level(self, tb):
         return '__unittest' in tb.tb_frame.f_globals
 
-    def _count_relevant_tb_levels(self, tb):
-        length = 0
+    def _remove_unittest_tb_frames(self, tb):
+        '''Truncates usercode tb at the first unittest frame.
+
+        If the first frame of the traceback is in user code,
+        the prefix up to the first unittest frame is returned.
+        If the first frame is already in the unittest module,
+        the traceback is not modified.
+        '''
+        prev = None
         while tb and not self._is_relevant_tb_level(tb):
-            length += 1
+            prev = tb
             tb = tb.tb_next
-        return length
+        if prev is not None:
+            prev.tb_next = None
 
     def __repr__(self):
         return ("<%s run=%i errors=%i failures=%i>" %
diff --git a/Lib/unittest/runner.py b/Lib/unittest/runner.py
index 45e7e4c..caf1590 100644
--- a/Lib/unittest/runner.py
+++ b/Lib/unittest/runner.py
@@ -59,6 +59,7 @@
         super(TextTestResult, self).addSuccess(test)
         if self.showAll:
             self.stream.writeln("ok")
+            self.stream.flush()
         elif self.dots:
             self.stream.write('.')
             self.stream.flush()
@@ -67,6 +68,7 @@
         super(TextTestResult, self).addError(test, err)
         if self.showAll:
             self.stream.writeln("ERROR")
+            self.stream.flush()
         elif self.dots:
             self.stream.write('E')
             self.stream.flush()
@@ -75,6 +77,7 @@
         super(TextTestResult, self).addFailure(test, err)
         if self.showAll:
             self.stream.writeln("FAIL")
+            self.stream.flush()
         elif self.dots:
             self.stream.write('F')
             self.stream.flush()
@@ -83,6 +86,7 @@
         super(TextTestResult, self).addSkip(test, reason)
         if self.showAll:
             self.stream.writeln("skipped {0!r}".format(reason))
+            self.stream.flush()
         elif self.dots:
             self.stream.write("s")
             self.stream.flush()
@@ -91,6 +95,7 @@
         super(TextTestResult, self).addExpectedFailure(test, err)
         if self.showAll:
             self.stream.writeln("expected failure")
+            self.stream.flush()
         elif self.dots:
             self.stream.write("x")
             self.stream.flush()
@@ -99,6 +104,7 @@
         super(TextTestResult, self).addUnexpectedSuccess(test)
         if self.showAll:
             self.stream.writeln("unexpected success")
+            self.stream.flush()
         elif self.dots:
             self.stream.write("u")
             self.stream.flush()
@@ -106,6 +112,7 @@
     def printErrors(self):
         if self.dots or self.showAll:
             self.stream.writeln()
+            self.stream.flush()
         self.printErrorList('ERROR', self.errors)
         self.printErrorList('FAIL', self.failures)
 
@@ -115,6 +122,7 @@
             self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
             self.stream.writeln(self.separator2)
             self.stream.writeln("%s" % err)
+            self.stream.flush()
 
 
 class TextTestRunner(object):
@@ -218,4 +226,5 @@
             self.stream.writeln(" (%s)" % (", ".join(infos),))
         else:
             self.stream.write("\n")
+        self.stream.flush()
         return result
diff --git a/Lib/unittest/suite.py b/Lib/unittest/suite.py
index 41993f9..6f45b6f 100644
--- a/Lib/unittest/suite.py
+++ b/Lib/unittest/suite.py
@@ -149,6 +149,7 @@
         if getattr(currentClass, "__unittest_skip__", False):
             return
 
+        failed = False
         try:
             currentClass._classSetupFailed = False
         except TypeError:
@@ -157,27 +158,32 @@
             pass
 
         setUpClass = getattr(currentClass, 'setUpClass', None)
+        doClassCleanups = getattr(currentClass, 'doClassCleanups', None)
         if setUpClass is not None:
             _call_if_exists(result, '_setupStdout')
             try:
-                setUpClass()
-            except Exception as e:
-                if isinstance(result, _DebugResult):
-                    raise
-                currentClass._classSetupFailed = True
-                className = util.strclass(currentClass)
-                self._createClassOrModuleLevelException(result, e,
-                                                        'setUpClass',
-                                                        className)
+                try:
+                    setUpClass()
+                except Exception as e:
+                    if isinstance(result, _DebugResult):
+                        raise
+                    failed = True
+                    try:
+                        currentClass._classSetupFailed = True
+                    except TypeError:
+                        pass
+                    className = util.strclass(currentClass)
+                    self._createClassOrModuleLevelException(result, e,
+                                                            'setUpClass',
+                                                            className)
+                if failed and doClassCleanups is not None:
+                    doClassCleanups()
+                    for exc_info in currentClass.tearDown_exceptions:
+                        self._createClassOrModuleLevelException(
+                                result, exc_info[1], 'setUpClass', className,
+                                info=exc_info)
             finally:
                 _call_if_exists(result, '_restoreStdout')
-                if currentClass._classSetupFailed is True:
-                    currentClass.doClassCleanups()
-                    if len(currentClass.tearDown_exceptions) > 0:
-                        for exc in currentClass.tearDown_exceptions:
-                            self._createClassOrModuleLevelException(
-                                    result, exc[1], 'setUpClass', className,
-                                    info=exc)
 
     def _get_previous_module(self, result):
         previousModule = None
@@ -205,20 +211,22 @@
         if setUpModule is not None:
             _call_if_exists(result, '_setupStdout')
             try:
-                setUpModule()
-            except Exception as e:
                 try:
-                    case.doModuleCleanups()
-                except Exception as exc:
-                    self._createClassOrModuleLevelException(result, exc,
+                    setUpModule()
+                except Exception as e:
+                    if isinstance(result, _DebugResult):
+                        raise
+                    result._moduleSetUpFailed = True
+                    self._createClassOrModuleLevelException(result, e,
                                                             'setUpModule',
                                                             currentModule)
-                if isinstance(result, _DebugResult):
-                    raise
-                result._moduleSetUpFailed = True
-                self._createClassOrModuleLevelException(result, e,
-                                                        'setUpModule',
-                                                        currentModule)
+                if result._moduleSetUpFailed:
+                    try:
+                        case.doModuleCleanups()
+                    except Exception as e:
+                        self._createClassOrModuleLevelException(result, e,
+                                                                'setUpModule',
+                                                                currentModule)
             finally:
                 _call_if_exists(result, '_restoreStdout')
 
@@ -251,30 +259,33 @@
         except KeyError:
             return
 
-        tearDownModule = getattr(module, 'tearDownModule', None)
-        if tearDownModule is not None:
-            _call_if_exists(result, '_setupStdout')
+        _call_if_exists(result, '_setupStdout')
+        try:
+            tearDownModule = getattr(module, 'tearDownModule', None)
+            if tearDownModule is not None:
+                try:
+                    tearDownModule()
+                except Exception as e:
+                    if isinstance(result, _DebugResult):
+                        raise
+                    self._createClassOrModuleLevelException(result, e,
+                                                            'tearDownModule',
+                                                            previousModule)
             try:
-                tearDownModule()
+                case.doModuleCleanups()
             except Exception as e:
                 if isinstance(result, _DebugResult):
                     raise
                 self._createClassOrModuleLevelException(result, e,
                                                         'tearDownModule',
                                                         previousModule)
-            finally:
-                _call_if_exists(result, '_restoreStdout')
-                try:
-                    case.doModuleCleanups()
-                except Exception as e:
-                    self._createClassOrModuleLevelException(result, e,
-                                                            'tearDownModule',
-                                                            previousModule)
+        finally:
+            _call_if_exists(result, '_restoreStdout')
 
     def _tearDownPreviousClass(self, test, result):
         previousClass = getattr(result, '_previousTestClass', None)
         currentClass = test.__class__
-        if currentClass == previousClass:
+        if currentClass == previousClass or previousClass is None:
             return
         if getattr(previousClass, '_classSetupFailed', False):
             return
@@ -284,27 +295,34 @@
             return
 
         tearDownClass = getattr(previousClass, 'tearDownClass', None)
-        if tearDownClass is not None:
-            _call_if_exists(result, '_setupStdout')
-            try:
-                tearDownClass()
-            except Exception as e:
-                if isinstance(result, _DebugResult):
-                    raise
-                className = util.strclass(previousClass)
-                self._createClassOrModuleLevelException(result, e,
-                                                        'tearDownClass',
-                                                        className)
-            finally:
-                _call_if_exists(result, '_restoreStdout')
-                previousClass.doClassCleanups()
-                if len(previousClass.tearDown_exceptions) > 0:
-                    for exc in previousClass.tearDown_exceptions:
-                        className = util.strclass(previousClass)
-                        self._createClassOrModuleLevelException(result, exc[1],
-                                                                'tearDownClass',
-                                                                className,
-                                                                info=exc)
+        doClassCleanups = getattr(previousClass, 'doClassCleanups', None)
+        if tearDownClass is None and doClassCleanups is None:
+            return
+
+        _call_if_exists(result, '_setupStdout')
+        try:
+            if tearDownClass is not None:
+                try:
+                    tearDownClass()
+                except Exception as e:
+                    if isinstance(result, _DebugResult):
+                        raise
+                    className = util.strclass(previousClass)
+                    self._createClassOrModuleLevelException(result, e,
+                                                            'tearDownClass',
+                                                            className)
+            if doClassCleanups is not None:
+                doClassCleanups()
+                for exc_info in previousClass.tearDown_exceptions:
+                    if isinstance(result, _DebugResult):
+                        raise exc_info[1]
+                    className = util.strclass(previousClass)
+                    self._createClassOrModuleLevelException(result, exc_info[1],
+                                                            'tearDownClass',
+                                                            className,
+                                                            info=exc_info)
+        finally:
+            _call_if_exists(result, '_restoreStdout')
 
 
 class _ErrorHolder(object):
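
The suite.py rework guarantees that class- and module-level cleanups registered with addClassCleanup()/addModuleCleanup() run even when setUpClass()/setUpModule() fail, and that teardown-phase cleanups run even when no tearDownClass is defined. A sketch of the newly covered case:

    import unittest

    class ResourceTests(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            cls.addClassCleanup(print, 'cleanup now runs despite the failure')
            raise RuntimeError('setup failed')

        def test_anything(self):
            pass
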
diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
index ea897c3..b35997b 100644
--- a/Lib/urllib/parse.py
+++ b/Lib/urllib/parse.py
@@ -78,6 +78,9 @@
                 '0123456789'
                 '+-.')
 
+# Unsafe bytes to be removed per WHATWG spec
+_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']
+
 # XXX: Consider replacing with functools.lru_cache
 MAX_CACHE_SIZE = 20
 _parse_cache = {}
@@ -453,6 +456,11 @@
     """
 
     url, scheme, _coerce_result = _coerce_args(url, scheme)
+
+    for b in _UNSAFE_URL_BYTES_TO_REMOVE:
+        url = url.replace(b, "")
+        scheme = scheme.replace(b, "")
+
     allow_fragments = bool(allow_fragments)
     key = url, scheme, allow_fragments, type(url), type(scheme)
     cached = _parse_cache.get(key, None)
@@ -662,7 +670,7 @@
 
 
 def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-             encoding='utf-8', errors='replace', max_num_fields=None):
+             encoding='utf-8', errors='replace', max_num_fields=None, separator='&'):
     """Parse a query given as a string argument.
 
         Arguments:
@@ -686,12 +694,15 @@
         max_num_fields: int. If set, then throws a ValueError if there
             are more than n fields read by parse_qsl().
 
+        separator: str. The symbol to use for separating the query arguments.
+            Defaults to &.
+
         Returns a dictionary.
     """
     parsed_result = {}
     pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
                       encoding=encoding, errors=errors,
-                      max_num_fields=max_num_fields)
+                      max_num_fields=max_num_fields, separator=separator)
     for name, value in pairs:
         if name in parsed_result:
             parsed_result[name].append(value)
@@ -701,7 +712,7 @@
 
 
 def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-              encoding='utf-8', errors='replace', max_num_fields=None):
+              encoding='utf-8', errors='replace', max_num_fields=None, separator='&'):
     """Parse a query given as a string argument.
 
         Arguments:
@@ -724,21 +735,27 @@
         max_num_fields: int. If set, then throws a ValueError
             if there are more than n fields read by parse_qsl().
 
+        separator: str. The symbol to use for separating the query arguments.
+            Defaults to &.
+
         Returns a list, as G-d intended.
     """
     qs, _coerce_result = _coerce_args(qs)
+    separator, _ = _coerce_args(separator)
+
+    if not separator or (not isinstance(separator, (str, bytes))):
+        raise ValueError("Separator must be of type string or bytes.")
 
     # If max_num_fields is defined then check that the number of fields
     # is less than max_num_fields. This prevents a memory exhaustion DOS
     # attack via post bodies with many fields.
     if max_num_fields is not None:
-        num_fields = 1 + qs.count('&') + qs.count(';')
+        num_fields = 1 + qs.count(separator)
         if max_num_fields < num_fields:
             raise ValueError('Max number of fields exceeded')
 
-    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
     r = []
-    for name_value in pairs:
+    for name_value in qs.split(separator):
         if not name_value and not strict_parsing:
             continue
         nv = name_value.split('=', 1)
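
This is the bpo-42967 hardening: parse_qs() and parse_qsl() gain a separator parameter and no longer split on both '&' and ';', which closed a web-cache-poisoning vector. The behavioral difference:

    from urllib.parse import parse_qs

    print(parse_qs('a=1;b=2'))                  # {'a': ['1;b=2']}
    print(parse_qs('a=1;b=2', separator=';'))   # {'a': ['1'], 'b': ['2']}
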
diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py
index a8c870b..34b1b0b 100644
--- a/Lib/urllib/request.py
+++ b/Lib/urllib/request.py
@@ -64,7 +64,7 @@
 # install it
 urllib.request.install_opener(opener)
 
-f = urllib.request.urlopen('http://www.python.org/')
+f = urllib.request.urlopen('https://www.python.org/')
 """
 
 # XXX issues:
@@ -202,6 +202,8 @@
         context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH,
                                              cafile=cafile,
                                              capath=capath)
+        # send ALPN extension to indicate HTTP/1.1 protocol
+        context.set_alpn_protocols(['http/1.1'])
         https_handler = HTTPSHandler(context=context)
         opener = build_opener(https_handler)
     elif context:
@@ -771,7 +773,11 @@
             raise ValueError("proxy URL with no authority: %r" % proxy)
         # We have an authority, so for RFC 3986-compliant URLs (by ss 3.
         # and 3.3.), path is empty or starts with '/'
-        end = r_scheme.find("/", 2)
+        if '@' in r_scheme:
+            host_separator = r_scheme.find('@')
+            end = r_scheme.find("/", host_separator)
+        else:
+            end = r_scheme.find("/", 2)
         if end == -1:
             end = None
         authority = r_scheme[2:end]
@@ -883,10 +889,10 @@
             return True
         if base[0] != test[0]:
             return False
-        common = posixpath.commonprefix((base[1], test[1]))
-        if len(common) == len(base[1]):
-            return True
-        return False
+        prefix = base[1]
+        if prefix[-1:] != '/':
+            prefix += '/'
+        return test[1].startswith(prefix)
 
 
 class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
@@ -939,7 +945,7 @@
     # (single quotes are a violation of the RFC, but appear in the wild)
     rx = re.compile('(?:^|,)'   # start of the string or ','
                     '[ \t]*'    # optional whitespaces
-                    '([^ \t]+)' # scheme like "Basic"
+                    '([^ \t,]+)' # scheme like "Basic"
                     '[ \t]+'    # mandatory whitespaces
                     # realm=xxx
                     # realm='xxx'
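
Three independent request.py fixes land here: default HTTPS contexts advertise ALPN 'http/1.1', proxy authority parsing accounts for userinfo ('@') before searching for the path slash, and HTTPPasswordMgr's URI matching now requires a whole path-segment prefix so credentials stored for '/foo' no longer match '/foobar'. A hypothetical helper mirroring the new prefix rule (is_sub_path is illustrative, not a stdlib name):

    def is_sub_path(base, test):
        if base == test:
            return True
        prefix = base if base.endswith('/') else base + '/'
        return test.startswith(prefix)

    print(is_sub_path('/foo', '/foo/bar'))   # True
    print(is_sub_path('/foo', '/foobar'))    # False (True before the fix)
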
diff --git a/Lib/weakref.py b/Lib/weakref.py
index 5fa851d..994ea8a 100644
--- a/Lib/weakref.py
+++ b/Lib/weakref.py
@@ -2,7 +2,7 @@
 
 This module is an implementation of PEP 205:
 
-http://www.python.org/dev/peps/pep-0205/
+https://www.python.org/dev/peps/pep-0205/
 """
 
 # Naming convention: Variables named "wr" are weak reference objects;
@@ -119,14 +119,17 @@
         self.data = {}
         self.update(other, **kw)
 
-    def _commit_removals(self):
-        l = self._pending_removals
+    def _commit_removals(self, _atomic_removal=_remove_dead_weakref):
+        pop = self._pending_removals.pop
         d = self.data
         # We shouldn't encounter any KeyError, because this method should
         # always be called *before* mutating the dict.
-        while l:
-            key = l.pop()
-            _remove_dead_weakref(d, key)
+        while True:
+            try:
+                key = pop()
+            except IndexError:
+                return
+            _atomic_removal(d, key)
 
     def __getitem__(self, key):
         if self._pending_removals:
@@ -370,7 +373,10 @@
                 if self._iterating:
                     self._pending_removals.append(k)
                 else:
-                    del self.data[k]
+                    try:
+                        del self.data[k]
+                    except KeyError:
+                        pass
         self._remove = remove
         # A list of dead weakrefs (keys to be removed)
         self._pending_removals = []
@@ -384,11 +390,16 @@
         # because a dead weakref never compares equal to a live weakref,
         # even if they happened to refer to equal objects.
         # However, it means keys may already have been removed.
-        l = self._pending_removals
+        pop = self._pending_removals.pop
         d = self.data
-        while l:
+        while True:
             try:
-                del d[l.pop()]
+                key = pop()
+            except IndexError:
+                return
+
+            try:
+                del d[key]
             except KeyError:
                 pass
 
diff --git a/Lib/webbrowser.py b/Lib/webbrowser.py
index 6023c1e..ec3cece 100644
--- a/Lib/webbrowser.py
+++ b/Lib/webbrowser.py
@@ -1,5 +1,5 @@
 #! /usr/bin/env python3
-"""Interfaces for launching and remotely controlling Web browsers."""
+"""Interfaces for launching and remotely controlling web browsers."""
 # Maintained by Georg Brandl.
 
 import os
@@ -532,6 +532,10 @@
         # OS X can use below Unix support (but we prefer using the OS X
         # specific stuff)
 
+    if sys.platform == "serenityos":
+        # SerenityOS webbrowser, simply called "Browser".
+        register("Browser", None, BackgroundBrowser("Browser"))
+
     if sys.platform[:3] == "win":
         # First try to use the default Windows browser
         register("windows-default", WindowsDefault)
diff --git a/Lib/wsgiref/validate.py b/Lib/wsgiref/validate.py
index 48ac007..6e16578 100644
--- a/Lib/wsgiref/validate.py
+++ b/Lib/wsgiref/validate.py
@@ -137,7 +137,7 @@
 
     """
     When applied between a WSGI server and a WSGI application, this
-    middleware will check for WSGI compliancy on a number of levels.
+    middleware will check for WSGI compliance on a number of levels.
     This middleware does not modify the request or response in any
     way, but will raise an AssertionError if anything seems off
     (except for a failure to close the application iterator, which
diff --git a/Lib/xml/etree/ElementInclude.py b/Lib/xml/etree/ElementInclude.py
index 5303062..40a9b22 100644
--- a/Lib/xml/etree/ElementInclude.py
+++ b/Lib/xml/etree/ElementInclude.py
@@ -42,7 +42,7 @@
 # --------------------------------------------------------------------
 
 # Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/psf/license for licensing details.
+# See https://www.python.org/psf/license for licensing details.
 
 ##
 # Limited XInclude support for the ElementTree package.
diff --git a/Lib/xml/etree/ElementPath.py b/Lib/xml/etree/ElementPath.py
index d318e65..a1170b5 100644
--- a/Lib/xml/etree/ElementPath.py
+++ b/Lib/xml/etree/ElementPath.py
@@ -48,7 +48,7 @@
 # --------------------------------------------------------------------
 
 # Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/psf/license for licensing details.
+# See https://www.python.org/psf/license for licensing details.
 
 ##
 # Implementation module for XPath support.  There's usually no reason
@@ -65,8 +65,9 @@
     r"//?|"
     r"\.\.|"
     r"\(\)|"
+    r"!=|"
     r"[/.*:\[\]\(\)@=])|"
-    r"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
+    r"((?:\{[^}]+\})?[^/\[\]\(\)@!=\s]+)|"
     r"\s+"
     )
 
@@ -253,15 +254,19 @@
                 if elem.get(key) is not None:
                     yield elem
         return select
-    if signature == "@-='":
-        # [@attribute='value']
+    if signature == "@-='" or signature == "@-!='":
+        # [@attribute='value'] or [@attribute!='value']
         key = predicate[1]
         value = predicate[-1]
         def select(context, result):
             for elem in result:
                 if elem.get(key) == value:
                     yield elem
-        return select
+        def select_negated(context, result):
+            for elem in result:
+                if (attr_value := elem.get(key)) is not None and attr_value != value:
+                    yield elem
+        return select_negated if '!=' in signature else select
     if signature == "-" and not re.match(r"\-?\d+$", predicate[0]):
         # [tag]
         tag = predicate[0]
@@ -270,8 +275,10 @@
                 if elem.find(tag) is not None:
                     yield elem
         return select
-    if signature == ".='" or (signature == "-='" and not re.match(r"\-?\d+$", predicate[0])):
-        # [.='value'] or [tag='value']
+    if signature == ".='" or signature == ".!='" or (
+            (signature == "-='" or signature == "-!='")
+            and not re.match(r"\-?\d+$", predicate[0])):
+        # [.='value'] or [tag='value'] or [.!='value'] or [tag!='value']
         tag = predicate[0]
         value = predicate[-1]
         if tag:
@@ -281,12 +288,22 @@
                         if "".join(e.itertext()) == value:
                             yield elem
                             break
+            def select_negated(context, result):
+                for elem in result:
+                    for e in elem.iterfind(tag):
+                        if "".join(e.itertext()) != value:
+                            yield elem
+                            break
         else:
             def select(context, result):
                 for elem in result:
                     if "".join(elem.itertext()) == value:
                         yield elem
-        return select
+            def select_negated(context, result):
+                for elem in result:
+                    if "".join(elem.itertext()) != value:
+                        yield elem
+        return select_negated if '!=' in signature else select
     if signature == "-" or signature == "-()" or signature == "-()-":
         # [index] or [last()] or [last()-index]
         if signature == "-":
diff --git a/Lib/xml/etree/ElementTree.py b/Lib/xml/etree/ElementTree.py
index 7a26900..07be860 100644
--- a/Lib/xml/etree/ElementTree.py
+++ b/Lib/xml/etree/ElementTree.py
@@ -35,7 +35,7 @@
 
 #---------------------------------------------------------------------
 # Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/psf/license for licensing details.
+# See https://www.python.org/psf/license for licensing details.
 #
 # ElementTree
 # Copyright (c) 1999-2008 by Fredrik Lundh.  All rights reserved.
@@ -252,7 +252,7 @@
         """
         for element in elements:
             self._assert_is_element(element)
-        self._children.extend(elements)
+            self._children.append(element)
 
     def insert(self, index, subelement):
         """Insert *subelement* at position *index*."""
@@ -1248,8 +1248,14 @@
     # Use the internal, undocumented _parser argument for now; When the
     # parser argument of iterparse is removed, this can be killed.
     pullparser = XMLPullParser(events=events, _parser=parser)
-    def iterator():
+
+    def iterator(source):
+        close_source = False
         try:
+            if not hasattr(source, "read"):
+                source = open(source, "rb")
+                close_source = True
+            yield None
             while True:
                 yield from pullparser.read_events()
                 # load event buffer
@@ -1265,16 +1271,12 @@
                 source.close()
 
     class IterParseIterator(collections.abc.Iterator):
-        __next__ = iterator().__next__
+        __next__ = iterator(source).__next__
     it = IterParseIterator()
     it.root = None
     del iterator, IterParseIterator
 
-    close_source = False
-    if not hasattr(source, "read"):
-        source = open(source, "rb")
-        close_source = True
-
+    next(it)
     return it
 
 
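Opening the file inside the generator and priming it with next(it) keeps the eager open-on-call behavior for filename arguments while ensuring the generator that reads the handle is the one that closes it. Usage is unchanged; a sketch:

    import io
    import xml.etree.ElementTree as ET

    src = io.BytesIO(b"<root><item>1</item><item>2</item></root>")
    for event, elem in ET.iterparse(src, events=("end",)):
        if elem.tag == "item":
            print(elem.text)   # 1, then 2
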
@@ -1283,7 +1285,7 @@
     def __init__(self, events=None, *, _parser=None):
         # The _parser argument is for internal use only and must not be relied
         # upon in user code. It will be removed in a future release.
-        # See http://bugs.python.org/issue17741 for more details.
+        # See https://bugs.python.org/issue17741 for more details.
 
         self._events_queue = collections.deque()
         self._parser = _parser or XMLParser(target=TreeBuilder())
@@ -1560,7 +1562,6 @@
         # Configure pyexpat: buffering, new-style attribute handling.
         parser.buffer_text = 1
         parser.ordered_attributes = 1
-        parser.specified_attributes = 1
         self._doctype = None
         self.entity = {}
         try:
@@ -1580,7 +1581,6 @@
         for event_name in events_to_report:
             if event_name == "start":
                 parser.ordered_attributes = 1
-                parser.specified_attributes = 1
                 def handler(tag, attrib_in, event=event_name, append=append,
                             start=self._start):
                     append((event, start(tag, attrib_in)))
diff --git a/Lib/xml/etree/__init__.py b/Lib/xml/etree/__init__.py
index 27fd8f6..e2ec534 100644
--- a/Lib/xml/etree/__init__.py
+++ b/Lib/xml/etree/__init__.py
@@ -30,4 +30,4 @@
 # --------------------------------------------------------------------
 
 # Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/psf/license for licensing details.
+# See https://www.python.org/psf/license for licensing details.
diff --git a/Lib/xml/sax/handler.py b/Lib/xml/sax/handler.py
index 481733d..e8d417e 100644
--- a/Lib/xml/sax/handler.py
+++ b/Lib/xml/sax/handler.py
@@ -340,3 +340,48 @@
                   property_xml_string,
                   property_encoding,
                   property_interning_dict]
+
+
+class LexicalHandler:
+    """Optional SAX2 handler for lexical events.
+
+    This handler is used to obtain lexical information about an XML
+    document, that is, information about how the document was encoded
+    (as opposed to what it contains, which is reported to the
+    ContentHandler), such as comments and CDATA marked section
+    boundaries.
+
+    To set the LexicalHandler of an XMLReader, use the setProperty
+    method with the property identifier
+    'http://xml.org/sax/properties/lexical-handler'."""
+
+    def comment(self, content):
+        """Reports a comment anywhere in the document (including the
+        DTD and outside the document element).
+
+        content is a string that holds the contents of the comment."""
+
+    def startDTD(self, name, public_id, system_id):
+        """Report the start of the DTD declarations, if the document
+        has an associated DTD.
+
+        A startEntity event will be reported before declaration events
+        from the external DTD subset are reported, and this can be
+        used to infer from which subset DTD declarations derive.
+
+        name is the name of the document element type, public_id the
+        public identifier of the DTD (or None if none were supplied)
+        and system_id the system identifier of the external subset (or
+        None if none were supplied)."""
+
+    def endDTD(self):
+        """Signals the end of DTD declarations."""
+
+    def startCDATA(self):
+        """Reports the beginning of a CDATA marked section.
+
+        The contents of the CDATA marked section will be reported
+        through the characters event."""
+
+    def endCDATA(self):
+        """Reports the end of a CDATA marked section."""
diff --git a/Lib/xmlrpc/client.py b/Lib/xmlrpc/client.py
index d15d60d..a614cef 100644
--- a/Lib/xmlrpc/client.py
+++ b/Lib/xmlrpc/client.py
@@ -264,16 +264,22 @@
 
 # Issue #13305: different format codes across platforms
 _day0 = datetime(1, 1, 1)
-if _day0.strftime('%Y') == '0001':      # Mac OS X
+def _try(fmt):
+    try:
+        return _day0.strftime(fmt) == '0001'
+    except ValueError:
+        return False
+if _try('%Y'):      # Mac OS X
     def _iso8601_format(value):
         return value.strftime("%Y%m%dT%H:%M:%S")
-elif _day0.strftime('%4Y') == '0001':   # Linux
+elif _try('%4Y'):   # Linux
     def _iso8601_format(value):
         return value.strftime("%4Y%m%dT%H:%M:%S")
 else:
     def _iso8601_format(value):
         return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
 del _day0
+del _try
 
 
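The probe turns a platform difference into a feature test: '%4Y' is a glibc extension, and some C libraries raise ValueError on the unknown directive instead of passing it through. A sketch of the same check:

    from datetime import datetime

    def _supports(fmt, probe=datetime(1, 1, 1)):
        # Mirrors _try() above: treat both a wrong result and a
        # ValueError as "format not supported".
        try:
            return probe.strftime(fmt) == '0001'
        except ValueError:
            return False

    print(_supports('%Y'), _supports('%4Y'))   # results vary by platform
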
 def _strftime(value):
@@ -1421,11 +1427,13 @@
         # establish a "logical" server connection
 
         # get the url
-        p = urllib.parse.urlparse(uri)
+        p = urllib.parse.urlsplit(uri)
         if p.scheme not in ("http", "https"):
             raise OSError("unsupported XML-RPC protocol")
         self.__host = p.netloc
-        self.__handler = p.path or "/RPC2"
+        self.__handler = urllib.parse.urlunsplit(["", "", *p[2:]])
+        if not self.__handler:
+            self.__handler = "/RPC2"
 
         if transport is None:
             if p.scheme == "https":
diff --git a/Lib/xmlrpc/server.py b/Lib/xmlrpc/server.py
index 287e324..69a260f 100644
--- a/Lib/xmlrpc/server.py
+++ b/Lib/xmlrpc/server.py
@@ -750,7 +750,7 @@
                 url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                 results.append('<a href="%s">%s</a>' % (url, escape(all)))
             elif pep:
-                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
+                url = 'https://www.python.org/dev/peps/pep-%04d/' % int(pep)
                 results.append('<a href="%s">%s</a>' % (url, escape(all)))
             elif text[end:end+1] == '(':
                 results.append(self.namelink(name, methods, funcs, classes))
diff --git a/Lib/zipfile.py b/Lib/zipfile.py
index 816f858..67cfdfb 100644
--- a/Lib/zipfile.py
+++ b/Lib/zipfile.py
@@ -16,6 +16,7 @@
 import threading
 import time
 import contextlib
+import pathlib
 
 try:
     import zlib # We may need its compression method
@@ -1120,8 +1121,15 @@
     def write(self, data):
         if self.closed:
             raise ValueError('I/O operation on closed file.')
-        nbytes = len(data)
+
+        # Accept any data that supports the buffer protocol
+        if isinstance(data, (bytes, bytearray)):
+            nbytes = len(data)
+        else:
+            data = memoryview(data)
+            nbytes = data.nbytes
         self._file_size += nbytes
+
         self._crc = crc32(data, self._crc)
         if self._compressor:
             data = self._compressor.compress(data)
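
write() now sizes arbitrary buffer-protocol objects via memoryview.nbytes instead of len(), so multi-byte element types are counted in bytes, not items. A sketch:

    import array
    import io
    import zipfile

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        with zf.open('data.bin', 'w') as member:
            member.write(array.array('I', [1, 2, 3]))   # not bytes/bytearray
    with zipfile.ZipFile(buf) as zf:
        print(zf.read('data.bin') == array.array('I', [1, 2, 3]).tobytes())  # True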
@@ -2197,13 +2205,12 @@
         if not isinstance(source, ZipFile):
             return cls(source)
 
-        # Only allow for FastPath when supplied zipfile is read-only
+        # Only allow for FastLookup when supplied zipfile is read-only
         if 'r' not in source.mode:
             cls = CompleteDirs
 
-        res = cls.__new__(cls)
-        vars(res).update(vars(source))
-        return res
+        source.__class__ = cls
+        return source
 
 
 class FastLookup(CompleteDirs):
@@ -2211,6 +2218,7 @@
     ZipFile subclass to ensure implicit
     dirs exist and are resolved rapidly.
     """
+
     def namelist(self):
         with contextlib.suppress(AttributeError):
             return self.__names
@@ -2242,7 +2250,7 @@
     >>> zf.writestr('a.txt', 'content of a')
     >>> zf.writestr('b/c.txt', 'content of c')
     >>> zf.writestr('b/d/e.txt', 'content of e')
-    >>> zf.filename = 'abcde.zip'
+    >>> zf.filename = 'mem/abcde.zip'
 
     Path accepts the zipfile object itself or a filename
 
@@ -2254,9 +2262,9 @@
 
     >>> a, b = root.iterdir()
     >>> a
-    Path('abcde.zip', 'a.txt')
+    Path('mem/abcde.zip', 'a.txt')
     >>> b
-    Path('abcde.zip', 'b/')
+    Path('mem/abcde.zip', 'b/')
 
     name property:
 
@@ -2267,7 +2275,7 @@
 
     >>> c = b / 'c.txt'
     >>> c
-    Path('abcde.zip', 'b/c.txt')
+    Path('mem/abcde.zip', 'b/c.txt')
     >>> c.name
     'c.txt'
 
@@ -2285,36 +2293,68 @@
 
     Coercion to string:
 
-    >>> str(c)
-    'abcde.zip/b/c.txt'
+    >>> import os
+    >>> str(c).replace(os.sep, posixpath.sep)
+    'mem/abcde.zip/b/c.txt'
+
+    At the root, ``name``, ``filename``, and ``parent``
+    resolve to the zipfile. Note these attributes are not
+    valid and will raise a ``ValueError`` if the zipfile
+    has no filename.
+
+    >>> root.name
+    'abcde.zip'
+    >>> str(root.filename).replace(os.sep, posixpath.sep)
+    'mem/abcde.zip'
+    >>> str(root.parent)
+    'mem'
     """
 
     __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
 
     def __init__(self, root, at=""):
+        """
+        Construct a Path from a ZipFile or filename.
+
+        Note: When the source is an existing ZipFile object,
+        its type (__class__) will be mutated to a
+        specialized type. If the caller wishes to retain the
+        original type, the caller should either create a
+        separate ZipFile object or pass a filename.
+        """
         self.root = FastLookup.make(root)
         self.at = at
 
-    def open(self, mode='r', *args, **kwargs):
+    def open(self, mode='r', *args, pwd=None, **kwargs):
         """
         Open this entry as text or binary following the semantics
         of ``pathlib.Path.open()`` by passing arguments through
         to io.TextIOWrapper().
         """
-        pwd = kwargs.pop('pwd', None)
+        if self.is_dir():
+            raise IsADirectoryError(self)
         zip_mode = mode[0]
+        if not self.exists() and zip_mode == 'r':
+            raise FileNotFoundError(self)
         stream = self.root.open(self.at, zip_mode, pwd=pwd)
         if 'b' in mode:
             if args or kwargs:
                 raise ValueError("encoding args invalid for binary operation")
             return stream
+        else:
+            kwargs["encoding"] = io.text_encoding(kwargs.get("encoding"))
         return io.TextIOWrapper(stream, *args, **kwargs)
 
     @property
     def name(self):
-        return posixpath.basename(self.at.rstrip("/"))
+        return pathlib.Path(self.at).name or self.filename.name
+
+    @property
+    def filename(self):
+        return pathlib.Path(self.root.filename).joinpath(self.at)
 
     def read_text(self, *args, **kwargs):
+        kwargs["encoding"] = io.text_encoding(kwargs.get("encoding"))
         with self.open('r', *args, **kwargs) as strm:
             return strm.read()
 
@@ -2326,13 +2366,13 @@
         return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
 
     def _next(self, at):
-        return Path(self.root, at)
+        return self.__class__(self.root, at)
 
     def is_dir(self):
         return not self.at or self.at.endswith("/")
 
     def is_file(self):
-        return not self.is_dir()
+        return self.exists() and not self.is_dir()
 
     def exists(self):
         return self.at in self.root._name_set()
@@ -2349,14 +2389,16 @@
     def __repr__(self):
         return self.__repr.format(self=self)
 
-    def joinpath(self, add):
-        next = posixpath.join(self.at, add)
+    def joinpath(self, *other):
+        next = posixpath.join(self.at, *other)
         return self._next(self.root.resolve_dir(next))
 
     __truediv__ = joinpath
 
     @property
     def parent(self):
+        if not self.at:
+            return self.filename.parent
         parent_at = posixpath.dirname(self.at.rstrip('/'))
         if parent_at:
             parent_at += '/'
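
Taken together, the Path changes tighten pathlib parity. A short sketch touching the patched pieces (multi-segment joinpath, stricter open(), and is_file() on a missing member), assuming Python 3.10+ semantics:

    import io
    import zipfile

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('b/c.txt', 'content of c')

    with zipfile.ZipFile(buf) as zf:
        root = zipfile.Path(zf)
        c = root.joinpath('b', 'c.txt')           # joinpath(*other)
        print(c.read_text(encoding='utf-8'))      # content of c
        print((root / 'missing.txt').is_file())   # False, not a phantom file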
diff --git a/Lib/zipimport.py b/Lib/zipimport.py
index 5ef0a17..25eaee9 100644
--- a/Lib/zipimport.py
+++ b/Lib/zipimport.py
@@ -22,6 +22,7 @@
 import marshal  # for loads
 import sys  # for modules
 import time  # for mktime
+import _warnings  # For warn()
 
 __all__ = ['ZipImportError', 'zipimporter']
 
@@ -42,7 +43,7 @@
 STRING_END_ARCHIVE = b'PK\x05\x06'
 MAX_COMMENT_LEN = (1 << 16) - 1
 
-class zipimporter:
+class zipimporter(_bootstrap_external._LoaderBasics):
     """zipimporter(archivepath) -> zipimporter object
 
     Create a new zipimporter instance. 'archivepath' must be a path to
@@ -115,7 +116,12 @@
         full path name if it's possibly a portion of a namespace package,
         or None otherwise. The optional 'path' argument is ignored -- it's
         there for compatibility with the importer protocol.
+
+        Deprecated since Python 3.10. Use find_spec() instead.
         """
+        _warnings.warn("zipimporter.find_loader() is deprecated and slated for "
+                       "removal in Python 3.12; use find_spec() instead",
+                       DeprecationWarning)
         mi = _get_module_info(self, fullname)
         if mi is not None:
             # This is a module or package.
@@ -146,15 +152,46 @@
         instance itself if the module was found, or None if it wasn't.
         The optional 'path' argument is ignored -- it's there for compatibility
         with the importer protocol.
+
+        Deprecated since Python 3.10. Use find_spec() instead.
         """
+        _warnings.warn("zipimporter.find_module() is deprecated and slated for "
+                       "removal in Python 3.12; use find_spec() instead",
+                       DeprecationWarning)
         return self.find_loader(fullname, path)[0]
 
+    def find_spec(self, fullname, target=None):
+        """Create a ModuleSpec for the specified module.
+
+        Returns None if the module cannot be found.
+        """
+        module_info = _get_module_info(self, fullname)
+        if module_info is not None:
+            return _bootstrap.spec_from_loader(fullname, self, is_package=module_info)
+        else:
+            # Not a module or regular package. See if this is a directory, and
+            # therefore possibly a portion of a namespace package.
+
+            # We're only interested in the last path component of fullname;
+            # earlier components are recorded in self.prefix.
+            modpath = _get_module_path(self, fullname)
+            if _is_dir(self, modpath):
+                # This is possibly a portion of a namespace
+                # package. Return the string representing its path,
+                # without a trailing separator.
+                path = f'{self.archive}{path_sep}{modpath}'
+                spec = _bootstrap.ModuleSpec(name=fullname, loader=None,
+                                             is_package=True)
+                spec.submodule_search_locations.append(path)
+                return spec
+            else:
+                return None
 
     def get_code(self, fullname):
         """get_code(fullname) -> code object.
 
         Return the code object for the specified module. Raise ZipImportError
-        if the module couldn't be found.
+        if the module couldn't be imported.
         """
         code, ispackage, modpath = _get_module_code(self, fullname)
         return code
@@ -184,7 +221,8 @@
     def get_filename(self, fullname):
         """get_filename(fullname) -> filename string.
 
-        Return the filename for the specified module.
+        Return the filename for the specified module or raise ZipImportError
+        if it couldn't be imported.
         """
         # Deciding the filename requires working out where the code
         # would come from if the module was actually loaded
@@ -236,8 +274,13 @@
 
         Load the module specified by 'fullname'. 'fullname' must be the
         fully qualified (dotted) module name. It returns the imported
-        module, or raises ZipImportError if it wasn't found.
+        module, or raises ZipImportError if it could not be imported.
+
+        Deprecated since Python 3.10. Use exec_module() instead.
         """
+        msg = ("zipimport.zipimporter.load_module() is deprecated and slated for "
+               "removal in Python 3.12; use exec_module() instead")
+        _warnings.warn(msg, DeprecationWarning)
         code, ispackage, modpath = _get_module_code(self, fullname)
         mod = sys.modules.get(fullname)
         if mod is None or not isinstance(mod, _module_type):
@@ -280,11 +323,18 @@
                 return None
         except ZipImportError:
             return None
-        if not _ZipImportResourceReader._registered:
-            from importlib.abc import ResourceReader
-            ResourceReader.register(_ZipImportResourceReader)
-            _ZipImportResourceReader._registered = True
-        return _ZipImportResourceReader(self, fullname)
+        from importlib.readers import ZipReader
+        return ZipReader(self, fullname)
+
+
+    def invalidate_caches(self):
+        """Reload the file data of the archive path."""
+        try:
+            self._files = _read_directory(self.archive)
+            _zip_directory_cache[self.archive] = self._files
+        except ZipImportError:
+            _zip_directory_cache.pop(self.archive, None)
+            self._files = {}
 
 
     def __repr__(self):
@@ -580,20 +630,15 @@
 
 
 # Given the contents of a .py[co] file, unmarshal the data
-# and return the code object. Return None if it the magic word doesn't
-# match, or if the recorded .py[co] metadata does not match the source,
-# (we do this instead of raising an exception as we fall back
-# to .py if available and we don't want to mask other errors).
+# and return the code object. Raises ImportError if the magic word doesn't
+# match, or if the recorded .py[co] metadata does not match the source.
 def _unmarshal_code(self, pathname, fullpath, fullname, data):
     exc_details = {
         'name': fullname,
         'path': fullpath,
     }
 
-    try:
-        flags = _bootstrap_external._classify_pyc(data, fullname, exc_details)
-    except ImportError:
-        return None
+    flags = _bootstrap_external._classify_pyc(data, fullname, exc_details)
 
     hash_based = flags & 0b1 != 0
     if hash_based:
@@ -607,11 +652,8 @@
                     source_bytes,
                 )
 
-                try:
-                    _bootstrap_external._validate_hash_pyc(
-                        data, source_hash, fullname, exc_details)
-                except ImportError:
-                    return None
+                _bootstrap_external._validate_hash_pyc(
+                    data, source_hash, fullname, exc_details)
     else:
         source_mtime, source_size = \
             _get_mtime_and_size_of_source(self, fullpath)
@@ -697,6 +739,7 @@
 # 'fullname'.
 def _get_module_code(self, fullname):
     path = _get_module_path(self, fullname)
+    import_error = None
     for suffix, isbytecode, ispackage in _zip_searchorder:
         fullpath = path + suffix
         _bootstrap._verbose_message('trying {}{}{}', self.archive, path_sep, fullpath, verbosity=2)
@@ -707,8 +750,12 @@
         else:
             modpath = toc_entry[0]
             data = _get_data(self.archive, toc_entry)
+            code = None
             if isbytecode:
-                code = _unmarshal_code(self, modpath, fullpath, fullname, data)
+                try:
+                    code = _unmarshal_code(self, modpath, fullpath, fullname, data)
+                except ImportError as exc:
+                    import_error = exc
             else:
                 code = _compile_source(modpath, data)
             if code is None:
@@ -718,75 +765,8 @@
             modpath = toc_entry[0]
             return code, ispackage, modpath
     else:
-        raise ZipImportError(f"can't find module {fullname!r}", name=fullname)
-
-
-class _ZipImportResourceReader:
-    """Private class used to support ZipImport.get_resource_reader().
-
-    This class is allowed to reference all the innards and private parts of
-    the zipimporter.
-    """
-    _registered = False
-
-    def __init__(self, zipimporter, fullname):
-        self.zipimporter = zipimporter
-        self.fullname = fullname
-
-    def open_resource(self, resource):
-        fullname_as_path = self.fullname.replace('.', '/')
-        path = f'{fullname_as_path}/{resource}'
-        from io import BytesIO
-        try:
-            return BytesIO(self.zipimporter.get_data(path))
-        except OSError:
-            raise FileNotFoundError(path)
-
-    def resource_path(self, resource):
-        # All resources are in the zip file, so there is no path to the file.
-        # Raising FileNotFoundError tells the higher level API to extract the
-        # binary data and create a temporary file.
-        raise FileNotFoundError
-
-    def is_resource(self, name):
-        # Maybe we could do better, but if we can get the data, it's a
-        # resource.  Otherwise it isn't.
-        fullname_as_path = self.fullname.replace('.', '/')
-        path = f'{fullname_as_path}/{name}'
-        try:
-            self.zipimporter.get_data(path)
-        except OSError:
-            return False
-        return True
-
-    def contents(self):
-        # This is a bit convoluted, because fullname will be a module path,
-        # but _files is a list of file names relative to the top of the
-        # archive's namespace.  We want to compare file paths to find all the
-        # names of things inside the module represented by fullname.  So we
-        # turn the module path of fullname into a file path relative to the
-        # top of the archive, and then we iterate through _files looking for
-        # names inside that "directory".
-        from pathlib import Path
-        fullname_path = Path(self.zipimporter.get_filename(self.fullname))
-        relative_path = fullname_path.relative_to(self.zipimporter.archive)
-        # Don't forget that fullname names a package, so its path will include
-        # __init__.py, which we want to ignore.
-        assert relative_path.name == '__init__.py'
-        package_path = relative_path.parent
-        subdirs_seen = set()
-        for filename in self.zipimporter._files:
-            try:
-                relative = Path(filename).relative_to(package_path)
-            except ValueError:
-                continue
-            # If the path of the file (which is relative to the top of the zip
-            # namespace), relative to the package given when the resource
-            # reader was created, has a parent, then it's a name in a
-            # subdirectory and thus we skip it.
-            parent_name = relative.parent.name
-            if len(parent_name) == 0:
-                yield relative.name
-            elif parent_name not in subdirs_seen:
-                subdirs_seen.add(parent_name)
-                yield parent_name
+        if import_error:
+            msg = f"module load failed: {import_error}"
+            raise ZipImportError(msg, name=fullname) from import_error
+        else:
+            raise ZipImportError(f"can't find module {fullname!r}", name=fullname)
diff --git a/Lib/zoneinfo/_common.py b/Lib/zoneinfo/_common.py
index 41c898f..4c24f01 100644
--- a/Lib/zoneinfo/_common.py
+++ b/Lib/zoneinfo/_common.py
@@ -136,8 +136,7 @@
     ]
 
     def __init__(self, *args):
-        assert len(self.__slots__) == len(args)
-        for attr, val in zip(self.__slots__, args):
+        for attr, val in zip(self.__slots__, args, strict=True):
             setattr(self, attr, val)
 
     @classmethod
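
zip(..., strict=True) (new in Python 3.10, PEP 618) raises ValueError on a length mismatch, so the attribute count is still validated without a bare assert that -O builds would strip. In isolation (the slot names here are hypothetical):

    slots = ('tz_before', 'tz_after')
    try:
        for attr, val in zip(slots, ('only-one',), strict=True):
            pass
    except ValueError as exc:
        print(exc)   # e.g. "zip() argument 2 is shorter than argument 1"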
diff --git a/Lib/zoneinfo/_tzpath.py b/Lib/zoneinfo/_tzpath.py
index 9513611..672560b 100644
--- a/Lib/zoneinfo/_tzpath.py
+++ b/Lib/zoneinfo/_tzpath.py
@@ -42,7 +42,7 @@
         msg = _get_invalid_paths_message(raw_tzpath)
 
         warnings.warn(
-            "Invalid paths specified in PYTHONTZPATH environment variable."
+            "Invalid paths specified in PYTHONTZPATH environment variable. "
             + msg,
             InvalidTZPathWarning,
         )
diff --git a/Lib/zoneinfo/_zoneinfo.py b/Lib/zoneinfo/_zoneinfo.py
index 9810637..de68380 100644
--- a/Lib/zoneinfo/_zoneinfo.py
+++ b/Lib/zoneinfo/_zoneinfo.py
@@ -338,7 +338,7 @@
                 comp_idx = trans_idx[i + 1]
 
                 # If the following transition is also DST and we couldn't
-                # find the DST offset by this point, we're going ot have to
+                # find the DST offset by this point, we're going to have to
                 # skip it and hope this transition gets assigned later
                 if isdsts[comp_idx]:
                     continue
diff --git a/include/Python-ast.h b/include/Python-ast.h
deleted file mode 100644
index e7afa1e..0000000
--- a/include/Python-ast.h
+++ /dev/null
@@ -1,697 +0,0 @@
-/* File automatically generated by Parser/asdl_c.py. */
-
-#ifndef Py_PYTHON_AST_H
-#define Py_PYTHON_AST_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef Py_LIMITED_API
-#include "asdl.h"
-
-#undef Yield   /* undefine macro conflicting with <winbase.h> */
-
-typedef struct _mod *mod_ty;
-
-typedef struct _stmt *stmt_ty;
-
-typedef struct _expr *expr_ty;
-
-typedef enum _expr_context { Load=1, Store=2, Del=3 } expr_context_ty;
-
-typedef enum _boolop { And=1, Or=2 } boolop_ty;
-
-typedef enum _operator { Add=1, Sub=2, Mult=3, MatMult=4, Div=5, Mod=6, Pow=7,
-                         LShift=8, RShift=9, BitOr=10, BitXor=11, BitAnd=12,
-                         FloorDiv=13 } operator_ty;
-
-typedef enum _unaryop { Invert=1, Not=2, UAdd=3, USub=4 } unaryop_ty;
-
-typedef enum _cmpop { Eq=1, NotEq=2, Lt=3, LtE=4, Gt=5, GtE=6, Is=7, IsNot=8,
-                      In=9, NotIn=10 } cmpop_ty;
-
-typedef struct _comprehension *comprehension_ty;
-
-typedef struct _excepthandler *excepthandler_ty;
-
-typedef struct _arguments *arguments_ty;
-
-typedef struct _arg *arg_ty;
-
-typedef struct _keyword *keyword_ty;
-
-typedef struct _alias *alias_ty;
-
-typedef struct _withitem *withitem_ty;
-
-typedef struct _type_ignore *type_ignore_ty;
-
-
-enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3,
-                 FunctionType_kind=4};
-struct _mod {
-    enum _mod_kind kind;
-    union {
-        struct {
-            asdl_seq *body;
-            asdl_seq *type_ignores;
-        } Module;
-
-        struct {
-            asdl_seq *body;
-        } Interactive;
-
-        struct {
-            expr_ty body;
-        } Expression;
-
-        struct {
-            asdl_seq *argtypes;
-            expr_ty returns;
-        } FunctionType;
-
-    } v;
-};
-
-enum _stmt_kind {FunctionDef_kind=1, AsyncFunctionDef_kind=2, ClassDef_kind=3,
-                  Return_kind=4, Delete_kind=5, Assign_kind=6,
-                  AugAssign_kind=7, AnnAssign_kind=8, For_kind=9,
-                  AsyncFor_kind=10, While_kind=11, If_kind=12, With_kind=13,
-                  AsyncWith_kind=14, Raise_kind=15, Try_kind=16,
-                  Assert_kind=17, Import_kind=18, ImportFrom_kind=19,
-                  Global_kind=20, Nonlocal_kind=21, Expr_kind=22, Pass_kind=23,
-                  Break_kind=24, Continue_kind=25};
-struct _stmt {
-    enum _stmt_kind kind;
-    union {
-        struct {
-            identifier name;
-            arguments_ty args;
-            asdl_seq *body;
-            asdl_seq *decorator_list;
-            expr_ty returns;
-            string type_comment;
-        } FunctionDef;
-
-        struct {
-            identifier name;
-            arguments_ty args;
-            asdl_seq *body;
-            asdl_seq *decorator_list;
-            expr_ty returns;
-            string type_comment;
-        } AsyncFunctionDef;
-
-        struct {
-            identifier name;
-            asdl_seq *bases;
-            asdl_seq *keywords;
-            asdl_seq *body;
-            asdl_seq *decorator_list;
-        } ClassDef;
-
-        struct {
-            expr_ty value;
-        } Return;
-
-        struct {
-            asdl_seq *targets;
-        } Delete;
-
-        struct {
-            asdl_seq *targets;
-            expr_ty value;
-            string type_comment;
-        } Assign;
-
-        struct {
-            expr_ty target;
-            operator_ty op;
-            expr_ty value;
-        } AugAssign;
-
-        struct {
-            expr_ty target;
-            expr_ty annotation;
-            expr_ty value;
-            int simple;
-        } AnnAssign;
-
-        struct {
-            expr_ty target;
-            expr_ty iter;
-            asdl_seq *body;
-            asdl_seq *orelse;
-            string type_comment;
-        } For;
-
-        struct {
-            expr_ty target;
-            expr_ty iter;
-            asdl_seq *body;
-            asdl_seq *orelse;
-            string type_comment;
-        } AsyncFor;
-
-        struct {
-            expr_ty test;
-            asdl_seq *body;
-            asdl_seq *orelse;
-        } While;
-
-        struct {
-            expr_ty test;
-            asdl_seq *body;
-            asdl_seq *orelse;
-        } If;
-
-        struct {
-            asdl_seq *items;
-            asdl_seq *body;
-            string type_comment;
-        } With;
-
-        struct {
-            asdl_seq *items;
-            asdl_seq *body;
-            string type_comment;
-        } AsyncWith;
-
-        struct {
-            expr_ty exc;
-            expr_ty cause;
-        } Raise;
-
-        struct {
-            asdl_seq *body;
-            asdl_seq *handlers;
-            asdl_seq *orelse;
-            asdl_seq *finalbody;
-        } Try;
-
-        struct {
-            expr_ty test;
-            expr_ty msg;
-        } Assert;
-
-        struct {
-            asdl_seq *names;
-        } Import;
-
-        struct {
-            identifier module;
-            asdl_seq *names;
-            int level;
-        } ImportFrom;
-
-        struct {
-            asdl_seq *names;
-        } Global;
-
-        struct {
-            asdl_seq *names;
-        } Nonlocal;
-
-        struct {
-            expr_ty value;
-        } Expr;
-
-    } v;
-    int lineno;
-    int col_offset;
-    int end_lineno;
-    int end_col_offset;
-};
-
-enum _expr_kind {BoolOp_kind=1, NamedExpr_kind=2, BinOp_kind=3, UnaryOp_kind=4,
-                  Lambda_kind=5, IfExp_kind=6, Dict_kind=7, Set_kind=8,
-                  ListComp_kind=9, SetComp_kind=10, DictComp_kind=11,
-                  GeneratorExp_kind=12, Await_kind=13, Yield_kind=14,
-                  YieldFrom_kind=15, Compare_kind=16, Call_kind=17,
-                  FormattedValue_kind=18, JoinedStr_kind=19, Constant_kind=20,
-                  Attribute_kind=21, Subscript_kind=22, Starred_kind=23,
-                  Name_kind=24, List_kind=25, Tuple_kind=26, Slice_kind=27};
-struct _expr {
-    enum _expr_kind kind;
-    union {
-        struct {
-            boolop_ty op;
-            asdl_seq *values;
-        } BoolOp;
-
-        struct {
-            expr_ty target;
-            expr_ty value;
-        } NamedExpr;
-
-        struct {
-            expr_ty left;
-            operator_ty op;
-            expr_ty right;
-        } BinOp;
-
-        struct {
-            unaryop_ty op;
-            expr_ty operand;
-        } UnaryOp;
-
-        struct {
-            arguments_ty args;
-            expr_ty body;
-        } Lambda;
-
-        struct {
-            expr_ty test;
-            expr_ty body;
-            expr_ty orelse;
-        } IfExp;
-
-        struct {
-            asdl_seq *keys;
-            asdl_seq *values;
-        } Dict;
-
-        struct {
-            asdl_seq *elts;
-        } Set;
-
-        struct {
-            expr_ty elt;
-            asdl_seq *generators;
-        } ListComp;
-
-        struct {
-            expr_ty elt;
-            asdl_seq *generators;
-        } SetComp;
-
-        struct {
-            expr_ty key;
-            expr_ty value;
-            asdl_seq *generators;
-        } DictComp;
-
-        struct {
-            expr_ty elt;
-            asdl_seq *generators;
-        } GeneratorExp;
-
-        struct {
-            expr_ty value;
-        } Await;
-
-        struct {
-            expr_ty value;
-        } Yield;
-
-        struct {
-            expr_ty value;
-        } YieldFrom;
-
-        struct {
-            expr_ty left;
-            asdl_int_seq *ops;
-            asdl_seq *comparators;
-        } Compare;
-
-        struct {
-            expr_ty func;
-            asdl_seq *args;
-            asdl_seq *keywords;
-        } Call;
-
-        struct {
-            expr_ty value;
-            int conversion;
-            expr_ty format_spec;
-        } FormattedValue;
-
-        struct {
-            asdl_seq *values;
-        } JoinedStr;
-
-        struct {
-            constant value;
-            string kind;
-        } Constant;
-
-        struct {
-            expr_ty value;
-            identifier attr;
-            expr_context_ty ctx;
-        } Attribute;
-
-        struct {
-            expr_ty value;
-            expr_ty slice;
-            expr_context_ty ctx;
-        } Subscript;
-
-        struct {
-            expr_ty value;
-            expr_context_ty ctx;
-        } Starred;
-
-        struct {
-            identifier id;
-            expr_context_ty ctx;
-        } Name;
-
-        struct {
-            asdl_seq *elts;
-            expr_context_ty ctx;
-        } List;
-
-        struct {
-            asdl_seq *elts;
-            expr_context_ty ctx;
-        } Tuple;
-
-        struct {
-            expr_ty lower;
-            expr_ty upper;
-            expr_ty step;
-        } Slice;
-
-    } v;
-    int lineno;
-    int col_offset;
-    int end_lineno;
-    int end_col_offset;
-};
-
-struct _comprehension {
-    expr_ty target;
-    expr_ty iter;
-    asdl_seq *ifs;
-    int is_async;
-};
-
-enum _excepthandler_kind {ExceptHandler_kind=1};
-struct _excepthandler {
-    enum _excepthandler_kind kind;
-    union {
-        struct {
-            expr_ty type;
-            identifier name;
-            asdl_seq *body;
-        } ExceptHandler;
-
-    } v;
-    int lineno;
-    int col_offset;
-    int end_lineno;
-    int end_col_offset;
-};
-
-struct _arguments {
-    asdl_seq *posonlyargs;
-    asdl_seq *args;
-    arg_ty vararg;
-    asdl_seq *kwonlyargs;
-    asdl_seq *kw_defaults;
-    arg_ty kwarg;
-    asdl_seq *defaults;
-};
-
-struct _arg {
-    identifier arg;
-    expr_ty annotation;
-    string type_comment;
-    int lineno;
-    int col_offset;
-    int end_lineno;
-    int end_col_offset;
-};
-
-struct _keyword {
-    identifier arg;
-    expr_ty value;
-    int lineno;
-    int col_offset;
-    int end_lineno;
-    int end_col_offset;
-};
-
-struct _alias {
-    identifier name;
-    identifier asname;
-};
-
-struct _withitem {
-    expr_ty context_expr;
-    expr_ty optional_vars;
-};
-
-enum _type_ignore_kind {TypeIgnore_kind=1};
-struct _type_ignore {
-    enum _type_ignore_kind kind;
-    union {
-        struct {
-            int lineno;
-            string tag;
-        } TypeIgnore;
-
-    } v;
-};
-
-
-// Note: these macros affect function definitions, not only call sites.
-#define Module(a0, a1, a2) _Py_Module(a0, a1, a2)
-mod_ty _Py_Module(asdl_seq * body, asdl_seq * type_ignores, PyArena *arena);
-#define Interactive(a0, a1) _Py_Interactive(a0, a1)
-mod_ty _Py_Interactive(asdl_seq * body, PyArena *arena);
-#define Expression(a0, a1) _Py_Expression(a0, a1)
-mod_ty _Py_Expression(expr_ty body, PyArena *arena);
-#define FunctionType(a0, a1, a2) _Py_FunctionType(a0, a1, a2)
-mod_ty _Py_FunctionType(asdl_seq * argtypes, expr_ty returns, PyArena *arena);
-#define FunctionDef(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) _Py_FunctionDef(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)
-stmt_ty _Py_FunctionDef(identifier name, arguments_ty args, asdl_seq * body,
-                        asdl_seq * decorator_list, expr_ty returns, string
-                        type_comment, int lineno, int col_offset, int
-                        end_lineno, int end_col_offset, PyArena *arena);
-#define AsyncFunctionDef(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) _Py_AsyncFunctionDef(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)
-stmt_ty _Py_AsyncFunctionDef(identifier name, arguments_ty args, asdl_seq *
-                             body, asdl_seq * decorator_list, expr_ty returns,
-                             string type_comment, int lineno, int col_offset,
-                             int end_lineno, int end_col_offset, PyArena
-                             *arena);
-#define ClassDef(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) _Py_ClassDef(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)
-stmt_ty _Py_ClassDef(identifier name, asdl_seq * bases, asdl_seq * keywords,
-                     asdl_seq * body, asdl_seq * decorator_list, int lineno,
-                     int col_offset, int end_lineno, int end_col_offset,
-                     PyArena *arena);
-#define Return(a0, a1, a2, a3, a4, a5) _Py_Return(a0, a1, a2, a3, a4, a5)
-stmt_ty _Py_Return(expr_ty value, int lineno, int col_offset, int end_lineno,
-                   int end_col_offset, PyArena *arena);
-#define Delete(a0, a1, a2, a3, a4, a5) _Py_Delete(a0, a1, a2, a3, a4, a5)
-stmt_ty _Py_Delete(asdl_seq * targets, int lineno, int col_offset, int
-                   end_lineno, int end_col_offset, PyArena *arena);
-#define Assign(a0, a1, a2, a3, a4, a5, a6, a7) _Py_Assign(a0, a1, a2, a3, a4, a5, a6, a7)
-stmt_ty _Py_Assign(asdl_seq * targets, expr_ty value, string type_comment, int
-                   lineno, int col_offset, int end_lineno, int end_col_offset,
-                   PyArena *arena);
-#define AugAssign(a0, a1, a2, a3, a4, a5, a6, a7) _Py_AugAssign(a0, a1, a2, a3, a4, a5, a6, a7)
-stmt_ty _Py_AugAssign(expr_ty target, operator_ty op, expr_ty value, int
-                      lineno, int col_offset, int end_lineno, int
-                      end_col_offset, PyArena *arena);
-#define AnnAssign(a0, a1, a2, a3, a4, a5, a6, a7, a8) _Py_AnnAssign(a0, a1, a2, a3, a4, a5, a6, a7, a8)
-stmt_ty _Py_AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int
-                      simple, int lineno, int col_offset, int end_lineno, int
-                      end_col_offset, PyArena *arena);
-#define For(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) _Py_For(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)
-stmt_ty _Py_For(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq *
-                orelse, string type_comment, int lineno, int col_offset, int
-                end_lineno, int end_col_offset, PyArena *arena);
-#define AsyncFor(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) _Py_AsyncFor(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)
-stmt_ty _Py_AsyncFor(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq *
-                     orelse, string type_comment, int lineno, int col_offset,
-                     int end_lineno, int end_col_offset, PyArena *arena);
-#define While(a0, a1, a2, a3, a4, a5, a6, a7) _Py_While(a0, a1, a2, a3, a4, a5, a6, a7)
-stmt_ty _Py_While(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno,
-                  int col_offset, int end_lineno, int end_col_offset, PyArena
-                  *arena);
-#define If(a0, a1, a2, a3, a4, a5, a6, a7) _Py_If(a0, a1, a2, a3, a4, a5, a6, a7)
-stmt_ty _Py_If(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno,
-               int col_offset, int end_lineno, int end_col_offset, PyArena
-               *arena);
-#define With(a0, a1, a2, a3, a4, a5, a6, a7) _Py_With(a0, a1, a2, a3, a4, a5, a6, a7)
-stmt_ty _Py_With(asdl_seq * items, asdl_seq * body, string type_comment, int
-                 lineno, int col_offset, int end_lineno, int end_col_offset,
-                 PyArena *arena);
-#define AsyncWith(a0, a1, a2, a3, a4, a5, a6, a7) _Py_AsyncWith(a0, a1, a2, a3, a4, a5, a6, a7)
-stmt_ty _Py_AsyncWith(asdl_seq * items, asdl_seq * body, string type_comment,
-                      int lineno, int col_offset, int end_lineno, int
-                      end_col_offset, PyArena *arena);
-#define Raise(a0, a1, a2, a3, a4, a5, a6) _Py_Raise(a0, a1, a2, a3, a4, a5, a6)
-stmt_ty _Py_Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset, int
-                  end_lineno, int end_col_offset, PyArena *arena);
-#define Try(a0, a1, a2, a3, a4, a5, a6, a7, a8) _Py_Try(a0, a1, a2, a3, a4, a5, a6, a7, a8)
-stmt_ty _Py_Try(asdl_seq * body, asdl_seq * handlers, asdl_seq * orelse,
-                asdl_seq * finalbody, int lineno, int col_offset, int
-                end_lineno, int end_col_offset, PyArena *arena);
-#define Assert(a0, a1, a2, a3, a4, a5, a6) _Py_Assert(a0, a1, a2, a3, a4, a5, a6)
-stmt_ty _Py_Assert(expr_ty test, expr_ty msg, int lineno, int col_offset, int
-                   end_lineno, int end_col_offset, PyArena *arena);
-#define Import(a0, a1, a2, a3, a4, a5) _Py_Import(a0, a1, a2, a3, a4, a5)
-stmt_ty _Py_Import(asdl_seq * names, int lineno, int col_offset, int
-                   end_lineno, int end_col_offset, PyArena *arena);
-#define ImportFrom(a0, a1, a2, a3, a4, a5, a6, a7) _Py_ImportFrom(a0, a1, a2, a3, a4, a5, a6, a7)
-stmt_ty _Py_ImportFrom(identifier module, asdl_seq * names, int level, int
-                       lineno, int col_offset, int end_lineno, int
-                       end_col_offset, PyArena *arena);
-#define Global(a0, a1, a2, a3, a4, a5) _Py_Global(a0, a1, a2, a3, a4, a5)
-stmt_ty _Py_Global(asdl_seq * names, int lineno, int col_offset, int
-                   end_lineno, int end_col_offset, PyArena *arena);
-#define Nonlocal(a0, a1, a2, a3, a4, a5) _Py_Nonlocal(a0, a1, a2, a3, a4, a5)
-stmt_ty _Py_Nonlocal(asdl_seq * names, int lineno, int col_offset, int
-                     end_lineno, int end_col_offset, PyArena *arena);
-#define Expr(a0, a1, a2, a3, a4, a5) _Py_Expr(a0, a1, a2, a3, a4, a5)
-stmt_ty _Py_Expr(expr_ty value, int lineno, int col_offset, int end_lineno, int
-                 end_col_offset, PyArena *arena);
-#define Pass(a0, a1, a2, a3, a4) _Py_Pass(a0, a1, a2, a3, a4)
-stmt_ty _Py_Pass(int lineno, int col_offset, int end_lineno, int
-                 end_col_offset, PyArena *arena);
-#define Break(a0, a1, a2, a3, a4) _Py_Break(a0, a1, a2, a3, a4)
-stmt_ty _Py_Break(int lineno, int col_offset, int end_lineno, int
-                  end_col_offset, PyArena *arena);
-#define Continue(a0, a1, a2, a3, a4) _Py_Continue(a0, a1, a2, a3, a4)
-stmt_ty _Py_Continue(int lineno, int col_offset, int end_lineno, int
-                     end_col_offset, PyArena *arena);
-#define BoolOp(a0, a1, a2, a3, a4, a5, a6) _Py_BoolOp(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_BoolOp(boolop_ty op, asdl_seq * values, int lineno, int col_offset,
-                   int end_lineno, int end_col_offset, PyArena *arena);
-#define NamedExpr(a0, a1, a2, a3, a4, a5, a6) _Py_NamedExpr(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_NamedExpr(expr_ty target, expr_ty value, int lineno, int
-                      col_offset, int end_lineno, int end_col_offset, PyArena
-                      *arena);
-#define BinOp(a0, a1, a2, a3, a4, a5, a6, a7) _Py_BinOp(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno, int
-                  col_offset, int end_lineno, int end_col_offset, PyArena
-                  *arena);
-#define UnaryOp(a0, a1, a2, a3, a4, a5, a6) _Py_UnaryOp(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int col_offset,
-                    int end_lineno, int end_col_offset, PyArena *arena);
-#define Lambda(a0, a1, a2, a3, a4, a5, a6) _Py_Lambda(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_Lambda(arguments_ty args, expr_ty body, int lineno, int col_offset,
-                   int end_lineno, int end_col_offset, PyArena *arena);
-#define IfExp(a0, a1, a2, a3, a4, a5, a6, a7) _Py_IfExp(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno, int
-                  col_offset, int end_lineno, int end_col_offset, PyArena
-                  *arena);
-#define Dict(a0, a1, a2, a3, a4, a5, a6) _Py_Dict(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_Dict(asdl_seq * keys, asdl_seq * values, int lineno, int
-                 col_offset, int end_lineno, int end_col_offset, PyArena
-                 *arena);
-#define Set(a0, a1, a2, a3, a4, a5) _Py_Set(a0, a1, a2, a3, a4, a5)
-expr_ty _Py_Set(asdl_seq * elts, int lineno, int col_offset, int end_lineno,
-                int end_col_offset, PyArena *arena);
-#define ListComp(a0, a1, a2, a3, a4, a5, a6) _Py_ListComp(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_ListComp(expr_ty elt, asdl_seq * generators, int lineno, int
-                     col_offset, int end_lineno, int end_col_offset, PyArena
-                     *arena);
-#define SetComp(a0, a1, a2, a3, a4, a5, a6) _Py_SetComp(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_SetComp(expr_ty elt, asdl_seq * generators, int lineno, int
-                    col_offset, int end_lineno, int end_col_offset, PyArena
-                    *arena);
-#define DictComp(a0, a1, a2, a3, a4, a5, a6, a7) _Py_DictComp(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_DictComp(expr_ty key, expr_ty value, asdl_seq * generators, int
-                     lineno, int col_offset, int end_lineno, int
-                     end_col_offset, PyArena *arena);
-#define GeneratorExp(a0, a1, a2, a3, a4, a5, a6) _Py_GeneratorExp(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_GeneratorExp(expr_ty elt, asdl_seq * generators, int lineno, int
-                         col_offset, int end_lineno, int end_col_offset,
-                         PyArena *arena);
-#define Await(a0, a1, a2, a3, a4, a5) _Py_Await(a0, a1, a2, a3, a4, a5)
-expr_ty _Py_Await(expr_ty value, int lineno, int col_offset, int end_lineno,
-                  int end_col_offset, PyArena *arena);
-#define Yield(a0, a1, a2, a3, a4, a5) _Py_Yield(a0, a1, a2, a3, a4, a5)
-expr_ty _Py_Yield(expr_ty value, int lineno, int col_offset, int end_lineno,
-                  int end_col_offset, PyArena *arena);
-#define YieldFrom(a0, a1, a2, a3, a4, a5) _Py_YieldFrom(a0, a1, a2, a3, a4, a5)
-expr_ty _Py_YieldFrom(expr_ty value, int lineno, int col_offset, int
-                      end_lineno, int end_col_offset, PyArena *arena);
-#define Compare(a0, a1, a2, a3, a4, a5, a6, a7) _Py_Compare(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_Compare(expr_ty left, asdl_int_seq * ops, asdl_seq * comparators,
-                    int lineno, int col_offset, int end_lineno, int
-                    end_col_offset, PyArena *arena);
-#define Call(a0, a1, a2, a3, a4, a5, a6, a7) _Py_Call(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_Call(expr_ty func, asdl_seq * args, asdl_seq * keywords, int
-                 lineno, int col_offset, int end_lineno, int end_col_offset,
-                 PyArena *arena);
-#define FormattedValue(a0, a1, a2, a3, a4, a5, a6, a7) _Py_FormattedValue(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_FormattedValue(expr_ty value, int conversion, expr_ty format_spec,
-                           int lineno, int col_offset, int end_lineno, int
-                           end_col_offset, PyArena *arena);
-#define JoinedStr(a0, a1, a2, a3, a4, a5) _Py_JoinedStr(a0, a1, a2, a3, a4, a5)
-expr_ty _Py_JoinedStr(asdl_seq * values, int lineno, int col_offset, int
-                      end_lineno, int end_col_offset, PyArena *arena);
-#define Constant(a0, a1, a2, a3, a4, a5, a6) _Py_Constant(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_Constant(constant value, string kind, int lineno, int col_offset,
-                     int end_lineno, int end_col_offset, PyArena *arena);
-#define Attribute(a0, a1, a2, a3, a4, a5, a6, a7) _Py_Attribute(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_Attribute(expr_ty value, identifier attr, expr_context_ty ctx, int
-                      lineno, int col_offset, int end_lineno, int
-                      end_col_offset, PyArena *arena);
-#define Subscript(a0, a1, a2, a3, a4, a5, a6, a7) _Py_Subscript(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_Subscript(expr_ty value, expr_ty slice, expr_context_ty ctx, int
-                      lineno, int col_offset, int end_lineno, int
-                      end_col_offset, PyArena *arena);
-#define Starred(a0, a1, a2, a3, a4, a5, a6) _Py_Starred(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_Starred(expr_ty value, expr_context_ty ctx, int lineno, int
-                    col_offset, int end_lineno, int end_col_offset, PyArena
-                    *arena);
-#define Name(a0, a1, a2, a3, a4, a5, a6) _Py_Name(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_Name(identifier id, expr_context_ty ctx, int lineno, int
-                 col_offset, int end_lineno, int end_col_offset, PyArena
-                 *arena);
-#define List(a0, a1, a2, a3, a4, a5, a6) _Py_List(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_List(asdl_seq * elts, expr_context_ty ctx, int lineno, int
-                 col_offset, int end_lineno, int end_col_offset, PyArena
-                 *arena);
-#define Tuple(a0, a1, a2, a3, a4, a5, a6) _Py_Tuple(a0, a1, a2, a3, a4, a5, a6)
-expr_ty _Py_Tuple(asdl_seq * elts, expr_context_ty ctx, int lineno, int
-                  col_offset, int end_lineno, int end_col_offset, PyArena
-                  *arena);
-#define Slice(a0, a1, a2, a3, a4, a5, a6, a7) _Py_Slice(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_Slice(expr_ty lower, expr_ty upper, expr_ty step, int lineno, int
-                  col_offset, int end_lineno, int end_col_offset, PyArena
-                  *arena);
-#define comprehension(a0, a1, a2, a3, a4) _Py_comprehension(a0, a1, a2, a3, a4)
-comprehension_ty _Py_comprehension(expr_ty target, expr_ty iter, asdl_seq *
-                                   ifs, int is_async, PyArena *arena);
-#define ExceptHandler(a0, a1, a2, a3, a4, a5, a6, a7) _Py_ExceptHandler(a0, a1, a2, a3, a4, a5, a6, a7)
-excepthandler_ty _Py_ExceptHandler(expr_ty type, identifier name, asdl_seq *
-                                   body, int lineno, int col_offset, int
-                                   end_lineno, int end_col_offset, PyArena
-                                   *arena);
-#define arguments(a0, a1, a2, a3, a4, a5, a6, a7) _Py_arguments(a0, a1, a2, a3, a4, a5, a6, a7)
-arguments_ty _Py_arguments(asdl_seq * posonlyargs, asdl_seq * args, arg_ty
-                           vararg, asdl_seq * kwonlyargs, asdl_seq *
-                           kw_defaults, arg_ty kwarg, asdl_seq * defaults,
-                           PyArena *arena);
-#define arg(a0, a1, a2, a3, a4, a5, a6, a7) _Py_arg(a0, a1, a2, a3, a4, a5, a6, a7)
-arg_ty _Py_arg(identifier arg, expr_ty annotation, string type_comment, int
-               lineno, int col_offset, int end_lineno, int end_col_offset,
-               PyArena *arena);
-#define keyword(a0, a1, a2, a3, a4, a5, a6) _Py_keyword(a0, a1, a2, a3, a4, a5, a6)
-keyword_ty _Py_keyword(identifier arg, expr_ty value, int lineno, int
-                       col_offset, int end_lineno, int end_col_offset, PyArena
-                       *arena);
-#define alias(a0, a1, a2) _Py_alias(a0, a1, a2)
-alias_ty _Py_alias(identifier name, identifier asname, PyArena *arena);
-#define withitem(a0, a1, a2) _Py_withitem(a0, a1, a2)
-withitem_ty _Py_withitem(expr_ty context_expr, expr_ty optional_vars, PyArena
-                         *arena);
-#define TypeIgnore(a0, a1, a2) _Py_TypeIgnore(a0, a1, a2)
-type_ignore_ty _Py_TypeIgnore(int lineno, string tag, PyArena *arena);
-
-PyObject* PyAST_mod2obj(mod_ty t);
-mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);
-int PyAST_Check(PyObject* obj);
-#endif /* !Py_LIMITED_API */
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_PYTHON_AST_H */
diff --git a/include/Python.h b/include/Python.h
index dcd0a57..d3186c3 100644
--- a/include/Python.h
+++ b/include/Python.h
@@ -35,19 +35,6 @@
 #ifndef MS_WINDOWS
 #include <unistd.h>
 #endif
-#ifdef HAVE_CRYPT_H
-#if defined(HAVE_CRYPT_R) && !defined(_GNU_SOURCE)
-/* Required for glibc to expose the crypt_r() function prototype. */
-#  define _GNU_SOURCE
-#  define _Py_GNU_SOURCE_FOR_CRYPT
-#endif
-#include <crypt.h>
-#ifdef _Py_GNU_SOURCE_FOR_CRYPT
-/* Don't leak the _GNU_SOURCE define to other headers. */
-#  undef _GNU_SOURCE
-#  undef _Py_GNU_SOURCE_FOR_CRYPT
-#endif
-#endif
 
 /* For size_t? */
 #ifdef HAVE_STDDEF_H
@@ -63,26 +50,25 @@
 #include "pyport.h"
 #include "pymacro.h"
 
-/* A convenient way for code to know if clang's memory sanitizer is enabled. */
+/* A convenient way for code to know if sanitizers are enabled. */
 #if defined(__has_feature)
 #  if __has_feature(memory_sanitizer)
 #    if !defined(_Py_MEMORY_SANITIZER)
 #      define _Py_MEMORY_SANITIZER
 #    endif
 #  endif
+#  if __has_feature(address_sanitizer)
+#    if !defined(_Py_ADDRESS_SANITIZER)
+#      define _Py_ADDRESS_SANITIZER
+#    endif
+#  endif
+#elif defined(__GNUC__)
+#  if defined(__SANITIZE_ADDRESS__)
+#    define _Py_ADDRESS_SANITIZER
+#  endif
 #endif
 
-/* Debug-mode build with pymalloc implies PYMALLOC_DEBUG.
- *  PYMALLOC_DEBUG is in error if pymalloc is not in use.
- */
-#if defined(Py_DEBUG) && defined(WITH_PYMALLOC) && !defined(PYMALLOC_DEBUG)
-#define PYMALLOC_DEBUG
-#endif
-#if defined(PYMALLOC_DEBUG) && !defined(WITH_PYMALLOC)
-#error "PYMALLOC_DEBUG requires WITH_PYMALLOC"
-#endif
 #include "pymath.h"
-#include "pytime.h"
 #include "pymem.h"
 
 #include "object.h"
@@ -90,7 +76,7 @@
 #include "typeslots.h"
 #include "pyhash.h"
 
-#include "pydebug.h"
+#include "cpython/pydebug.h"
 
 #include "bytearrayobject.h"
 #include "bytesobject.h"
@@ -105,7 +91,7 @@
 #include "tupleobject.h"
 #include "listobject.h"
 #include "dictobject.h"
-#include "odictobject.h"
+#include "cpython/odictobject.h"
 #include "enumobject.h"
 #include "setobject.h"
 #include "methodobject.h"
@@ -120,6 +106,7 @@
 #include "sliceobject.h"
 #include "cellobject.h"
 #include "iterobject.h"
+#include "cpython/initconfig.h"
 #include "genobject.h"
 #include "descrobject.h"
 #include "genericaliasobject.h"
@@ -127,17 +114,15 @@
 #include "weakrefobject.h"
 #include "structseq.h"
 #include "namespaceobject.h"
-#include "picklebufobject.h"
+#include "cpython/picklebufobject.h"
+#include "cpython/pytime.h"
 
 #include "codecs.h"
 #include "pyerrors.h"
-
-#include "cpython/initconfig.h"
 #include "pythread.h"
 #include "pystate.h"
 #include "context.h"
 
-#include "pyarena.h"
 #include "modsupport.h"
 #include "compile.h"
 #include "pythonrun.h"
@@ -153,11 +138,11 @@
 
 #include "eval.h"
 
-#include "pyctype.h"
+#include "cpython/pyctype.h"
 #include "pystrtod.h"
 #include "pystrcmp.h"
 #include "fileutils.h"
-#include "pyfpe.h"
+#include "cpython/pyfpe.h"
 #include "tracemalloc.h"
 
 #endif /* !Py_PYTHON_H */
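
The Python.h changes above drop the crypt.h include from the public header, route several includes through their cpython/ counterparts, and broaden sanitizer detection: _Py_ADDRESS_SANITIZER is now defined for Clang via __has_feature(address_sanitizer) and for GCC via __SANITIZE_ADDRESS__. A minimal sketch (illustrative helper, not part of the headers) of branching on these macros:

    #include <Python.h>
    #include <stdio.h>

    static void report_sanitizers(void)
    {
    #ifdef _Py_MEMORY_SANITIZER
        puts("built with MemorySanitizer");     /* Clang only */
    #endif
    #ifdef _Py_ADDRESS_SANITIZER
        puts("built with AddressSanitizer");    /* Clang or GCC */
    #endif
    #if !defined(_Py_MEMORY_SANITIZER) && !defined(_Py_ADDRESS_SANITIZER)
        puts("no sanitizer instrumentation");
    #endif
    }
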
diff --git a/include/abstract.h b/include/abstract.h
index bb51c66..9eaab6b 100644
--- a/include/abstract.h
+++ b/include/abstract.h
@@ -318,7 +318,7 @@
 
 /* Takes an arbitrary object which must support the (character, single segment)
    buffer interface and returns a pointer to a read-only memory location
-   useable as character based input for subsequent processing.
+   usable as character based input for subsequent processing.
 
    Return 0 on success.  buffer and buffer_len are only set in case no error
    occurs. Otherwise, -1 is returned and an exception set. */
@@ -371,11 +371,21 @@
    returns itself. */
 PyAPI_FUNC(PyObject *) PyObject_GetIter(PyObject *);
 
-/* Returns 1 if the object 'obj' provides iterator protocols, and 0 otherwise.
+/* Takes an AsyncIterable object and returns an AsyncIterator for it.
+   This is typically a new iterator but if the argument is an AsyncIterator,
+   this returns itself. */
+PyAPI_FUNC(PyObject *) PyObject_GetAIter(PyObject *);
+
+/* Returns non-zero if the object 'obj' provides iterator protocols, and 0 otherwise.
 
    This function always succeeds. */
 PyAPI_FUNC(int) PyIter_Check(PyObject *);
 
+/* Returns non-zero if the object 'obj' provides AsyncIterator protocols, and 0 otherwise.
+
+   This function always succeeds. */
+PyAPI_FUNC(int) PyAIter_Check(PyObject *);
+
 /* Takes an iterator object and calls its tp_iternext slot,
    returning the next value.
 
@@ -385,6 +395,19 @@
    NULL with an exception means an error occurred. */
 PyAPI_FUNC(PyObject *) PyIter_Next(PyObject *);
 
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000
+
+/* Takes a generator, coroutine, or iterator object and sends the value into it.
+   Returns:
+   - PYGEN_RETURN (0) if generator has returned.
+     'result' parameter is filled with return value
+   - PYGEN_ERROR (-1) if exception was raised.
+     'result' parameter is NULL
+   - PYGEN_NEXT (1) if generator has yielded.
+     'result' parameter is filled with yielded value. */
+PyAPI_FUNC(PySendResult) PyIter_Send(PyObject *, PyObject *, PyObject **);
+#endif
+
 
 /* === Number Protocol ================================================== */
 
diff --git a/include/asdl.h b/include/asdl.h
deleted file mode 100644
index e962560..0000000
--- a/include/asdl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef Py_LIMITED_API
-#ifndef Py_ASDL_H
-#define Py_ASDL_H
-
-typedef PyObject * identifier;
-typedef PyObject * string;
-typedef PyObject * object;
-typedef PyObject * constant;
-
-/* It would be nice if the code generated by asdl_c.py was completely
-   independent of Python, but it is a goal the requires too much work
-   at this stage.  So, for example, I'll represent identifiers as
-   interned Python strings.
-*/
-
-/* XXX A sequence should be typed so that its use can be typechecked. */
-
-typedef struct {
-    Py_ssize_t size;
-    void *elements[1];
-} asdl_seq;
-
-typedef struct {
-    Py_ssize_t size;
-    int elements[1];
-} asdl_int_seq;
-
-asdl_seq *_Py_asdl_seq_new(Py_ssize_t size, PyArena *arena);
-asdl_int_seq *_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena);
-
-#define asdl_seq_GET(S, I) (S)->elements[(I)]
-#define asdl_seq_LEN(S) ((S) == NULL ? 0 : (S)->size)
-#ifdef Py_DEBUG
-#define asdl_seq_SET(S, I, V) \
-    do { \
-        Py_ssize_t _asdl_i = (I); \
-        assert((S) != NULL); \
-        assert(0 <= _asdl_i && _asdl_i < (S)->size); \
-        (S)->elements[_asdl_i] = (V); \
-    } while (0)
-#else
-#define asdl_seq_SET(S, I, V) (S)->elements[I] = (V)
-#endif
-
-#endif /* !Py_ASDL_H */
-#endif /* Py_LIMITED_API */
diff --git a/include/ast.h b/include/ast.h
deleted file mode 100644
index a8c52af..0000000
--- a/include/ast.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef Py_LIMITED_API
-#ifndef Py_AST_H
-#define Py_AST_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "Python-ast.h"   /* mod_ty */
-#include "node.h"         /* node */
-
-PyAPI_FUNC(int) PyAST_Validate(mod_ty);
-PyAPI_FUNC(mod_ty) PyAST_FromNode(
-    const node *n,
-    PyCompilerFlags *flags,
-    const char *filename,       /* decoded from the filesystem encoding */
-    PyArena *arena);
-PyAPI_FUNC(mod_ty) PyAST_FromNodeObject(
-    const node *n,
-    PyCompilerFlags *flags,
-    PyObject *filename,
-    PyArena *arena);
-
-/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */
-PyAPI_FUNC(PyObject *) _PyAST_ExprAsUnicode(expr_ty);
-
-/* Return the borrowed reference to the first literal string in the
-   sequence of statements or NULL if it doesn't start from a literal string.
-   Doesn't set exception. */
-PyAPI_FUNC(PyObject *) _PyAST_GetDocString(asdl_seq *);
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_AST_H */
-#endif /* !Py_LIMITED_API */
diff --git a/include/bitset.h b/include/bitset.h
deleted file mode 100644
index 6a2ac97..0000000
--- a/include/bitset.h
+++ /dev/null
@@ -1,23 +0,0 @@
-
-#ifndef Py_BITSET_H
-#define Py_BITSET_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Bitset interface */
-
-#define BYTE            char
-typedef BYTE *bitset;
-
-#define testbit(ss, ibit) (((ss)[BIT2BYTE(ibit)] & BIT2MASK(ibit)) != 0)
-
-#define BITSPERBYTE     (8*sizeof(BYTE))
-#define BIT2BYTE(ibit)  ((ibit) / BITSPERBYTE)
-#define BIT2SHIFT(ibit) ((ibit) % BITSPERBYTE)
-#define BIT2MASK(ibit)  (1 << BIT2SHIFT(ibit))
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_BITSET_H */
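
The deleted bitset.h implemented a plain char-array bitset. For reference, the same arithmetic as a standalone sketch of the removed testbit()/BIT2BYTE()/BIT2MASK() macros:

    #include <limits.h>

    /* Bit i lives in byte i / CHAR_BIT, at mask 1 << (i % CHAR_BIT). */
    static int bitset_test(const char *set, int ibit)
    {
        return (set[ibit / CHAR_BIT] & (1 << (ibit % CHAR_BIT))) != 0;
    }
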
diff --git a/include/boolobject.h b/include/boolobject.h
index bb8044a..cda6f89 100644
--- a/include/boolobject.h
+++ b/include/boolobject.h
@@ -15,15 +15,24 @@
 Don't forget to apply Py_INCREF() when returning either!!! */
 
 /* Don't use these directly */
-PyAPI_DATA(struct _longobject) _Py_FalseStruct, _Py_TrueStruct;
+PyAPI_DATA(struct _longobject) _Py_FalseStruct;
+PyAPI_DATA(struct _longobject) _Py_TrueStruct;
 
 /* Use these macros */
 #define Py_False ((PyObject *) &_Py_FalseStruct)
 #define Py_True ((PyObject *) &_Py_TrueStruct)
 
+// Test if an object is the True singleton, the same as "x is True" in Python.
+PyAPI_FUNC(int) Py_IsTrue(PyObject *x);
+#define Py_IsTrue(x) Py_Is((x), Py_True)
+
+// Test if an object is the False singleton, the same as "x is False" in Python.
+PyAPI_FUNC(int) Py_IsFalse(PyObject *x);
+#define Py_IsFalse(x) Py_Is((x), Py_False)
+
 /* Macros for returning Py_True or Py_False, respectively */
-#define Py_RETURN_TRUE return Py_INCREF(Py_True), Py_True
-#define Py_RETURN_FALSE return Py_INCREF(Py_False), Py_False
+#define Py_RETURN_TRUE return Py_NewRef(Py_True)
+#define Py_RETURN_FALSE return Py_NewRef(Py_False)
 
 /* Function to return a bool from a C long */
 PyAPI_FUNC(PyObject *) PyBool_FromLong(long);
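
Py_RETURN_TRUE/Py_RETURN_FALSE now expand to Py_NewRef() instead of a comma expression, and the new Py_IsTrue()/Py_IsFalse() wrap Py_Is() so extension code can avoid raw pointer comparisons against the singletons. A minimal METH_O sketch using both:

    static PyObject *
    negate(PyObject *self, PyObject *arg)
    {
        if (Py_IsTrue(arg)) {
            Py_RETURN_FALSE;
        }
        if (Py_IsFalse(arg)) {
            Py_RETURN_TRUE;
        }
        PyErr_SetString(PyExc_TypeError, "expected True or False");
        return NULL;
    }
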
diff --git a/include/bytesobject.h b/include/bytesobject.h
index 5062d8d..39c241a 100644
--- a/include/bytesobject.h
+++ b/include/bytesobject.h
@@ -1,5 +1,5 @@
 
-/* Bytes (String) object interface */
+/* Bytes object interface */
 
 #ifndef Py_BYTESOBJECT_H
 #define Py_BYTESOBJECT_H
@@ -10,23 +10,20 @@
 #include <stdarg.h>
 
 /*
-Type PyBytesObject represents a character string.  An extra zero byte is
+Type PyBytesObject represents a byte string.  An extra zero byte is
 reserved at the end to ensure it is zero-terminated, but a size is
 present so strings with null bytes in them can be represented.  This
 is an immutable object type.
 
-There are functions to create new string objects, to test
-an object for string-ness, and to get the
-string value.  The latter function returns a null pointer
+There are functions to create new bytes objects, to test
+an object for bytes-ness, and to get the
+byte string value.  The latter function returns a null pointer
 if the object is not of the proper type.
 There is a variant that takes an explicit size as well as a
 variant that assumes a zero-terminated string.  Note that none of the
-functions should be applied to nil objects.
+functions should be applied to a NULL pointer.
 */
 
-/* Caching the hash (ob_shash) saves recalculation of a string's hash value.
-   This significantly speeds up dict lookups. */
-
 PyAPI_DATA(PyTypeObject) PyBytes_Type;
 PyAPI_DATA(PyTypeObject) PyBytesIter_Type;
 
@@ -50,26 +47,16 @@
                                             const char *, Py_ssize_t,
                                             const char *);
 
-/* Provides access to the internal data buffer and size of a string
-   object or the default encoded version of a Unicode object. Passing
-   NULL as *len parameter will force the string buffer to be
-   0-terminated (passing a string with embedded NULL characters will
+/* Provides access to the internal data buffer and size of a bytes object.
+   Passing NULL as the len parameter will force the string buffer to be
+   0-terminated (passing a string with embedded NUL characters will
    cause an exception).  */
 PyAPI_FUNC(int) PyBytes_AsStringAndSize(
-    PyObject *obj,      /* string or Unicode object */
+    PyObject *obj,      /* bytes object */
     char **s,           /* pointer to buffer variable */
-    Py_ssize_t *len     /* pointer to length variable or NULL
-                           (only possible for 0-terminated
-                           strings) */
+    Py_ssize_t *len     /* pointer to length variable or NULL */
     );
 
-/* Flags used by string formatting */
-#define F_LJUST (1<<0)
-#define F_SIGN  (1<<1)
-#define F_BLANK (1<<2)
-#define F_ALT   (1<<3)
-#define F_ZERO  (1<<4)
-
 #ifndef Py_LIMITED_API
 #  define Py_CPYTHON_BYTESOBJECT_H
 #  include  "cpython/bytesobject.h"
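
The rewritten PyBytes_AsStringAndSize() comment pins down the contract: a non-NULL len pointer permits embedded NUL bytes, while len == NULL demands a NUL-free, 0-terminated string. A minimal sketch:

    #include <stdio.h>

    static int
    write_payload(PyObject *obj)        /* obj must be a bytes object */
    {
        char *buf;
        Py_ssize_t len;
        if (PyBytes_AsStringAndSize(obj, &buf, &len) < 0) {
            return -1;                  /* exception already set */
        }
        fwrite(buf, 1, (size_t)len, stdout);
        return 0;
    }
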
diff --git a/include/cellobject.h b/include/cellobject.h
index f12aa90..81bc784 100644
--- a/include/cellobject.h
+++ b/include/cellobject.h
@@ -20,7 +20,7 @@
 PyAPI_FUNC(int) PyCell_Set(PyObject *, PyObject *);
 
 #define PyCell_GET(op) (((PyCellObject *)(op))->ob_ref)
-#define PyCell_SET(op, v) (((PyCellObject *)(op))->ob_ref = v)
+#define PyCell_SET(op, v) ((void)(((PyCellObject *)(op))->ob_ref = v))
 
 #ifdef __cplusplus
 }
diff --git a/include/ceval.h b/include/ceval.h
index 0f372e2..0f68766 100644
--- a/include/ceval.h
+++ b/include/ceval.h
@@ -67,14 +67,6 @@
 PyAPI_FUNC(int) Py_EnterRecursiveCall(const char *where);
 PyAPI_FUNC(void) Py_LeaveRecursiveCall(void);
 
-#define Py_ALLOW_RECURSION \
-  do { unsigned char _old = PyThreadState_GET()->recursion_critical;\
-    PyThreadState_GET()->recursion_critical = 1;
-
-#define Py_END_ALLOW_RECURSION \
-    PyThreadState_GET()->recursion_critical = _old; \
-  } while(0);
-
 PyAPI_FUNC(const char *) PyEval_GetFuncName(PyObject *);
 PyAPI_FUNC(const char *) PyEval_GetFuncDesc(PyObject *);
 
diff --git a/include/codecs.h b/include/codecs.h
index 3ad0f2b..37ecfb4 100644
--- a/include/codecs.h
+++ b/include/codecs.h
@@ -27,6 +27,14 @@
        PyObject *search_function
        );
 
+/* Unregister a codec search function and clear the registry's cache.
+   If the search function is not registered, do nothing.
+   Return 0 on success. Raise an exception and return -1 on error. */
+
+PyAPI_FUNC(int) PyCodec_Unregister(
+       PyObject *search_function
+       );
+
 /* Codec registry lookup API.
 
    Looks up the given encoding and returns a CodecInfo object with
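
PyCodec_Unregister() is the new counterpart to PyCodec_Register(); per the comment above, it also clears the registry's lookup cache, so previously resolved encodings get re-resolved. A sketch with a hypothetical search-function object:

    static int
    with_temporary_codec(PyObject *search_fn)   /* hypothetical callable */
    {
        if (PyCodec_Register(search_fn) < 0) {
            return -1;
        }
        /* ... perform lookups that may hit the temporary codec ... */
        return PyCodec_Unregister(search_fn);   /* 0 on success, -1 on error */
    }
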
diff --git a/include/compile.h b/include/compile.h
index 98adee3..3c5acd7 100644
--- a/include/compile.h
+++ b/include/compile.h
@@ -1,109 +1,9 @@
 #ifndef Py_COMPILE_H
 #define Py_COMPILE_H
-
-#ifndef Py_LIMITED_API
-
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-/* Public interface */
-struct _node; /* Declare the existence of this type */
-#ifndef Py_BUILD_CORE
-Py_DEPRECATED(3.9)
-#endif
-PyAPI_FUNC(PyCodeObject *) PyNode_Compile(struct _node *, const char *);
-/* XXX (ncoghlan): Unprefixed type name in a public API! */
-
-#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \
-                   CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \
-                   CO_FUTURE_UNICODE_LITERALS | CO_FUTURE_BARRY_AS_BDFL | \
-                   CO_FUTURE_GENERATOR_STOP | CO_FUTURE_ANNOTATIONS)
-#define PyCF_MASK_OBSOLETE (CO_NESTED)
-
-/* bpo-39562: CO_FUTURE_ and PyCF_ constants must be kept unique.
-   PyCF_ constants can use bits from 0x0100 to 0x10000.
-   CO_FUTURE_ constants use bits starting at 0x20000. */
-#define PyCF_SOURCE_IS_UTF8  0x0100
-#define PyCF_DONT_IMPLY_DEDENT 0x0200
-#define PyCF_ONLY_AST 0x0400
-#define PyCF_IGNORE_COOKIE 0x0800
-#define PyCF_TYPE_COMMENTS 0x1000
-#define PyCF_ALLOW_TOP_LEVEL_AWAIT 0x2000
-#define PyCF_COMPILE_MASK (PyCF_ONLY_AST | PyCF_ALLOW_TOP_LEVEL_AWAIT | \
-                           PyCF_TYPE_COMMENTS | PyCF_DONT_IMPLY_DEDENT)
-
-#ifndef Py_LIMITED_API
-typedef struct {
-    int cf_flags;  /* bitmask of CO_xxx flags relevant to future */
-    int cf_feature_version;  /* minor Python version (PyCF_ONLY_AST) */
-} PyCompilerFlags;
-
-#define _PyCompilerFlags_INIT \
-    (PyCompilerFlags){.cf_flags = 0, .cf_feature_version = PY_MINOR_VERSION}
-#endif
-
-/* Future feature support */
-
-typedef struct {
-    int ff_features;      /* flags set by future statements */
-    int ff_lineno;        /* line number of last future statement */
-} PyFutureFeatures;
-
-#define FUTURE_NESTED_SCOPES "nested_scopes"
-#define FUTURE_GENERATORS "generators"
-#define FUTURE_DIVISION "division"
-#define FUTURE_ABSOLUTE_IMPORT "absolute_import"
-#define FUTURE_WITH_STATEMENT "with_statement"
-#define FUTURE_PRINT_FUNCTION "print_function"
-#define FUTURE_UNICODE_LITERALS "unicode_literals"
-#define FUTURE_BARRY_AS_BDFL "barry_as_FLUFL"
-#define FUTURE_GENERATOR_STOP "generator_stop"
-#define FUTURE_ANNOTATIONS "annotations"
-
-struct _mod; /* Declare the existence of this type */
-#define PyAST_Compile(mod, s, f, ar) PyAST_CompileEx(mod, s, f, -1, ar)
-PyAPI_FUNC(PyCodeObject *) PyAST_CompileEx(
-    struct _mod *mod,
-    const char *filename,       /* decoded from the filesystem encoding */
-    PyCompilerFlags *flags,
-    int optimize,
-    PyArena *arena);
-PyAPI_FUNC(PyCodeObject *) PyAST_CompileObject(
-    struct _mod *mod,
-    PyObject *filename,
-    PyCompilerFlags *flags,
-    int optimize,
-    PyArena *arena);
-PyAPI_FUNC(PyFutureFeatures *) PyFuture_FromAST(
-    struct _mod * mod,
-    const char *filename        /* decoded from the filesystem encoding */
-    );
-PyAPI_FUNC(PyFutureFeatures *) PyFuture_FromASTObject(
-    struct _mod * mod,
-    PyObject *filename
-    );
-
-/* _Py_Mangle is defined in compile.c */
-PyAPI_FUNC(PyObject*) _Py_Mangle(PyObject *p, PyObject *name);
-
-#define PY_INVALID_STACK_EFFECT INT_MAX
-PyAPI_FUNC(int) PyCompile_OpcodeStackEffect(int opcode, int oparg);
-PyAPI_FUNC(int) PyCompile_OpcodeStackEffectWithJump(int opcode, int oparg, int jump);
-
-typedef struct {
-    int optimize;
-    int ff_features;
-} _PyASTOptimizeState;
-
-PyAPI_FUNC(int) _PyAST_Optimize(struct _mod *, PyArena *arena, _PyASTOptimizeState *state);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* !Py_LIMITED_API */
-
 /* These definitions must match corresponding definitions in graminit.h. */
 #define Py_single_input 256
 #define Py_file_input 257
@@ -113,4 +13,13 @@
 /* This doesn't need to match anything */
 #define Py_fstring_input 800
 
+#ifndef Py_LIMITED_API
+#  define Py_CPYTHON_COMPILE_H
+#  include "cpython/compile.h"
+#  undef Py_CPYTHON_COMPILE_H
+#endif
+
+#ifdef __cplusplus
+}
+#endif
 #endif /* !Py_COMPILE_H */
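
Callers are unaffected by the compile.h split: without Py_LIMITED_API, the PyCF_* flags and PyCompilerFlags are pulled in from the new cpython/compile.h (which also adds PyCF_ALLOW_INCOMPLETE_INPUT). A minimal sketch compiling source to an AST object:

    static PyObject *
    parse_to_ast(const char *source)
    {
        PyCompilerFlags flags = _PyCompilerFlags_INIT;
        flags.cf_flags |= PyCF_ONLY_AST;        /* ast.Module, not a code object */
        return Py_CompileStringFlags(source, "<demo>", Py_file_input, &flags);
    }
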
diff --git a/include/cpython/abstract.h b/include/cpython/abstract.h
index 7bc8083..db85021 100644
--- a/include/cpython/abstract.h
+++ b/include/cpython/abstract.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* === Object Protocol ================================================== */
 
 #ifdef PY_SSIZE_T_CLEAN
@@ -67,7 +63,7 @@
 {
     PyTypeObject *tp;
     Py_ssize_t offset;
-    vectorcallfunc *ptr;
+    vectorcallfunc ptr;
 
     assert(callable != NULL);
     tp = Py_TYPE(callable);
@@ -77,8 +73,8 @@
     assert(PyCallable_Check(callable));
     offset = tp->tp_vectorcall_offset;
     assert(offset > 0);
-    ptr = (vectorcallfunc *)(((char *)callable) + offset);
-    return *ptr;
+    memcpy(&ptr, (char *) callable + offset, sizeof(ptr));
+    return ptr;
 }
 
 /* Call the callable object 'callable' with the "vectorcall" calling
@@ -123,7 +119,7 @@
 PyObject_Vectorcall(PyObject *callable, PyObject *const *args,
                      size_t nargsf, PyObject *kwnames)
 {
-    PyThreadState *tstate = PyThreadState_GET();
+    PyThreadState *tstate = PyThreadState_Get();
     return _PyObject_VectorcallTstate(tstate, callable,
                                       args, nargsf, kwnames);
 }
@@ -159,7 +155,7 @@
 static inline PyObject *
 _PyObject_FastCall(PyObject *func, PyObject *const *args, Py_ssize_t nargs)
 {
-    PyThreadState *tstate = PyThreadState_GET();
+    PyThreadState *tstate = PyThreadState_Get();
     return _PyObject_FastCallTstate(tstate, func, args, nargs);
 }
 
@@ -168,7 +164,7 @@
    PyObject_CallNoArgs(). */
 static inline PyObject *
 _PyObject_CallNoArg(PyObject *func) {
-    PyThreadState *tstate = PyThreadState_GET();
+    PyThreadState *tstate = PyThreadState_Get();
     return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
 }
 
@@ -183,7 +179,7 @@
     assert(arg != NULL);
     args = _args + 1;  // For PY_VECTORCALL_ARGUMENTS_OFFSET
     args[0] = arg;
-    tstate = PyThreadState_GET();
+    tstate = PyThreadState_Get();
     nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET;
     return _PyObject_VectorcallTstate(tstate, func, args, nargsf, NULL);
 }
@@ -329,12 +325,6 @@
 /* Releases a Py_buffer obtained from getbuffer ParseTuple's "s*". */
 PyAPI_FUNC(void) PyBuffer_Release(Py_buffer *view);
 
-/* ==== Iterators ================================================ */
-
-#define PyIter_Check(obj) \
-    (Py_TYPE(obj)->tp_iternext != NULL && \
-     Py_TYPE(obj)->tp_iternext != &_PyObject_NextNotImplemented)
-
 /* === Sequence protocol ================================================ */
 
 /* Assume tp_as_sequence and sq_item exist and that 'i' does not
@@ -379,6 +369,5 @@
 /* Convert Python int to Py_ssize_t. Do nothing if the argument is None. */
 PyAPI_FUNC(int) _Py_convert_optional_to_ssize_t(PyObject *, void *);
 
-#ifdef __cplusplus
-}
-#endif
+/* Same as PyNumber_Index but can return an instance of a subclass of int. */
+PyAPI_FUNC(PyObject *) _PyNumber_Index(PyObject *o);
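
Two details above: PyVectorcall_Function() now memcpy's the function pointer out of the object instead of dereferencing a cast pointer (sidestepping strict-aliasing trouble), and PyThreadState_GET() becomes the public PyThreadState_Get(). Calling through the protocol is unchanged; a minimal sketch with one positional argument:

    static PyObject *
    call_one(PyObject *callable, PyObject *arg)
    {
        PyObject *args[1] = {arg};
        return PyObject_Vectorcall(callable, args, 1, NULL);   /* no kwnames */
    }
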
diff --git a/include/cpython/bytesobject.h b/include/cpython/bytesobject.h
index f284c58..6b3f552 100644
--- a/include/cpython/bytesobject.h
+++ b/include/cpython/bytesobject.h
@@ -10,7 +10,7 @@
     /* Invariants:
      *     ob_sval contains space for 'ob_size+1' elements.
      *     ob_sval[ob_size] == 0.
-     *     ob_shash is the hash of the string or -1 if not computed yet.
+     *     ob_shash is the hash of the byte string or -1 if not computed yet.
      */
 } PyBytesObject;
 
diff --git a/include/cpython/ceval.h b/include/cpython/ceval.h
index e1922a6..0633892 100644
--- a/include/cpython/ceval.h
+++ b/include/cpython/ceval.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 PyAPI_FUNC(void) PyEval_SetProfile(Py_tracefunc, PyObject *);
 PyAPI_DATA(int) _PyEval_SetProfile(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);
 PyAPI_FUNC(void) PyEval_SetTrace(Py_tracefunc, PyObject *);
@@ -32,7 +28,3 @@
 
 PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *);
 PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/include/cpython/code.h b/include/cpython/code.h
index cda28ac..fa64085 100644
--- a/include/cpython/code.h
+++ b/include/cpython/code.h
@@ -38,7 +38,7 @@
     Py_ssize_t *co_cell2arg;    /* Maps cell vars which are arguments. */
     PyObject *co_filename;      /* unicode (where it was loaded from) */
     PyObject *co_name;          /* unicode (name, for reference) */
-    PyObject *co_lnotab;        /* string (encoding addr<->lineno mapping) See
+    PyObject *co_linetable;     /* string (encoding addr<->lineno mapping). See
                                    Objects/lnotab_notes.txt for details. */
     void *co_zombieframe;       /* for optimization only (see frameobject.c) */
     PyObject *co_weakreflist;   /* to support weakrefs to code objects */
@@ -135,16 +135,23 @@
 PyAPI_FUNC(int) PyCode_Addr2Line(PyCodeObject *, int);
 
 /* for internal use only */
-typedef struct _addr_pair {
-        int ap_lower;
-        int ap_upper;
-} PyAddrPair;
+struct _opaque {
+    int computed_line;
+    const char *lo_next;
+    const char *limit;
+};
+
+typedef struct _line_offsets {
+    int ar_start;
+    int ar_end;
+    int ar_line;
+    struct _opaque opaque;
+} PyCodeAddressRange;
 
 /* Update *bounds to describe the first and one-past-the-last instructions in the
    same line as lasti.  Return the number of that line.
 */
-PyAPI_FUNC(int) _PyCode_CheckLineNumber(PyCodeObject* co,
-                                        int lasti, PyAddrPair *bounds);
+PyAPI_FUNC(int) _PyCode_CheckLineNumber(int lasti, PyCodeAddressRange *bounds);
 
 /* Create a comparable key used to compare constants taking in account the
  * object type. It is used to make sure types are not coerced (e.g., float and
@@ -163,3 +170,15 @@
                                  void **extra);
 PyAPI_FUNC(int) _PyCode_SetExtra(PyObject *code, Py_ssize_t index,
                                  void *extra);
+
+/** API for initializing the line number table. */
+int _PyCode_InitAddressRange(PyCodeObject* co, PyCodeAddressRange *bounds);
+
+/** Out of process API for initializing the line number table. */
+void PyLineTable_InitAddressRange(const char *linetable, Py_ssize_t length, int firstlineno, PyCodeAddressRange *range);
+
+/** API for traversing the line number table. */
+int PyLineTable_NextAddressRange(PyCodeAddressRange *range);
+int PyLineTable_PreviousAddressRange(PyCodeAddressRange *range);
+
+
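
co_lnotab becomes co_linetable, and the PyAddrPair in/out parameter is replaced by PyCodeAddressRange, which carries its own cursor (the _opaque fields) so ranges can be traversed without re-scanning. A sketch of walking a code object's line ranges with the new API (internal, not stable; `co` obtained elsewhere):

    #include <stdio.h>

    static void
    dump_line_ranges(PyCodeObject *co)
    {
        PyCodeAddressRange range;
        _PyCode_InitAddressRange(co, &range);
        while (PyLineTable_NextAddressRange(&range)) {
            printf("bytecode [%d, %d) -> line %d\n",
                   range.ar_start, range.ar_end, range.ar_line);
        }
    }
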
diff --git a/include/cpython/compile.h b/include/cpython/compile.h
new file mode 100644
index 0000000..518a376
--- /dev/null
+++ b/include/cpython/compile.h
@@ -0,0 +1,54 @@
+#ifndef Py_CPYTHON_COMPILE_H
+#  error "this header file must not be included directly"
+#endif
+
+/* Public interface */
+#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \
+                   CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \
+                   CO_FUTURE_UNICODE_LITERALS | CO_FUTURE_BARRY_AS_BDFL | \
+                   CO_FUTURE_GENERATOR_STOP | CO_FUTURE_ANNOTATIONS)
+#define PyCF_MASK_OBSOLETE (CO_NESTED)
+
+/* bpo-39562: CO_FUTURE_ and PyCF_ constants must be kept unique.
+   PyCF_ constants can use bits from 0x0100 to 0x10000.
+   CO_FUTURE_ constants use bits starting at 0x20000. */
+#define PyCF_SOURCE_IS_UTF8  0x0100
+#define PyCF_DONT_IMPLY_DEDENT 0x0200
+#define PyCF_ONLY_AST 0x0400
+#define PyCF_IGNORE_COOKIE 0x0800
+#define PyCF_TYPE_COMMENTS 0x1000
+#define PyCF_ALLOW_TOP_LEVEL_AWAIT 0x2000
+#define PyCF_ALLOW_INCOMPLETE_INPUT 0x4000
+#define PyCF_COMPILE_MASK (PyCF_ONLY_AST | PyCF_ALLOW_TOP_LEVEL_AWAIT | \
+                           PyCF_TYPE_COMMENTS | PyCF_DONT_IMPLY_DEDENT | \
+                           PyCF_ALLOW_INCOMPLETE_INPUT)
+
+typedef struct {
+    int cf_flags;  /* bitmask of CO_xxx flags relevant to future */
+    int cf_feature_version;  /* minor Python version (PyCF_ONLY_AST) */
+} PyCompilerFlags;
+
+#define _PyCompilerFlags_INIT \
+    (PyCompilerFlags){.cf_flags = 0, .cf_feature_version = PY_MINOR_VERSION}
+
+/* Future feature support */
+
+typedef struct {
+    int ff_features;      /* flags set by future statements */
+    int ff_lineno;        /* line number of last future statement */
+} PyFutureFeatures;
+
+#define FUTURE_NESTED_SCOPES "nested_scopes"
+#define FUTURE_GENERATORS "generators"
+#define FUTURE_DIVISION "division"
+#define FUTURE_ABSOLUTE_IMPORT "absolute_import"
+#define FUTURE_WITH_STATEMENT "with_statement"
+#define FUTURE_PRINT_FUNCTION "print_function"
+#define FUTURE_UNICODE_LITERALS "unicode_literals"
+#define FUTURE_BARRY_AS_BDFL "barry_as_FLUFL"
+#define FUTURE_GENERATOR_STOP "generator_stop"
+#define FUTURE_ANNOTATIONS "annotations"
+
+#define PY_INVALID_STACK_EFFECT INT_MAX
+PyAPI_FUNC(int) PyCompile_OpcodeStackEffect(int opcode, int oparg);
+PyAPI_FUNC(int) PyCompile_OpcodeStackEffectWithJump(int opcode, int oparg, int jump);
diff --git a/include/cpython/dictobject.h b/include/cpython/dictobject.h
index e33a0d1..641d7bd 100644
--- a/include/cpython/dictobject.h
+++ b/include/cpython/dictobject.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct _dictkeysobject PyDictKeysObject;
 
 /* The ma_values pointer is NULL for a combined table
@@ -26,7 +22,7 @@
     /* If ma_values is NULL, the table is "combined": keys and values
        are stored in ma_keys.
 
-       If ma_values is not NULL, the table is splitted:
+       If ma_values is not NULL, the table is split:
        keys are stored in ma_keys and values are stored in ma_values */
     PyObject **ma_values;
 } PyDictObject;
@@ -45,13 +41,13 @@
 PyAPI_FUNC(int) _PyDict_DelItemIf(PyObject *mp, PyObject *key,
                                   int (*predicate)(PyObject *value));
 PyDictKeysObject *_PyDict_NewKeysForClass(void);
-PyAPI_FUNC(PyObject *) PyObject_GenericGetDict(PyObject *, void *);
 PyAPI_FUNC(int) _PyDict_Next(
     PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value, Py_hash_t *hash);
 
 /* Get the number of items of a dictionary. */
 #define PyDict_GET_SIZE(mp)  (assert(PyDict_Check(mp)),((PyDictObject *)mp)->ma_used)
-PyAPI_FUNC(int) _PyDict_Contains(PyObject *mp, PyObject *key, Py_hash_t hash);
+PyAPI_FUNC(int) _PyDict_Contains_KnownHash(PyObject *, PyObject *, Py_hash_t);
+PyAPI_FUNC(int) _PyDict_ContainsId(PyObject *, struct _Py_Identifier *);
 PyAPI_FUNC(PyObject *) _PyDict_NewPresized(Py_ssize_t minused);
 PyAPI_FUNC(void) _PyDict_MaybeUntrack(PyObject *mp);
 PyAPI_FUNC(int) _PyDict_HasOnlyStringKeys(PyObject *mp);
@@ -68,7 +64,6 @@
    argument is raised.
 */
 PyAPI_FUNC(int) _PyDict_MergeEx(PyObject *mp, PyObject *other, int override);
-PyAPI_FUNC(PyObject *) _PyDict_GetItemId(PyObject *dp, struct _Py_Identifier *key);
 PyAPI_FUNC(int) _PyDict_SetItemId(PyObject *dp, struct _Py_Identifier *key, PyObject *item);
 
 PyAPI_FUNC(int) _PyDict_DelItemId(PyObject *mp, struct _Py_Identifier *key);
@@ -76,6 +71,7 @@
 
 int _PyObjectDict_SetItem(PyTypeObject *tp, PyObject **dictptr, PyObject *name, PyObject *value);
 PyObject *_PyDict_LoadGlobal(PyDictObject *, PyDictObject *, PyObject *);
+Py_ssize_t _PyDict_GetItemHint(PyDictObject *, PyObject *, Py_ssize_t, PyObject **);
 
 /* _PyDictView */
 
@@ -86,7 +82,3 @@
 
 PyAPI_FUNC(PyObject *) _PyDictView_New(PyObject *, PyTypeObject *);
 PyAPI_FUNC(PyObject *) _PyDictView_Intersect(PyObject* self, PyObject *other);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/include/cpython/fileobject.h b/include/cpython/fileobject.h
index 3005ce1..cff2243 100644
--- a/include/cpython/fileobject.h
+++ b/include/cpython/fileobject.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 PyAPI_FUNC(char *) Py_UniversalNewlineFgets(char *, int, FILE*, PyObject *);
 
 /* The std printer acts as a preliminary sys.stderr until the new io
@@ -19,6 +15,4 @@
 PyAPI_FUNC(PyObject *) PyFile_OpenCodeObject(PyObject *path);
 PyAPI_FUNC(int) PyFile_SetOpenCodeHook(Py_OpenCodeHookFunction hook, void *userData);
 
-#ifdef __cplusplus
-}
-#endif
+PyAPI_FUNC(int) _PyLong_FileDescriptor_Converter(PyObject *, void *);
diff --git a/include/cpython/fileutils.h b/include/cpython/fileutils.h
index e79d03e..ccf37e9 100644
--- a/include/cpython/fileutils.h
+++ b/include/cpython/fileutils.h
@@ -32,6 +32,9 @@
     int current_locale,
     _Py_error_handler errors);
 
+PyAPI_FUNC(char*) _Py_EncodeLocaleRaw(
+    const wchar_t *text,
+    size_t *error_pos);
 
 PyAPI_FUNC(PyObject *) _Py_device_encoding(int);
 
@@ -95,10 +98,6 @@
     const wchar_t *path,
     const wchar_t *mode);
 
-PyAPI_FUNC(FILE*) _Py_fopen(
-    const char *pathname,
-    const char *mode);
-
 PyAPI_FUNC(FILE*) _Py_fopen_obj(
     PyObject *path,
     const char *mode);
@@ -162,4 +161,12 @@
 PyAPI_FUNC(int) _Py_get_blocking(int fd);
 
 PyAPI_FUNC(int) _Py_set_blocking(int fd, int blocking);
-#endif   /* !MS_WINDOWS */
+#else   /* MS_WINDOWS */
+PyAPI_FUNC(void*) _Py_get_osfhandle_noraise(int fd);
+
+PyAPI_FUNC(void*) _Py_get_osfhandle(int fd);
+
+PyAPI_FUNC(int) _Py_open_osfhandle_noraise(void *handle, int flags);
+
+PyAPI_FUNC(int) _Py_open_osfhandle(void *handle, int flags);
+#endif  /* MS_WINDOWS */
diff --git a/include/cpython/frameobject.h b/include/cpython/frameobject.h
index 36a51ba..5122ec4 100644
--- a/include/cpython/frameobject.h
+++ b/include/cpython/frameobject.h
@@ -4,9 +4,20 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* These values are chosen so that the inline functions below all
+ * compare f_state to zero.
+ */
+enum _framestate {
+    FRAME_CREATED = -2,
+    FRAME_SUSPENDED = -1,
+    FRAME_EXECUTING = 0,
+    FRAME_RETURNED = 1,
+    FRAME_UNWINDING = 2,
+    FRAME_RAISED = 3,
+    FRAME_CLEARED = 4
+};
+
+typedef signed char PyFrameState;
 
 typedef struct {
     int b_type;                 /* what kind of block this is */
@@ -22,11 +33,8 @@
     PyObject *f_globals;        /* global symbol table (PyDictObject) */
     PyObject *f_locals;         /* local symbol table (any mapping) */
     PyObject **f_valuestack;    /* points after the last local */
-    /* Next free slot in f_valuestack.  Frame creation sets to f_valuestack.
-       Frame evaluation usually NULLs it, but a frame that yields sets it
-       to the current stack top. */
-    PyObject **f_stacktop;
     PyObject *f_trace;          /* Trace function */
+    int f_stackdepth;           /* Depth of value stack */
     char f_trace_lines;         /* Emit per-line trace events? */
     char f_trace_opcodes;       /* Emit per-opcode trace events? */
 
@@ -34,18 +42,24 @@
     PyObject *f_gen;
 
     int f_lasti;                /* Last instruction if called */
-    /* Call PyFrame_GetLineNumber() instead of reading this field
-       directly.  As of 2.3 f_lineno is only valid when tracing is
-       active (i.e. when f_trace is set).  At other times we use
-       PyCode_Addr2Line to calculate the line from the current
-       bytecode index. */
-    int f_lineno;               /* Current line number */
+    int f_lineno;               /* Current line number. Only valid if non-zero */
     int f_iblock;               /* index in f_blockstack */
-    char f_executing;           /* whether the frame is still executing */
+    PyFrameState f_state;       /* What state the frame is in */
     PyTryBlock f_blockstack[CO_MAXBLOCKS]; /* for try and loop blocks */
     PyObject *f_localsplus[1];  /* locals+stack, dynamically sized */
 };
 
+static inline int _PyFrame_IsRunnable(struct _frame *f) {
+    return f->f_state < FRAME_EXECUTING;
+}
+
+static inline int _PyFrame_IsExecuting(struct _frame *f) {
+    return f->f_state == FRAME_EXECUTING;
+}
+
+static inline int _PyFrameHasCompleted(struct _frame *f) {
+    return f->f_state > FRAME_EXECUTING;
+}
 
 /* Standard object interface */
 
@@ -57,8 +71,8 @@
                                         PyObject *, PyObject *);
 
 /* only internal use */
-PyFrameObject* _PyFrame_New_NoTrack(PyThreadState *, PyCodeObject *,
-                                    PyObject *, PyObject *);
+PyFrameObject*
+_PyFrame_New_NoTrack(PyThreadState *, PyFrameConstructor *, PyObject *);
 
 
 /* The rest of the interface is specific for frame objects */
@@ -78,7 +92,3 @@
 PyAPI_FUNC(void) _PyFrame_DebugMallocStats(FILE *out);
 
 PyAPI_FUNC(PyFrameObject *) PyFrame_GetBack(PyFrameObject *frame);
-
-#ifdef __cplusplus
-}
-#endif
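
The frame's single f_executing byte becomes an explicit PyFrameState, with values ordered so the three inline predicates above reduce to comparisons against FRAME_EXECUTING (0). A sketch:

    static const char *
    frame_phase(PyFrameObject *f)
    {
        if (_PyFrame_IsExecuting(f)) {
            return "executing";
        }
        if (_PyFrame_IsRunnable(f)) {
            return "created or suspended";   /* f_state < FRAME_EXECUTING */
        }
        return "completed";                  /* _PyFrameHasCompleted(f) */
    }
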
diff --git a/include/cpython/import.h b/include/cpython/import.h
index c1b4712..dd5bbdb 100644
--- a/include/cpython/import.h
+++ b/include/cpython/import.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 PyMODINIT_FUNC PyInit__imp(void);
 
 PyAPI_FUNC(int) _PyImport_IsInitialized(PyInterpreterState *);
@@ -17,7 +13,8 @@
 PyAPI_FUNC(void) _PyImport_AcquireLock(void);
 PyAPI_FUNC(int) _PyImport_ReleaseLock(void);
 
-PyAPI_FUNC(PyObject *) _PyImport_FindExtensionObject(PyObject *, PyObject *);
+/* Obsolete since 3.5, will be removed in 3.11. */
+Py_DEPRECATED(3.10) PyAPI_FUNC(PyObject *) _PyImport_FindExtensionObject(PyObject *, PyObject *);
 
 PyAPI_FUNC(int) _PyImport_FixupBuiltin(
     PyObject *mod,
@@ -44,7 +41,3 @@
    collection of frozen modules: */
 
 PyAPI_DATA(const struct _frozen *) PyImport_FrozenModules;
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/include/cpython/initconfig.h b/include/cpython/initconfig.h
index 0a256d4..583165b 100644
--- a/include/cpython/initconfig.h
+++ b/include/cpython/initconfig.h
@@ -44,7 +44,7 @@
 
 /* --- PyPreConfig ----------------------------------------------- */
 
-typedef struct {
+typedef struct PyPreConfig {
     int _config_init;     /* _PyConfigInitEnum value */
 
     /* Parse Py_PreInitializeFromBytesArgs() arguments?
@@ -130,298 +130,85 @@
 
 /* --- PyConfig ---------------------------------------------- */
 
-typedef struct {
+/* This structure is best documented in the Doc/c-api/init_config.rst file. */
+typedef struct PyConfig {
     int _config_init;     /* _PyConfigInitEnum value */
 
-    int isolated;         /* Isolated mode? see PyPreConfig.isolated */
-    int use_environment;  /* Use environment variables? see PyPreConfig.use_environment */
-    int dev_mode;         /* Python Development Mode? See PyPreConfig.dev_mode */
-
-    /* Install signal handlers? Yes by default. */
+    int isolated;
+    int use_environment;
+    int dev_mode;
     int install_signal_handlers;
-
-    int use_hash_seed;      /* PYTHONHASHSEED=x */
+    int use_hash_seed;
     unsigned long hash_seed;
-
-    /* Enable faulthandler?
-       Set to 1 by -X faulthandler and PYTHONFAULTHANDLER. -1 means unset. */
     int faulthandler;
-
-    /* Enable PEG parser?
-       1 by default, set to 0 by -X oldparser and PYTHONOLDPARSER */
-    int _use_peg_parser;
-
-    /* Enable tracemalloc?
-       Set by -X tracemalloc=N and PYTHONTRACEMALLOC. -1 means unset */
     int tracemalloc;
-
-    int import_time;        /* PYTHONPROFILEIMPORTTIME, -X importtime */
-    int show_ref_count;     /* -X showrefcount */
-    int dump_refs;          /* PYTHONDUMPREFS */
-    int malloc_stats;       /* PYTHONMALLOCSTATS */
-
-    /* Python filesystem encoding and error handler:
-       sys.getfilesystemencoding() and sys.getfilesystemencodeerrors().
-
-       Default encoding and error handler:
-
-       * if Py_SetStandardStreamEncoding() has been called: they have the
-         highest priority;
-       * PYTHONIOENCODING environment variable;
-       * The UTF-8 Mode uses UTF-8/surrogateescape;
-       * If Python forces the usage of the ASCII encoding (ex: C locale
-         or POSIX locale on FreeBSD or HP-UX), use ASCII/surrogateescape;
-       * locale encoding: ANSI code page on Windows, UTF-8 on Android and
-         VxWorks, LC_CTYPE locale encoding on other platforms;
-       * On Windows, "surrogateescape" error handler;
-       * "surrogateescape" error handler if the LC_CTYPE locale is "C" or "POSIX";
-       * "surrogateescape" error handler if the LC_CTYPE locale has been coerced
-         (PEP 538);
-       * "strict" error handler.
-
-       Supported error handlers: "strict", "surrogateescape" and
-       "surrogatepass". The surrogatepass error handler is only supported
-       if Py_DecodeLocale() and Py_EncodeLocale() use directly the UTF-8 codec;
-       it's only used on Windows.
-
-       initfsencoding() updates the encoding to the Python codec name.
-       For example, "ANSI_X3.4-1968" is replaced with "ascii".
-
-       On Windows, sys._enablelegacywindowsfsencoding() sets the
-       encoding/errors to mbcs/replace at runtime.
-
-
-       See Py_FileSystemDefaultEncoding and Py_FileSystemDefaultEncodeErrors.
-       */
+    int import_time;
+    int show_ref_count;
+    int dump_refs;
+    int malloc_stats;
     wchar_t *filesystem_encoding;
     wchar_t *filesystem_errors;
-
-    wchar_t *pycache_prefix;  /* PYTHONPYCACHEPREFIX, -X pycache_prefix=PATH */
-    int parse_argv;           /* Parse argv command line arguments? */
-
-    /* Command line arguments (sys.argv).
-
-       Set parse_argv to 1 to parse argv as Python command line arguments
-       and then strip Python arguments from argv.
-
-       If argv is empty, an empty string is added to ensure that sys.argv
-       always exists and is never empty. */
+    wchar_t *pycache_prefix;
+    int parse_argv;
+    PyWideStringList orig_argv;
     PyWideStringList argv;
-
-    /* Program name:
-
-       - If Py_SetProgramName() was called, use its value.
-       - On macOS, use PYTHONEXECUTABLE environment variable if set.
-       - If WITH_NEXT_FRAMEWORK macro is defined, use __PYVENV_LAUNCHER__
-         environment variable is set.
-       - Use argv[0] if available and non-empty.
-       - Use "python" on Windows, or "python3 on other platforms. */
-    wchar_t *program_name;
-
-    PyWideStringList xoptions;     /* Command line -X options */
-
-    /* Warnings options: lowest to highest priority. warnings.filters
-       is built in the reverse order (highest to lowest priority). */
+    PyWideStringList xoptions;
     PyWideStringList warnoptions;
-
-    /* If equal to zero, disable the import of the module site and the
-       site-dependent manipulations of sys.path that it entails. Also disable
-       these manipulations if site is explicitly imported later (call
-       site.main() if you want them to be triggered).
-
-       Set to 0 by the -S command line option. If set to -1 (default), it is
-       set to !Py_NoSiteFlag. */
     int site_import;
-
-    /* Bytes warnings:
-
-       * If equal to 1, issue a warning when comparing bytes or bytearray with
-         str or bytes with int.
-       * If equal or greater to 2, issue an error.
-
-       Incremented by the -b command line option. If set to -1 (default), inherit
-       Py_BytesWarningFlag value. */
     int bytes_warning;
-
-    /* If greater than 0, enable inspect: when a script is passed as first
-       argument or the -c option is used, enter interactive mode after
-       executing the script or the command, even when sys.stdin does not appear
-       to be a terminal.
-
-       Incremented by the -i command line option. Set to 1 if the PYTHONINSPECT
-       environment variable is non-empty. If set to -1 (default), inherit
-       Py_InspectFlag value. */
+    int warn_default_encoding;
     int inspect;
-
-    /* If greater than 0: enable the interactive mode (REPL).
-
-       Incremented by the -i command line option. If set to -1 (default),
-       inherit Py_InteractiveFlag value. */
     int interactive;
-
-    /* Optimization level.
-
-       Incremented by the -O command line option. Set by the PYTHONOPTIMIZE
-       environment variable. If set to -1 (default), inherit Py_OptimizeFlag
-       value. */
     int optimization_level;
-
-    /* If greater than 0, enable the debug mode: turn on parser debugging
-       output (for expert only, depending on compilation options).
-
-       Incremented by the -d command line option. Set by the PYTHONDEBUG
-       environment variable. If set to -1 (default), inherit Py_DebugFlag
-       value. */
     int parser_debug;
-
-    /* If equal to 0, Python won't try to write ``.pyc`` files on the
-       import of source modules.
-
-       Set to 0 by the -B command line option and the PYTHONDONTWRITEBYTECODE
-       environment variable. If set to -1 (default), it is set to
-       !Py_DontWriteBytecodeFlag. */
     int write_bytecode;
-
-    /* If greater than 0, enable the verbose mode: print a message each time a
-       module is initialized, showing the place (filename or built-in module)
-       from which it is loaded.
-
-       If greater or equal to 2, print a message for each file that is checked
-       for when searching for a module. Also provides information on module
-       cleanup at exit.
-
-       Incremented by the -v option. Set by the PYTHONVERBOSE environment
-       variable. If set to -1 (default), inherit Py_VerboseFlag value. */
     int verbose;
-
-    /* If greater than 0, enable the quiet mode: Don't display the copyright
-       and version messages even in interactive mode.
-
-       Incremented by the -q option. If set to -1 (default), inherit
-       Py_QuietFlag value. */
     int quiet;
-
-   /* If greater than 0, don't add the user site-packages directory to
-      sys.path.
-
-      Set to 0 by the -s and -I command line options , and the PYTHONNOUSERSITE
-      environment variable. If set to -1 (default), it is set to
-      !Py_NoUserSiteDirectory. */
     int user_site_directory;
-
-    /* If non-zero, configure C standard steams (stdio, stdout,
-       stderr):
-
-       - Set O_BINARY mode on Windows.
-       - If buffered_stdio is equal to zero, make streams unbuffered.
-         Otherwise, enable streams buffering if interactive is non-zero. */
     int configure_c_stdio;
-
-    /* If equal to 0, enable unbuffered mode: force the stdout and stderr
-       streams to be unbuffered.
-
-       Set to 0 by the -u option. Set by the PYTHONUNBUFFERED environment
-       variable.
-       If set to -1 (default), it is set to !Py_UnbufferedStdioFlag. */
     int buffered_stdio;
-
-    /* Encoding of sys.stdin, sys.stdout and sys.stderr.
-       Value set from PYTHONIOENCODING environment variable and
-       Py_SetStandardStreamEncoding() function.
-       See also 'stdio_errors' attribute. */
     wchar_t *stdio_encoding;
-
-    /* Error handler of sys.stdin and sys.stdout.
-       Value set from PYTHONIOENCODING environment variable and
-       Py_SetStandardStreamEncoding() function.
-       See also 'stdio_encoding' attribute. */
     wchar_t *stdio_errors;
-
 #ifdef MS_WINDOWS
-    /* If greater than zero, use io.FileIO instead of WindowsConsoleIO for sys
-       standard streams.
-
-       Set to 1 if the PYTHONLEGACYWINDOWSSTDIO environment variable is set to
-       a non-empty string. If set to -1 (default), inherit
-       Py_LegacyWindowsStdioFlag value.
-
-       See PEP 528 for more details. */
     int legacy_windows_stdio;
 #endif
-
-    /* Value of the --check-hash-based-pycs command line option:
-
-       - "default" means the 'check_source' flag in hash-based pycs
-         determines invalidation
-       - "always" causes the interpreter to hash the source file for
-         invalidation regardless of value of 'check_source' bit
-       - "never" causes the interpreter to always assume hash-based pycs are
-         valid
-
-       The default value is "default".
-
-       See PEP 552 "Deterministic pycs" for more details. */
     wchar_t *check_hash_pycs_mode;
 
     /* --- Path configuration inputs ------------ */
-
-    /* If greater than 0, suppress _PyPathConfig_Calculate() warnings on Unix.
-       The parameter has no effect on Windows.
-
-       If set to -1 (default), inherit !Py_FrozenFlag value. */
     int pathconfig_warnings;
-
-    wchar_t *pythonpath_env; /* PYTHONPATH environment variable */
-    wchar_t *home;          /* PYTHONHOME environment variable,
-                               see also Py_SetPythonHome(). */
+    wchar_t *program_name;
+    wchar_t *pythonpath_env;
+    wchar_t *home;
+    wchar_t *platlibdir;
 
     /* --- Path configuration outputs ----------- */
-
-    int module_search_paths_set;  /* If non-zero, use module_search_paths */
-    PyWideStringList module_search_paths;  /* sys.path paths. Computed if
-                                       module_search_paths_set is equal
-                                       to zero. */
-
-    wchar_t *executable;        /* sys.executable */
-    wchar_t *base_executable;   /* sys._base_executable */
-    wchar_t *prefix;            /* sys.prefix */
-    wchar_t *base_prefix;       /* sys.base_prefix */
-    wchar_t *exec_prefix;       /* sys.exec_prefix */
-    wchar_t *base_exec_prefix;  /* sys.base_exec_prefix */
-    wchar_t *platlibdir;        /* sys.platlibdir */
+    int module_search_paths_set;
+    PyWideStringList module_search_paths;
+    wchar_t *executable;
+    wchar_t *base_executable;
+    wchar_t *prefix;
+    wchar_t *base_prefix;
+    wchar_t *exec_prefix;
+    wchar_t *base_exec_prefix;
 
     /* --- Parameter only used by Py_Main() ---------- */
-
-    /* Skip the first line of the source ('run_filename' parameter), allowing use of non-Unix forms of
-       "#!cmd".  This is intended for a DOS specific hack only.
-
-       Set by the -x command line option. */
     int skip_source_first_line;
-
-    wchar_t *run_command;   /* -c command line argument */
-    wchar_t *run_module;    /* -m command line argument */
-    wchar_t *run_filename;  /* Trailing command line argument without -c or -m */
+    wchar_t *run_command;
+    wchar_t *run_module;
+    wchar_t *run_filename;
 
     /* --- Private fields ---------------------------- */
 
-    /* Install importlib? If set to 0, importlib is not initialized at all.
-       Needed by freeze_importlib. */
+    // Install importlib? If equal to 0, importlib is not initialized at all.
+    // Needed by freeze_importlib.
     int _install_importlib;
 
-    /* If equal to 0, stop Python initialization before the "main" phase */
+    // If equal to 0, stop Python initialization before the "main" phase.
     int _init_main;
 
-    /* If non-zero, disallow threads, subprocesses, and fork.
-       Default: 0. */
+    // If non-zero, disallow threads, subprocesses, and fork.
+    // Default: 0.
     int _isolated_interpreter;
-
-    /* Original command line arguments. If _orig_argv is empty and _argv is
-       not equal to [''], PyConfig_Read() copies the configuration 'argv' list
-       into '_orig_argv' list before modifying 'argv' list (if parse_argv
-       is non-zero).
-
-       _PyConfig_Write() initializes Py_GetArgcArgv() to this list. */
-    PyWideStringList _orig_argv;
 } PyConfig;
 
 PyAPI_FUNC(void) PyConfig_InitPythonConfig(PyConfig *config);
@@ -452,7 +239,7 @@
 
 /* Get the original command line arguments, before Python modified them.
 
-   See also PyConfig._orig_argv. */
+   See also PyConfig.orig_argv. */
 PyAPI_FUNC(void) Py_GetArgcArgv(int *argc, wchar_t ***argv);
 
 #ifdef __cplusplus
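
The PyConfig slimming above mostly removes inline field documentation (now maintained in Doc/c-api/init_config.rst), adds warn_default_encoding, and promotes _orig_argv to the public orig_argv; embedding code is otherwise unchanged. A minimal initialization sketch:

    #include <Python.h>

    int main(void)
    {
        PyConfig config;
        PyConfig_InitPythonConfig(&config);
        config.isolated = 1;                  /* example setting */
        PyStatus status = Py_InitializeFromConfig(&config);
        PyConfig_Clear(&config);
        if (PyStatus_Exception(status)) {
            Py_ExitStatusException(status);   /* never returns */
        }
        PyRun_SimpleString("import sys; print(sys.flags.isolated)");
        return Py_FinalizeEx() < 0 ? 1 : 0;
    }
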
diff --git a/include/cpython/interpreteridobject.h b/include/cpython/interpreteridobject.h
index 67ec587..5076584 100644
--- a/include/cpython/interpreteridobject.h
+++ b/include/cpython/interpreteridobject.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Interpreter ID Object */
 
 PyAPI_DATA(PyTypeObject) _PyInterpreterID_Type;
@@ -13,7 +9,3 @@
 PyAPI_FUNC(PyObject *) _PyInterpreterID_New(int64_t);
 PyAPI_FUNC(PyObject *) _PyInterpreterState_GetIDObject(PyInterpreterState *);
 PyAPI_FUNC(PyInterpreterState *) _PyInterpreterID_LookUp(PyObject *);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/include/cpython/listobject.h b/include/cpython/listobject.h
index 74fe330..e323915 100644
--- a/include/cpython/listobject.h
+++ b/include/cpython/listobject.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
     PyObject_VAR_HEAD
     /* Vector of pointers to list elements.  list[0] is ob_item[0], etc. */
@@ -30,14 +26,9 @@
 
 /* Macro, trading safety for speed */
 
-/* Cast argument to PyTupleObject* type. */
+/* Cast argument to PyListObject* type. */
 #define _PyList_CAST(op) (assert(PyList_Check(op)), (PyListObject *)(op))
 
 #define PyList_GET_ITEM(op, i) (_PyList_CAST(op)->ob_item[i])
-#define PyList_SET_ITEM(op, i, v) (_PyList_CAST(op)->ob_item[i] = (v))
+#define PyList_SET_ITEM(op, i, v) ((void)(_PyList_CAST(op)->ob_item[i] = (v)))
 #define PyList_GET_SIZE(op)    Py_SIZE(_PyList_CAST(op))
-#define _PyList_ITEMS(op)      (_PyList_CAST(op)->ob_item)
-
-#ifdef __cplusplus
-}
-#endif
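
PyList_SET_ITEM (like PyCell_SET earlier) now expands to a void expression, so misuse of the assignment's value becomes a compile error, and the _PyList_CAST comment is fixed to name PyListObject. The usual fill-a-fresh-list pattern still applies; a sketch:

    static PyObject *
    make_none_list(Py_ssize_t n)
    {
        PyObject *list = PyList_New(n);
        if (list == NULL) {
            return NULL;
        }
        for (Py_ssize_t i = 0; i < n; i++) {
            PyList_SET_ITEM(list, i, Py_NewRef(Py_None));   /* steals the ref */
        }
        return list;
    }
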
diff --git a/include/cpython/object.h b/include/cpython/object.h
index 444f832..84c60e5 100644
--- a/include/cpython/object.h
+++ b/include/cpython/object.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 PyAPI_FUNC(void) _Py_NewReference(PyObject *op);
 
 #ifdef Py_TRACE_REFS
@@ -13,10 +9,6 @@
 PyAPI_FUNC(void) _Py_ForgetReference(PyObject *);
 #endif
 
-/* Update the Python traceback of an object. This function must be called
-   when a memory block is reused from a free list. */
-PyAPI_FUNC(int) _PyTraceMalloc_NewReference(PyObject *op);
-
 #ifdef Py_REF_DEBUG
 PyAPI_FUNC(Py_ssize_t) _Py_GetRefTotal(void);
 #endif
@@ -43,12 +35,13 @@
    _PyObject_{Get,Set,Has}AttrId are __getattr__ versions using _Py_Identifier*.
 */
 typedef struct _Py_Identifier {
-    struct _Py_Identifier *next;
     const char* string;
-    PyObject *object;
+    // Index in PyInterpreterState.unicode.ids.array. It is process-wide
+    // unique and must be initialized to -1.
+    Py_ssize_t index;
 } _Py_Identifier;
 
-#define _Py_static_string_init(value) { .next = NULL, .string = value, .object = NULL }
+#define _Py_static_string_init(value) { .string = value, .index = -1 }
 #define _Py_static_string(varname, value)  static _Py_Identifier varname = _Py_static_string_init(value)
 #define _Py_IDENTIFIER(varname) _Py_static_string(PyId_##varname, #varname)
 
@@ -175,10 +168,13 @@
     objobjargproc mp_ass_subscript;
 } PyMappingMethods;
 
+typedef PySendResult (*sendfunc)(PyObject *iter, PyObject *value, PyObject **result);
+
 typedef struct {
     unaryfunc am_await;
     unaryfunc am_aiter;
     unaryfunc am_anext;
+    sendfunc am_send;
 } PyAsyncMethods;
 
 typedef struct {
@@ -190,6 +186,8 @@
  * backwards-compatibility */
 typedef Py_ssize_t printfunc;
 
+// If this structure is modified, Doc/includes/typestruct.h should be updated
+// as well.
 struct _typeobject {
     PyObject_VAR_HEAD
     const char *tp_name; /* For printing, in format "<module>.<name>" */
@@ -249,6 +247,7 @@
     struct PyMethodDef *tp_methods;
     struct PyMemberDef *tp_members;
     struct PyGetSetDef *tp_getset;
+    // Strong reference on a heap type, borrowed reference on a static type
     struct _typeobject *tp_base;
     PyObject *tp_dict;
     descrgetfunc tp_descr_get;
@@ -304,6 +303,8 @@
 PyAPI_FUNC(PyTypeObject *) _PyType_CalculateMetaclass(PyTypeObject *, PyObject *);
 PyAPI_FUNC(PyObject *) _PyType_GetDocFromInternalDoc(const char *, const char *);
 PyAPI_FUNC(PyObject *) _PyType_GetTextSignatureFromInternalDoc(const char *, const char *);
+struct PyModuleDef;
+PyAPI_FUNC(PyObject *) _PyType_GetModuleByDef(PyTypeObject *, struct PyModuleDef *);
 
 struct _Py_Identifier;
 PyAPI_FUNC(int) PyObject_Print(PyObject *, FILE *, int);
@@ -314,7 +315,6 @@
 PyAPI_FUNC(int) _PyObject_IsAbstract(PyObject *);
 PyAPI_FUNC(PyObject *) _PyObject_GetAttrId(PyObject *, struct _Py_Identifier *);
 PyAPI_FUNC(int) _PyObject_SetAttrId(PyObject *, struct _Py_Identifier *, PyObject *);
-PyAPI_FUNC(int) _PyObject_HasAttrId(PyObject *, struct _Py_Identifier *);
 /* Replacements of PyObject_GetAttr() and _PyObject_GetAttrId() which
    don't raise AttributeError.
 
@@ -519,6 +519,8 @@
 /* Python 3.9 private API, invoked by the macros below. */
 PyAPI_FUNC(int) _PyTrash_begin(struct _ts *tstate, PyObject *op);
 PyAPI_FUNC(void) _PyTrash_end(struct _ts *tstate);
+/* Python 3.10 private API, invoked by the Py_TRASHCAN_BEGIN(). */
+PyAPI_FUNC(int) _PyTrash_cond(PyObject *op, destructor dealloc);
 
 #define PyTrash_UNWIND_LEVEL 50
 
@@ -528,7 +530,7 @@
         /* If "cond" is false, then _tstate remains NULL and the deallocator \
          * is run normally without involving the trashcan */ \
         if (cond) { \
-            _tstate = PyThreadState_GET(); \
+            _tstate = PyThreadState_Get(); \
             if (_PyTrash_begin(_tstate, _PyObject_CAST(op))) { \
                 break; \
             } \
@@ -542,13 +544,9 @@
 
 #define Py_TRASHCAN_BEGIN(op, dealloc) \
     Py_TRASHCAN_BEGIN_CONDITION(op, \
-        Py_TYPE(op)->tp_dealloc == (destructor)(dealloc))
+        _PyTrash_cond(_PyObject_CAST(op), (destructor)dealloc))
 
 /* For backwards compatibility, these macros enable the trashcan
  * unconditionally */
 #define Py_TRASHCAN_SAFE_BEGIN(op) Py_TRASHCAN_BEGIN_CONDITION(op, 1)
 #define Py_TRASHCAN_SAFE_END(op) Py_TRASHCAN_END
-
-#ifdef __cplusplus
-}
-#endif
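Editor's note: for reference, a typical deallocator built on the trashcan macros above; the new _PyTrash_cond() check is hidden inside Py_TRASHCAN_BEGIN(). MyObject, its 'child' field, and mytype_dealloc are hypothetical:

    static void
    mytype_dealloc(PyObject *op)
    {
        PyObject_GC_UnTrack(op);
        Py_TRASHCAN_BEGIN(op, mytype_dealloc)
        Py_CLEAR(((MyObject *)op)->child);  /* may trigger nested deallocations */
        Py_TYPE(op)->tp_free(op);
        Py_TRASHCAN_END
    }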
diff --git a/include/cpython/objimpl.h b/include/cpython/objimpl.h
index b835936..d83700e 100644
--- a/include/cpython/objimpl.h
+++ b/include/cpython/objimpl.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #define _PyObject_SIZE(typeobj) ( (typeobj)->tp_basicsize )
 
 /* _PyObject_VAR_SIZE returns the number of bytes (as size_t) allocated for a
@@ -41,8 +37,9 @@
        PyObject *op;
 
        op = (PyObject *) Your_Allocator(_PyObject_SIZE(YourTypeStruct));
-       if (op == NULL)
-       return PyErr_NoMemory();
+       if (op == NULL) {
+           return PyErr_NoMemory();
+       }
 
        PyObject_Init(op, &YourTypeStruct);
 
@@ -55,40 +52,6 @@
    the 1st step is performed automatically for you, so in a C++ class
    constructor you would start directly with PyObject_Init/InitVar. */
 
-
-/* Inline functions trading binary compatibility for speed:
-   PyObject_INIT() is the fast version of PyObject_Init(), and
-   PyObject_INIT_VAR() is the fast version of PyObject_InitVar().
-
-   These inline functions must not be called with op=NULL. */
-static inline PyObject*
-_PyObject_INIT(PyObject *op, PyTypeObject *typeobj)
-{
-    assert(op != NULL);
-    Py_SET_TYPE(op, typeobj);
-    if (PyType_GetFlags(typeobj) & Py_TPFLAGS_HEAPTYPE) {
-        Py_INCREF(typeobj);
-    }
-    _Py_NewReference(op);
-    return op;
-}
-
-#define PyObject_INIT(op, typeobj) \
-    _PyObject_INIT(_PyObject_CAST(op), (typeobj))
-
-static inline PyVarObject*
-_PyObject_INIT_VAR(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
-{
-    assert(op != NULL);
-    Py_SET_SIZE(op, size);
-    PyObject_INIT((PyObject *)op, typeobj);
-    return op;
-}
-
-#define PyObject_INIT_VAR(op, typeobj, size) \
-    _PyObject_INIT_VAR(_PyVarObject_CAST(op), (typeobj), (size))
-
-
 /* This function returns the number of allocated memory blocks, regardless of size */
 PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void);
 
@@ -116,10 +79,6 @@
 PyAPI_FUNC(void) PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator);
 
 
-PyAPI_FUNC(Py_ssize_t) _PyGC_CollectNoFail(void);
-PyAPI_FUNC(Py_ssize_t) _PyGC_CollectIfEnabled(void);
-
-
 /* Test if an object implements the garbage collector protocol */
 PyAPI_FUNC(int) PyObject_IS_GC(PyObject *obj);
 
@@ -139,7 +98,3 @@
 #define PyType_SUPPORTS_WEAKREFS(t) ((t)->tp_weaklistoffset > 0)
 
 PyAPI_FUNC(PyObject **) PyObject_GET_WEAKREFS_LISTPTR(PyObject *op);
-
-#ifdef __cplusplus
-}
-#endif
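Editor's note: with the PyObject_INIT()/PyObject_INIT_VAR() inline variants removed above, allocation code falls back to the regular function; a minimal sketch, assuming a hypothetical MyObject/MyType:

    MyObject *op = (MyObject *)PyObject_Malloc(sizeof(MyObject));
    if (op == NULL) {
        return PyErr_NoMemory();
    }
    PyObject_Init((PyObject *)op, &MyType);  /* sets ob_type, refcount = 1 */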
diff --git a/include/odictobject.h b/include/cpython/odictobject.h
similarity index 100%
rename from include/odictobject.h
rename to include/cpython/odictobject.h
diff --git a/include/picklebufobject.h b/include/cpython/picklebufobject.h
similarity index 100%
rename from include/picklebufobject.h
rename to include/cpython/picklebufobject.h
diff --git a/include/pyctype.h b/include/cpython/pyctype.h
similarity index 94%
rename from include/pyctype.h
rename to include/cpython/pyctype.h
index 6bce63e..729d932 100644
--- a/include/pyctype.h
+++ b/include/cpython/pyctype.h
@@ -1,6 +1,9 @@
 #ifndef Py_LIMITED_API
 #ifndef PYCTYPE_H
 #define PYCTYPE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
 
 #define PY_CTF_LOWER  0x01
 #define PY_CTF_UPPER  0x02
@@ -29,5 +32,8 @@
 #define Py_TOLOWER(c) (_Py_ctype_tolower[Py_CHARMASK(c)])
 #define Py_TOUPPER(c) (_Py_ctype_toupper[Py_CHARMASK(c)])
 
+#ifdef __cplusplus
+}
+#endif
 #endif /* !PYCTYPE_H */
 #endif /* !Py_LIMITED_API */
diff --git a/include/pydebug.h b/include/cpython/pydebug.h
similarity index 100%
rename from include/pydebug.h
rename to include/cpython/pydebug.h
diff --git a/include/cpython/pyerrors.h b/include/cpython/pyerrors.h
index 9c87b53..3f95245 100644
--- a/include/cpython/pyerrors.h
+++ b/include/cpython/pyerrors.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Error objects */
 
 /* PyException_HEAD defines the initial segment of every exception class. */
@@ -24,6 +20,8 @@
     PyObject *filename;
     PyObject *lineno;
     PyObject *offset;
+    PyObject *end_lineno;
+    PyObject *end_offset;
     PyObject *text;
     PyObject *print_file_and_line;
 } PySyntaxErrorObject;
@@ -66,6 +64,17 @@
     PyObject *value;
 } PyStopIterationObject;
 
+typedef struct {
+    PyException_HEAD
+    PyObject *name;
+} PyNameErrorObject;
+
+typedef struct {
+    PyException_HEAD
+    PyObject *obj;
+    PyObject *name;
+} PyAttributeErrorObject;
+
 /* Compatibility typedefs */
 typedef PyOSErrorObject PyEnvironmentErrorObject;
 #ifdef MS_WINDOWS
@@ -82,10 +91,6 @@
 
 PyAPI_FUNC(void) _PyErr_ChainExceptions(PyObject *, PyObject *, PyObject *);
 
-/* */
-
-#define PyExceptionClass_Name(x)  (((PyTypeObject*)(x))->tp_name)
-
 /* Convenience functions */
 
 #ifdef MS_WINDOWS
@@ -145,6 +150,13 @@
     int lineno,
     int col_offset);
 
+PyAPI_FUNC(void) PyErr_RangedSyntaxLocationObject(
+    PyObject *filename,
+    int lineno,
+    int col_offset,
+    int end_lineno,
+    int end_col_offset);
+
 PyAPI_FUNC(PyObject *) PyErr_ProgramTextObject(
     PyObject *filename,
     int lineno);
@@ -173,6 +185,12 @@
     Py_ssize_t end,
     const char *reason          /* UTF-8 encoded string */
     );
+
+PyAPI_FUNC(PyObject *) _PyErr_ProgramDecodedTextObject(
+    PyObject *filename,
+    int lineno,
+    const char* encoding);
+
 PyAPI_FUNC(PyObject *) _PyUnicodeTranslateError_Create(
     PyObject *object,
     Py_ssize_t start,
@@ -194,7 +212,3 @@
     ...);
 
 #define Py_FatalError(message) _Py_FatalErrorFunc(__func__, message)
-
-#ifdef __cplusplus
-}
-#endif
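Editor's note: the new ranged variant lets an error reporter give both a start and an end position; a minimal sketch amending a pending SyntaxError (filename and offsets are arbitrary):

    PyObject *filename = PyUnicode_FromString("<test>");
    if (filename != NULL) {
        PyErr_SetString(PyExc_SyntaxError, "invalid syntax");
        /* annotate the pending SyntaxError: line 3, columns 5..12 */
        PyErr_RangedSyntaxLocationObject(filename, 3, 5, 3, 12);
        Py_DECREF(filename);
    }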
diff --git a/include/pyfpe.h b/include/cpython/pyfpe.h
similarity index 100%
rename from include/pyfpe.h
rename to include/cpython/pyfpe.h
diff --git a/include/cpython/pylifecycle.h b/include/cpython/pylifecycle.h
index eb523b8..5faeb35 100644
--- a/include/cpython/pylifecycle.h
+++ b/include/cpython/pylifecycle.h
@@ -2,9 +2,9 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* Py_FrozenMain is kept out of the Limited API until documented and present
+   in all builds of Python */
+PyAPI_FUNC(int) Py_FrozenMain(int argc, char **argv);
 
 /* Only used by applications that embed the interpreter and need to
  * override the standard encoding determination mechanism
@@ -39,15 +39,11 @@
 
 PyAPI_FUNC(void) _Py_NO_RETURN Py_ExitStatusException(PyStatus err);
 
-/* Py_PyAtExit is for the atexit module, Py_AtExit is for low-level
- * exit functions.
- */
-PyAPI_FUNC(void) _Py_PyAtExit(void (*func)(PyObject *), PyObject *);
-
 /* Restore signals that the interpreter has called SIG_IGN on to SIG_DFL. */
 PyAPI_FUNC(void) _Py_RestoreSignals(void);
 
 PyAPI_FUNC(int) Py_FdIsInteractive(FILE *, const char *);
+PyAPI_FUNC(int) _Py_FdIsInteractive(FILE *fp, PyObject *filename);
 
 PyAPI_FUNC(void) _Py_SetProgramFullPath(const wchar_t *);
 
@@ -66,7 +62,3 @@
 PyAPI_FUNC(char *) _Py_SetLocaleFromEnv(int category);
 
 PyAPI_FUNC(PyThreadState *) _Py_NewInterpreter(int isolated_subinterpreter);
-
-#ifdef __cplusplus
-}
-#endif
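Editor's note: Py_FdIsInteractive() gains a PyObject*-filename sibling above; typical use when deciding whether to enter an interactive loop (a sketch, not the interpreter's own logic):

    /* nonzero if fp is a terminal, or if interactive mode is forced
       and the name refers to stdin */
    if (Py_FdIsInteractive(stdin, "<stdin>")) {
        /* run the REPL */
    }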
diff --git a/include/cpython/pymem.h b/include/cpython/pymem.h
index 79f063b..d1054d7 100644
--- a/include/cpython/pymem.h
+++ b/include/cpython/pymem.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 PyAPI_FUNC(void *) PyMem_RawMalloc(size_t size);
 PyAPI_FUNC(void *) PyMem_RawCalloc(size_t nelem, size_t elsize);
 PyAPI_FUNC(void *) PyMem_RawRealloc(void *ptr, size_t new_size);
@@ -14,8 +10,6 @@
 /* Try to get the allocators name set by _PyMem_SetupAllocators(). */
 PyAPI_FUNC(const char*) _PyMem_GetCurrentAllocatorName(void);
 
-PyAPI_FUNC(void *) PyMem_Calloc(size_t nelem, size_t elsize);
-
 /* strdup() using PyMem_RawMalloc() */
 PyAPI_FUNC(char *) _PyMem_RawStrdup(const char *str);
 
@@ -102,7 +96,3 @@
 
    The function does nothing if Python is not compiled is debug mode. */
 PyAPI_FUNC(void) PyMem_SetupDebugHooks(void);
-
-#ifdef __cplusplus
-}
-#endif
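Editor's note: the raw allocator family stays in this header (PyMem_Calloc() moved to the public one). A sketch; the PyMem_Raw* calls themselves do not require the GIL, though reporting the failure below does:

    char *buf = (char *)PyMem_RawMalloc(256);
    if (buf == NULL) {
        return PyErr_NoMemory();  /* setting the exception requires the GIL */
    }
    /* ... use buf ... */
    PyMem_RawFree(buf);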
diff --git a/include/cpython/pystate.h b/include/cpython/pystate.h
index f292da1..7c995b9 100644
--- a/include/cpython/pystate.h
+++ b/include/cpython/pystate.h
@@ -2,12 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "cpython/initconfig.h"
-
 PyAPI_FUNC(int) _PyInterpreterState_RequiresIDRef(PyInterpreterState *);
 PyAPI_FUNC(void) _PyInterpreterState_RequireIDRef(PyInterpreterState *, int);
 
@@ -33,6 +27,21 @@
 #define PyTrace_OPCODE 7
 
 
+typedef struct _cframe {
+    /* This struct will be threaded through the C stack
+     * allowing fast access to per-thread state that needs
+     * to be accessed quickly by the interpreter, but can
+     * be modified outside of the interpreter.
+     *
+     * WARNING: This makes data on the C stack accessible from
+     * heap objects. Care must be taken to maintain stack
+     * discipline and make sure that instances of this struct cannot be
+     * accessed outside of their lifetime.
+     */
+    int use_tracing;
+    struct _cframe *previous;
+} CFrame;
+
 typedef struct _err_stackitem {
     /* This struct represents an entry on the exception stack, which is a
      * per-coroutine state. (Coroutine in the computer science sense,
@@ -58,17 +67,17 @@
     /* Borrowed reference to the current frame (it can be NULL) */
     PyFrameObject *frame;
     int recursion_depth;
-    char overflowed; /* The stack has overflowed. Allow 50 more calls
-                        to handle the runtime error. */
-    char recursion_critical; /* The current calls must not cause
-                                a stack overflow. */
+    int recursion_headroom; /* Allow 50 more calls to handle any errors. */
     int stackcheck_counter;
 
     /* 'tracing' keeps track of the execution depth when tracing/profiling.
        This is to prevent the actual trace/profile code from being recorded in
        the trace/profile. */
     int tracing;
-    int use_tracing;
+
+    /* Pointer to current CFrame in the C stack frame of the currently,
+     * or most recently, executing _PyEval_EvalFrameDefault. */
+    CFrame *cframe;
 
     Py_tracefunc c_profilefunc;
     Py_tracefunc c_tracefunc;
@@ -136,6 +145,8 @@
     /* Unique thread state id. */
     uint64_t id;
 
+    CFrame root_cframe;
+
     /* XXX signal handlers should also be here */
 
 };
@@ -173,6 +184,11 @@
 */
 PyAPI_FUNC(PyObject *) _PyThread_CurrentFrames(void);
 
+/* The implementation of sys._current_exceptions().  Returns a dict mapping
+   thread id to that thread's current exception.
+*/
+PyAPI_FUNC(PyObject *) _PyThread_CurrentExceptions(void);
+
 /* Routines for advanced debuggers, requested by David Beazley.
    Don't use unless you know what you are doing! */
 PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Main(void);
@@ -194,7 +210,37 @@
 
 PyAPI_FUNC(const PyConfig*) _PyInterpreterState_GetConfig(PyInterpreterState *interp);
 
-// Get the configuration of the currrent interpreter.
+/* Get a copy of the current interpreter configuration.
+
+   Return 0 on success. Raise an exception and return -1 on error.
+
+   The caller must initialize 'config', using PyConfig_InitPythonConfig()
+   for example.
+
+   Python must be preinitialized to call this method.
+   The caller must hold the GIL. */
+PyAPI_FUNC(int) _PyInterpreterState_GetConfigCopy(
+    struct PyConfig *config);
+
+/* Set the configuration of the current interpreter.
+
+   This function should be called during or just after the Python
+   initialization.
+
+   Update the sys module with the new configuration. If the sys module was
+   modified directly after the Python initialization, these changes are lost.
+
+   Some configuration like faulthandler or warnoptions can be updated in the
+   configuration, but don't reconfigure Python (don't enable/disable
+   faulthandler and don't reconfigure warnings filters).
+
+   Return 0 on success. Raise an exception and return -1 on error.
+
+   The configuration should come from _PyInterpreterState_GetConfigCopy(). */
+PyAPI_FUNC(int) _PyInterpreterState_SetConfig(
+    const struct PyConfig *config);
+
+// Get the configuration of the current interpreter.
 // The caller must hold the GIL.
 PyAPI_FUNC(const PyConfig*) _Py_GetConfig(void);
 
@@ -257,7 +303,3 @@
 
 PyAPI_FUNC(int) _PyCrossInterpreterData_RegisterClass(PyTypeObject *, crossinterpdatafunc);
 PyAPI_FUNC(crossinterpdatafunc) _PyCrossInterpreterData_Lookup(PyObject *);
-
-#ifdef __cplusplus
-}
-#endif
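Editor's note: a sketch of the copy/modify/set protocol documented above, inside a function using the usual 0/-1 return convention; 'faulthandler' is one of the fields the comment names as updatable:

    PyConfig config;
    PyConfig_InitPythonConfig(&config);
    if (_PyInterpreterState_GetConfigCopy(&config) < 0) {
        PyConfig_Clear(&config);
        return -1;
    }
    config.faulthandler = 1;
    int res = _PyInterpreterState_SetConfig(&config);
    PyConfig_Clear(&config);
    return res;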
diff --git a/include/cpython/pythonrun.h b/include/cpython/pythonrun.h
new file mode 100644
index 0000000..2e72d08
--- /dev/null
+++ b/include/cpython/pythonrun.h
@@ -0,0 +1,121 @@
+#ifndef Py_CPYTHON_PYTHONRUN_H
+#  error "this header file must not be included directly"
+#endif
+
+PyAPI_FUNC(int) PyRun_SimpleStringFlags(const char *, PyCompilerFlags *);
+PyAPI_FUNC(int) _PyRun_SimpleFileObject(
+    FILE *fp,
+    PyObject *filename,
+    int closeit,
+    PyCompilerFlags *flags);
+PyAPI_FUNC(int) PyRun_AnyFileExFlags(
+    FILE *fp,
+    const char *filename,       /* decoded from the filesystem encoding */
+    int closeit,
+    PyCompilerFlags *flags);
+PyAPI_FUNC(int) _PyRun_AnyFileObject(
+    FILE *fp,
+    PyObject *filename,
+    int closeit,
+    PyCompilerFlags *flags);
+PyAPI_FUNC(int) PyRun_SimpleFileExFlags(
+    FILE *fp,
+    const char *filename,       /* decoded from the filesystem encoding */
+    int closeit,
+    PyCompilerFlags *flags);
+PyAPI_FUNC(int) PyRun_InteractiveOneFlags(
+    FILE *fp,
+    const char *filename,       /* decoded from the filesystem encoding */
+    PyCompilerFlags *flags);
+PyAPI_FUNC(int) PyRun_InteractiveOneObject(
+    FILE *fp,
+    PyObject *filename,
+    PyCompilerFlags *flags);
+PyAPI_FUNC(int) PyRun_InteractiveLoopFlags(
+    FILE *fp,
+    const char *filename,       /* decoded from the filesystem encoding */
+    PyCompilerFlags *flags);
+PyAPI_FUNC(int) _PyRun_InteractiveLoopObject(
+    FILE *fp,
+    PyObject *filename,
+    PyCompilerFlags *flags);
+
+
+PyAPI_FUNC(PyObject *) PyRun_StringFlags(const char *, int, PyObject *,
+                                         PyObject *, PyCompilerFlags *);
+
+PyAPI_FUNC(PyObject *) PyRun_FileExFlags(
+    FILE *fp,
+    const char *filename,       /* decoded from the filesystem encoding */
+    int start,
+    PyObject *globals,
+    PyObject *locals,
+    int closeit,
+    PyCompilerFlags *flags);
+
+
+PyAPI_FUNC(PyObject *) Py_CompileStringExFlags(
+    const char *str,
+    const char *filename,       /* decoded from the filesystem encoding */
+    int start,
+    PyCompilerFlags *flags,
+    int optimize);
+PyAPI_FUNC(PyObject *) Py_CompileStringObject(
+    const char *str,
+    PyObject *filename, int start,
+    PyCompilerFlags *flags,
+    int optimize);
+
+#define Py_CompileString(str, p, s) Py_CompileStringExFlags(str, p, s, NULL, -1)
+#define Py_CompileStringFlags(str, p, s, f) Py_CompileStringExFlags(str, p, s, f, -1)
+
+
+PyAPI_FUNC(const char *) _Py_SourceAsString(
+    PyObject *cmd,
+    const char *funcname,
+    const char *what,
+    PyCompilerFlags *cf,
+    PyObject **cmd_copy);
+
+
+/* A function flavor of each of the macros below is also exported by
+    libpython. It is required when libpython is accessed directly rather than
+    through the header files, which define the macros. On Windows, for
+    example, PyAPI_FUNC() uses dllexport to export functions in
+    pythonXX.dll. */
+PyAPI_FUNC(PyObject *) PyRun_String(const char *str, int s, PyObject *g, PyObject *l);
+PyAPI_FUNC(int) PyRun_AnyFile(FILE *fp, const char *name);
+PyAPI_FUNC(int) PyRun_AnyFileEx(FILE *fp, const char *name, int closeit);
+PyAPI_FUNC(int) PyRun_AnyFileFlags(FILE *, const char *, PyCompilerFlags *);
+PyAPI_FUNC(int) PyRun_SimpleString(const char *s);
+PyAPI_FUNC(int) PyRun_SimpleFile(FILE *f, const char *p);
+PyAPI_FUNC(int) PyRun_SimpleFileEx(FILE *f, const char *p, int c);
+PyAPI_FUNC(int) PyRun_InteractiveOne(FILE *f, const char *p);
+PyAPI_FUNC(int) PyRun_InteractiveLoop(FILE *f, const char *p);
+PyAPI_FUNC(PyObject *) PyRun_File(FILE *fp, const char *p, int s, PyObject *g, PyObject *l);
+PyAPI_FUNC(PyObject *) PyRun_FileEx(FILE *fp, const char *p, int s, PyObject *g, PyObject *l, int c);
+PyAPI_FUNC(PyObject *) PyRun_FileFlags(FILE *fp, const char *p, int s, PyObject *g, PyObject *l, PyCompilerFlags *flags);
+
+/* Use macros for a bunch of old variants */
+#define PyRun_String(str, s, g, l) PyRun_StringFlags(str, s, g, l, NULL)
+#define PyRun_AnyFile(fp, name) PyRun_AnyFileExFlags(fp, name, 0, NULL)
+#define PyRun_AnyFileEx(fp, name, closeit) \
+    PyRun_AnyFileExFlags(fp, name, closeit, NULL)
+#define PyRun_AnyFileFlags(fp, name, flags) \
+    PyRun_AnyFileExFlags(fp, name, 0, flags)
+#define PyRun_SimpleString(s) PyRun_SimpleStringFlags(s, NULL)
+#define PyRun_SimpleFile(f, p) PyRun_SimpleFileExFlags(f, p, 0, NULL)
+#define PyRun_SimpleFileEx(f, p, c) PyRun_SimpleFileExFlags(f, p, c, NULL)
+#define PyRun_InteractiveOne(f, p) PyRun_InteractiveOneFlags(f, p, NULL)
+#define PyRun_InteractiveLoop(f, p) PyRun_InteractiveLoopFlags(f, p, NULL)
+#define PyRun_File(fp, p, s, g, l) \
+    PyRun_FileExFlags(fp, p, s, g, l, 0, NULL)
+#define PyRun_FileEx(fp, p, s, g, l, c) \
+    PyRun_FileExFlags(fp, p, s, g, l, c, NULL)
+#define PyRun_FileFlags(fp, p, s, g, l, flags) \
+    PyRun_FileExFlags(fp, p, s, g, l, 0, flags)
+
+
+/* Stuff with no proper home (yet) */
+PyAPI_FUNC(char *) PyOS_Readline(FILE *, FILE *, const char *);
+PyAPI_DATA(PyThreadState*) _PyOS_ReadlineTState;
+PyAPI_DATA(char) *(*PyOS_ReadlineFunctionPointer)(FILE *, FILE *, const char *);
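Editor's note: the convenience macros above all bottom out in the *Flags functions; a minimal embedding sketch:

    #include <Python.h>

    int main(void)
    {
        Py_Initialize();
        /* PyRun_SimpleString(s) expands to PyRun_SimpleStringFlags(s, NULL) */
        int status = PyRun_SimpleString("print('hello from CPython')");
        if (Py_FinalizeEx() < 0) {
            return 120;
        }
        return status < 0 ? 1 : 0;
    }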
diff --git a/include/pytime.h b/include/cpython/pytime.h
similarity index 89%
rename from include/pytime.h
rename to include/cpython/pytime.h
index bdda1da..754c7f4 100644
--- a/include/pytime.h
+++ b/include/cpython/pytime.h
@@ -2,9 +2,6 @@
 #ifndef Py_PYTIME_H
 #define Py_PYTIME_H
 
-#include "pyconfig.h" /* include for defines */
-#include "object.h"
-
 /**************************************************************************
 Symbols and macros to supply platform-independent interfaces to time related
 functions and constants
@@ -91,13 +88,13 @@
 PyAPI_FUNC(int) _PyTime_FromNanosecondsObject(_PyTime_t *t,
     PyObject *obj);
 
-/* Convert a number of seconds (Python float or int) to a timetamp.
+/* Convert a number of seconds (Python float or int) to a timestamp.
    Raise an exception and return -1 on error, return 0 on success. */
 PyAPI_FUNC(int) _PyTime_FromSecondsObject(_PyTime_t *t,
     PyObject *obj,
     _PyTime_round_t round);
 
-/* Convert a number of milliseconds (Python float or int, 10^-3) to a timetamp.
+/* Convert a number of milliseconds (Python float or int, 10^-3) to a timestamp.
    Raise an exception and return -1 on error, return 0 on success. */
 PyAPI_FUNC(int) _PyTime_FromMillisecondsObject(_PyTime_t *t,
     PyObject *obj,
@@ -164,22 +161,6 @@
     _PyTime_t mul,
     _PyTime_t div);
 
-/* Get the current time from the system clock.
-
-   The function cannot fail. _PyTime_Init() ensures that the system clock
-   works. */
-PyAPI_FUNC(_PyTime_t) _PyTime_GetSystemClock(void);
-
-/* Get the time of a monotonic clock, i.e. a clock that cannot go backwards.
-   The clock is not affected by system clock updates. The reference point of
-   the returned value is undefined, so that only the difference between the
-   results of consecutive calls is valid.
-
-   The function cannot fail. _PyTime_Init() ensures that a monotonic clock
-   is available and works. */
-PyAPI_FUNC(_PyTime_t) _PyTime_GetMonotonicClock(void);
-
-
 /* Structure used by time.get_clock_info() */
 typedef struct {
     const char *implementation;
@@ -189,8 +170,17 @@
 } _Py_clock_info_t;
 
 /* Get the current time from the system clock.
- * Fill clock information if info is not NULL.
- * Raise an exception and return -1 on error, return 0 on success.
+
+   If the internal clock fails, silently ignore the error and return 0.
+   On integer overflow, silently ignore the overflow and truncate the clock to
+   _PyTime_MIN or _PyTime_MAX.
+
+   Use _PyTime_GetSystemClockWithInfo() to check for failure. */
+PyAPI_FUNC(_PyTime_t) _PyTime_GetSystemClock(void);
+
+/* Get the current time from the system clock.
+ * On success, set *t and *info (if not NULL), and return 0.
+ * On error, raise an exception and return -1.
  */
 PyAPI_FUNC(int) _PyTime_GetSystemClockWithInfo(
     _PyTime_t *t,
@@ -201,6 +191,18 @@
    the returned value is undefined, so that only the difference between the
    results of consecutive calls is valid.
 
+   If the internal clock fails, silently ignore the error and return 0.
+   On integer overflow, silently ignore the overflow and truncate the clock to
+   _PyTime_MIN or _PyTime_MAX.
+
+   Use _PyTime_GetMonotonicClockWithInfo() to check for failure. */
+PyAPI_FUNC(_PyTime_t) _PyTime_GetMonotonicClock(void);
+
+/* Get the time of a monotonic clock, i.e. a clock that cannot go backwards.
+   The clock is not affected by system clock updates. The reference point of
+   the returned value is undefined, so that only the difference between the
+   results of consecutive calls is valid.
+
    Fill info (if set) with information of the function used to get the time.
 
    Return 0 on success, raise an exception and return -1 on error. */
@@ -209,10 +211,6 @@
     _Py_clock_info_t *info);
 
 
-/* Initialize time.
-   Return 0 on success, raise an exception and return -1 on error. */
-PyAPI_FUNC(int) _PyTime_Init(void);
-
 /* Converts a timestamp to the Gregorian time, using the local time zone.
    Return 0 on success, raise an exception and return -1 on error. */
 PyAPI_FUNC(int) _PyTime_localtime(time_t t, struct tm *tm);
@@ -224,8 +222,11 @@
 /* Get the performance counter: clock with the highest available resolution to
    measure a short duration.
 
-   The function cannot fail. _PyTime_Init() ensures that the system clock
-   works. */
+   If the internal clock fails, silently ignore the error and return 0.
+   On integer overflow, silently ignore the overflow and truncate the clock to
+   _PyTime_MIN or _PyTime_MAX.
+
+   Use _PyTime_GetPerfCounterWithInfo() to check for failure. */
 PyAPI_FUNC(_PyTime_t) _PyTime_GetPerfCounter(void);
 
 /* Get the performance counter: clock with the highest available resolution to
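Editor's note: with _PyTime_Init() gone, these clocks fail soft as documented above. A sketch of timing a short operation; do_work() is hypothetical, and _PyTime_t values are nanosecond counts per this header's conventions:

    _PyTime_t t0 = _PyTime_GetPerfCounter();
    do_work();
    _PyTime_t dt = _PyTime_GetPerfCounter() - t0;
    double seconds = _PyTime_AsSecondsDouble(dt);  /* nanoseconds -> seconds */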
diff --git a/include/cpython/sysmodule.h b/include/cpython/sysmodule.h
index 1802b5b..fc4c899 100644
--- a/include/cpython/sysmodule.h
+++ b/include/cpython/sysmodule.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 PyAPI_FUNC(PyObject *) _PySys_GetObjectId(_Py_Identifier *key);
 PyAPI_FUNC(int) _PySys_SetObjectId(_Py_Identifier *key, PyObject *);
 
@@ -18,7 +14,3 @@
     const char *argFormat,
     ...);
 PyAPI_FUNC(int) PySys_AddAuditHook(Py_AuditHookFunction, void*);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/include/cpython/traceback.h b/include/cpython/traceback.h
index 837470c..aac5b42 100644
--- a/include/cpython/traceback.h
+++ b/include/cpython/traceback.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct _traceback {
     PyObject_HEAD
     struct _traceback *tb_next;
@@ -16,7 +12,3 @@
 
 PyAPI_FUNC(int) _Py_DisplaySourceLine(PyObject *, PyObject *, int, int);
 PyAPI_FUNC(void) _PyTraceback_Add(const char *, const char *, int);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/include/cpython/tupleobject.h b/include/cpython/tupleobject.h
index 1565f2a..7cada88 100644
--- a/include/cpython/tupleobject.h
+++ b/include/cpython/tupleobject.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
     PyObject_VAR_HEAD
     /* ob_item contains space for 'ob_size' elements.
@@ -27,10 +23,6 @@
 #define PyTuple_GET_ITEM(op, i) (_PyTuple_CAST(op)->ob_item[i])
 
 /* Macro, *only* to be used to fill in brand new tuples */
-#define PyTuple_SET_ITEM(op, i, v) (_PyTuple_CAST(op)->ob_item[i] = v)
+#define PyTuple_SET_ITEM(op, i, v) ((void)(_PyTuple_CAST(op)->ob_item[i] = v))
 
 PyAPI_FUNC(void) _PyTuple_DebugMallocStats(FILE *out);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/include/cpython/unicodeobject.h b/include/cpython/unicodeobject.h
index 1fc732a..0761f01 100644
--- a/include/cpython/unicodeobject.h
+++ b/include/cpython/unicodeobject.h
@@ -2,10 +2,6 @@
 #  error "this header file must not be included directly"
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Py_UNICODE was the native Unicode storage format (code unit) used by
    Python and represents a single Unicode element in the Unicode type.
    With PEP 393, Py_UNICODE is deprecated and replaced with a
@@ -15,6 +11,10 @@
 
 /* --- Internal Unicode Operations ---------------------------------------- */
 
+#ifndef USE_UNICODE_WCHAR_CACHE
+#  define USE_UNICODE_WCHAR_CACHE 1
+#endif /* USE_UNICODE_WCHAR_CACHE */
+
 /* Since splitting on whitespace is an important use case, and
    whitespace in most situations is solely ASCII whitespace, we
    optimize for the common case by using a quick look-up table
@@ -22,7 +22,7 @@
 
  */
 #define Py_UNICODE_ISSPACE(ch) \
-    ((ch) < 128U ? _Py_ascii_whitespace[(ch)] : _PyUnicode_IsWhitespace(ch))
+    ((Py_UCS4)(ch) < 128U ? _Py_ascii_whitespace[(ch)] : _PyUnicode_IsWhitespace(ch))
 
 #define Py_UNICODE_ISLOWER(ch) _PyUnicode_IsLowercase(ch)
 #define Py_UNICODE_ISUPPER(ch) _PyUnicode_IsUppercase(ch)
@@ -416,7 +416,7 @@
 
 
 /* Fast check to determine whether an object is ready. Equivalent to
-   PyUnicode_IS_COMPACT(op) || ((PyUnicodeObject*)(op))->data.any) */
+   PyUnicode_IS_COMPACT(op) || ((PyUnicodeObject*)(op))->data.any */
 
 #define PyUnicode_IS_READY(op) (((PyASCIIObject*)op)->state.ready)
 
@@ -583,7 +583,7 @@
 
 /* Similar to PyUnicode_AsUnicode(), but raises a ValueError if the string
    contains null characters. */
-Py_DEPRECATED(3.3) PyAPI_FUNC(const Py_UNICODE *) _PyUnicode_AsUnicode(
+PyAPI_FUNC(const Py_UNICODE *) _PyUnicode_AsUnicode(
     PyObject *unicode           /* Unicode object */
     );
 
@@ -597,9 +597,6 @@
     Py_ssize_t *size            /* location where to save the length */
     );
 
-/* Get the maximum ordinal for a Unicode character. */
-Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE) PyUnicode_GetMax(void);
-
 
 /* --- _PyUnicodeWriter API ----------------------------------------------- */
 
@@ -730,26 +727,6 @@
 /* --- Manage the default encoding ---------------------------------------- */
 
 /* Returns a pointer to the default encoding (UTF-8) of the
-   Unicode object unicode and the size of the encoded representation
-   in bytes stored in *size.
-
-   In case of an error, no *size is set.
-
-   This function caches the UTF-8 encoded string in the unicodeobject
-   and subsequent calls will return the same string.  The memory is released
-   when the unicodeobject is deallocated.
-
-   _PyUnicode_AsStringAndSize is a #define for PyUnicode_AsUTF8AndSize to
-   support the previous internal function with the same behaviour.
-*/
-
-PyAPI_FUNC(const char *) PyUnicode_AsUTF8AndSize(
-    PyObject *unicode,
-    Py_ssize_t *size);
-
-#define _PyUnicode_AsStringAndSize PyUnicode_AsUTF8AndSize
-
-/* Returns a pointer to the default encoding (UTF-8) of the
    Unicode object unicode.
 
    Like PyUnicode_AsUTF8AndSize(), this also caches the UTF-8 representation
@@ -760,13 +737,6 @@
 
    Use of this API is DEPRECATED since no size information can be
    extracted from the returned data.
-
-   *** This API is for interpreter INTERNAL USE ONLY and will likely
-   *** be removed or changed for Python 3.1.
-
-   *** If you need to access the Unicode object as UTF-8 bytes string,
-   *** please use PyUnicode_AsUTF8String() instead.
-
 */
 
 PyAPI_FUNC(const char *) PyUnicode_AsUTF8(PyObject *unicode);
@@ -864,12 +834,21 @@
 
 /* --- Unicode-Escape Codecs ---------------------------------------------- */
 
-/* Helper for PyUnicode_DecodeUnicodeEscape that detects invalid escape
-   chars. */
-PyAPI_FUNC(PyObject*) _PyUnicode_DecodeUnicodeEscape(
+/* Variant of PyUnicode_DecodeUnicodeEscape that supports partial decoding. */
+PyAPI_FUNC(PyObject*) _PyUnicode_DecodeUnicodeEscapeStateful(
         const char *string,     /* Unicode-Escape encoded string */
         Py_ssize_t length,      /* size of string */
         const char *errors,     /* error handling */
+        Py_ssize_t *consumed    /* bytes consumed */
+);
+
+/* Helper for PyUnicode_DecodeUnicodeEscape that detects invalid escape
+   chars. */
+PyAPI_FUNC(PyObject*) _PyUnicode_DecodeUnicodeEscapeInternal(
+        const char *string,     /* Unicode-Escape encoded string */
+        Py_ssize_t length,      /* size of string */
+        const char *errors,     /* error handling */
+        Py_ssize_t *consumed,   /* bytes consumed */
         const char **first_invalid_escape  /* on return, points to first
                                               invalid escaped char in
                                               string. */
@@ -887,6 +866,14 @@
     Py_ssize_t length           /* Number of Py_UNICODE chars to encode */
     );
 
+/* Variant of PyUnicode_DecodeRawUnicodeEscape that supports partial decoding. */
+PyAPI_FUNC(PyObject*) _PyUnicode_DecodeRawUnicodeEscapeStateful(
+        const char *string,     /* Unicode-Escape encoded string */
+        Py_ssize_t length,      /* size of string */
+        const char *errors,     /* error handling */
+        Py_ssize_t *consumed    /* bytes consumed */
+);
+
 /* --- Latin-1 Codecs ----------------------------------------------------- */
 
 PyAPI_FUNC(PyObject*) _PyUnicode_AsLatin1String(
@@ -1167,53 +1154,8 @@
     Py_UCS4 ch       /* Unicode character */
     );
 
-Py_DEPRECATED(3.3) PyAPI_FUNC(size_t) Py_UNICODE_strlen(
-    const Py_UNICODE *u
-    );
-
-Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strcpy(
-    Py_UNICODE *s1,
-    const Py_UNICODE *s2);
-
-Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strcat(
-    Py_UNICODE *s1, const Py_UNICODE *s2);
-
-Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strncpy(
-    Py_UNICODE *s1,
-    const Py_UNICODE *s2,
-    size_t n);
-
-Py_DEPRECATED(3.3) PyAPI_FUNC(int) Py_UNICODE_strcmp(
-    const Py_UNICODE *s1,
-    const Py_UNICODE *s2
-    );
-
-Py_DEPRECATED(3.3) PyAPI_FUNC(int) Py_UNICODE_strncmp(
-    const Py_UNICODE *s1,
-    const Py_UNICODE *s2,
-    size_t n
-    );
-
-Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strchr(
-    const Py_UNICODE *s,
-    Py_UNICODE c
-    );
-
-Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strrchr(
-    const Py_UNICODE *s,
-    Py_UNICODE c
-    );
-
 PyAPI_FUNC(PyObject*) _PyUnicode_FormatLong(PyObject *, int, int, int);
 
-/* Create a copy of a unicode string ending with a nul character. Return NULL
-   and raise a MemoryError exception on memory allocation failure, otherwise
-   return a new allocated buffer (use PyMem_Free() to free the buffer). */
-
-Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE*) PyUnicode_AsUnicodeCopy(
-    PyObject *unicode
-    );
-
 /* Return an interned Unicode object for an Identifier; may fail if there is no memory.*/
 PyAPI_FUNC(PyObject*) _PyUnicode_FromId(_Py_Identifier*);
 
@@ -1221,8 +1163,7 @@
    and where the hash values are equal (i.e. a very probable match) */
 PyAPI_FUNC(int) _PyUnicode_EQ(PyObject *, PyObject *);
 
-PyAPI_FUNC(Py_ssize_t) _PyUnicode_ScanIdentifier(PyObject *);
+PyAPI_FUNC(int) _PyUnicode_WideCharString_Converter(PyObject *, void *);
+PyAPI_FUNC(int) _PyUnicode_WideCharString_Opt_Converter(PyObject *, void *);
 
-#ifdef __cplusplus
-}
-#endif
+PyAPI_FUNC(Py_ssize_t) _PyUnicode_ScanIdentifier(PyObject *);
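Editor's note: the PyUnicode_AsUTF8AndSize() declaration removed above moved to the public unicodeobject.h; the cached-UTF-8 pattern itself is unchanged. A sketch ('obj' is assumed to be a str):

    /* borrowed pointer, cached on the str; valid while 'obj' stays alive */
    Py_ssize_t size;
    const char *utf8 = PyUnicode_AsUTF8AndSize(obj, &size);
    if (utf8 == NULL) {
        return NULL;
    }
    /* PyUnicode_AsUTF8(obj) is the size-less variant deprecated above */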
diff --git a/include/datetime.h b/include/datetime.h
index 5d9f255..bb56520 100644
--- a/include/datetime.h
+++ b/include/datetime.h
@@ -115,6 +115,10 @@
 
 
 /* Apply for date and datetime instances. */
+
+// o is a pointer to a time or a datetime object.
+#define _PyDateTime_HAS_TZINFO(o)  (((_PyDateTime_BaseTZInfo *)(o))->hastzinfo)
+
 #define PyDateTime_GET_YEAR(o)     ((((PyDateTime_Date*)o)->data[0] << 8) | \
                      ((PyDateTime_Date*)o)->data[1])
 #define PyDateTime_GET_MONTH(o)    (((PyDateTime_Date*)o)->data[2])
@@ -128,6 +132,8 @@
      (((PyDateTime_DateTime*)o)->data[8] << 8)  |       \
       ((PyDateTime_DateTime*)o)->data[9])
 #define PyDateTime_DATE_GET_FOLD(o)        (((PyDateTime_DateTime*)o)->fold)
+#define PyDateTime_DATE_GET_TZINFO(o)      (_PyDateTime_HAS_TZINFO(o) ? \
+    ((PyDateTime_DateTime *)(o))->tzinfo : Py_None)
 
 /* Apply for time instances. */
 #define PyDateTime_TIME_GET_HOUR(o)        (((PyDateTime_Time*)o)->data[0])
@@ -138,6 +144,8 @@
      (((PyDateTime_Time*)o)->data[4] << 8)  |           \
       ((PyDateTime_Time*)o)->data[5])
 #define PyDateTime_TIME_GET_FOLD(o)        (((PyDateTime_Time*)o)->fold)
+#define PyDateTime_TIME_GET_TZINFO(o)      (_PyDateTime_HAS_TZINFO(o) ? \
+    ((PyDateTime_Time *)(o))->tzinfo : Py_None)
 
 /* Apply for time delta instances */
 #define PyDateTime_DELTA_GET_DAYS(o)         (((PyDateTime_Delta*)o)->days)
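Editor's note: the new tzinfo accessors above return a borrowed reference, with Py_None standing in for naive objects. A sketch (requires PyDateTime_IMPORT; 'dt' is assumed to be a datetime instance):

    PyObject *tz = PyDateTime_DATE_GET_TZINFO(dt);  /* borrowed reference */
    if (tz == Py_None) {
        /* naive datetime: no timezone attached */
    }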
diff --git a/include/descrobject.h b/include/descrobject.h
index ead269d..703bc8f 100644
--- a/include/descrobject.h
+++ b/include/descrobject.h
@@ -93,7 +93,7 @@
 #ifndef Py_LIMITED_API
 PyAPI_FUNC(PyObject *) PyDescr_NewWrapper(PyTypeObject *,
                                                 struct wrapperbase *, void *);
-#define PyDescr_IsData(d) (Py_TYPE(d)->tp_descr_set != NULL)
+PyAPI_FUNC(int) PyDescr_IsData(PyObject *);
 #endif
 
 PyAPI_FUNC(PyObject *) PyDictProxy_New(PyObject *);
diff --git a/include/dictobject.h b/include/dictobject.h
index c88b0aa..da5a36b 100644
--- a/include/dictobject.h
+++ b/include/dictobject.h
@@ -57,6 +57,9 @@
 PyAPI_FUNC(PyObject *) PyDict_GetItemString(PyObject *dp, const char *key);
 PyAPI_FUNC(int) PyDict_SetItemString(PyObject *dp, const char *key, PyObject *item);
 PyAPI_FUNC(int) PyDict_DelItemString(PyObject *dp, const char *key);
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000
+PyAPI_FUNC(PyObject *) PyObject_GenericGetDict(PyObject *, void *);
+#endif
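Editor's note: PyObject_GenericGetDict() pairs with the existing PyObject_GenericSetDict() to expose __dict__ on an extension type; a typical getset table (mytype_getset is hypothetical):

    static PyGetSetDef mytype_getset[] = {
        {"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict, NULL, NULL},
        {NULL}  /* sentinel */
    };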
 
 /* Dictionary (keys, values, items) views */
 
diff --git a/include/errcode.h b/include/errcode.h
index 790518b..54ae929 100644
--- a/include/errcode.h
+++ b/include/errcode.h
@@ -30,6 +30,7 @@
 #define E_EOLS          24      /* EOL in single-quoted string */
 #define E_LINECONT      25      /* Unexpected characters after a line continuation */
 #define E_BADSINGLE     27      /* Ill-formed single statement input */
+#define E_INTERACT_STOP 28      /* Interactive mode stopped tokenization */
 
 #ifdef __cplusplus
 }
diff --git a/include/eval.h b/include/eval.h
index 2c1c2d0..eda28df 100644
--- a/include/eval.h
+++ b/include/eval.h
@@ -18,16 +18,6 @@
                                          PyObject *kwdefs, PyObject *closure);
 
 #ifndef Py_LIMITED_API
-PyAPI_FUNC(PyObject *) _PyEval_EvalCodeWithName(
-    PyObject *co,
-    PyObject *globals, PyObject *locals,
-    PyObject *const *args, Py_ssize_t argcount,
-    PyObject *const *kwnames, PyObject *const *kwargs,
-    Py_ssize_t kwcount, int kwstep,
-    PyObject *const *defs, Py_ssize_t defcount,
-    PyObject *kwdefs, PyObject *closure,
-    PyObject *name, PyObject *qualname);
-
 PyAPI_FUNC(PyObject *) _PyEval_CallTracing(PyObject *func, PyObject *args);
 #endif
 
diff --git a/include/fileutils.h b/include/fileutils.h
index 12bd071..16f3b63 100644
--- a/include/fileutils.h
+++ b/include/fileutils.h
@@ -12,10 +12,6 @@
 PyAPI_FUNC(char*) Py_EncodeLocale(
     const wchar_t *text,
     size_t *error_pos);
-
-PyAPI_FUNC(char*) _Py_EncodeLocaleRaw(
-    const wchar_t *text,
-    size_t *error_pos);
 #endif
 
 #ifndef Py_LIMITED_API
diff --git a/include/funcobject.h b/include/funcobject.h
index c5cc9d2..d7acd18 100644
--- a/include/funcobject.h
+++ b/include/funcobject.h
@@ -7,6 +7,21 @@
 extern "C" {
 #endif
 
+
+#define COMMON_FIELDS(PREFIX) \
+    PyObject *PREFIX ## globals; \
+    PyObject *PREFIX ## builtins; \
+    PyObject *PREFIX ## name; \
+    PyObject *PREFIX ## qualname; \
+    PyObject *PREFIX ## code;        /* A code object, the __code__ attribute */ \
+    PyObject *PREFIX ## defaults;    /* NULL or a tuple */ \
+    PyObject *PREFIX ## kwdefaults;  /* NULL or a dict */ \
+    PyObject *PREFIX ## closure;     /* NULL or a tuple of cell objects */
+
+typedef struct {
+    COMMON_FIELDS(fc_)
+} PyFrameConstructor;
+
 /* Function objects and code objects should not be confused with each other:
  *
  * Function objects are created by the execution of the 'def' statement.
@@ -20,18 +35,12 @@
 
 typedef struct {
     PyObject_HEAD
-    PyObject *func_code;        /* A code object, the __code__ attribute */
-    PyObject *func_globals;     /* A dictionary (other mappings won't do) */
-    PyObject *func_defaults;    /* NULL or a tuple */
-    PyObject *func_kwdefaults;  /* NULL or a dict */
-    PyObject *func_closure;     /* NULL or a tuple of cell objects */
+    COMMON_FIELDS(func_)
     PyObject *func_doc;         /* The __doc__ attribute, can be anything */
-    PyObject *func_name;        /* The __name__ attribute, a string object */
     PyObject *func_dict;        /* The __dict__ attribute, a dict or NULL */
     PyObject *func_weakreflist; /* List of weak references */
     PyObject *func_module;      /* The __module__ attribute, can be anything */
     PyObject *func_annotations; /* Annotations, a dict or NULL */
-    PyObject *func_qualname;    /* The qualified name */
     vectorcallfunc vectorcall;
 
     /* Invariant:
@@ -84,6 +93,9 @@
 #define PyFunction_GET_ANNOTATIONS(func) \
         (((PyFunctionObject *)func) -> func_annotations)
 
+#define PyFunction_AS_FRAME_CONSTRUCTOR(func) \
+        ((PyFrameConstructor *)&((PyFunctionObject *)(func))->func_globals)
+
 /* The classmethod and staticmethod types lives here, too */
 PyAPI_DATA(PyTypeObject) PyClassMethod_Type;
 PyAPI_DATA(PyTypeObject) PyStaticMethod_Type;
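Editor's note: PyFunction_AS_FRAME_CONSTRUCTOR() works because COMMON_FIELDS() gives PyFunctionObject and PyFrameConstructor identical layouts from func_globals onward, so a function can be reinterpreted in place. A sketch ('fn' is assumed to be a PyFunctionObject*):

    /* no copy is made, only a pointer cast into the middle of 'fn' */
    PyFrameConstructor *fc = PyFunction_AS_FRAME_CONSTRUCTOR(fn);
    assert(fc->fc_globals == fn->func_globals);
    assert(fc->fc_code == fn->func_code);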
diff --git a/include/genobject.h b/include/genobject.h
index 8ffd156..e965334 100644
--- a/include/genobject.h
+++ b/include/genobject.h
@@ -9,6 +9,7 @@
 #endif
 
 #include "pystate.h"   /* _PyErr_StackItem */
+#include "abstract.h" /* PySendResult */
 
 /* _PyGenObject_HEAD defines the initial segment of generator
    and coroutine objects. */
@@ -16,8 +17,6 @@
     PyObject_HEAD                                                           \
     /* Note: gi_frame can be NULL if the generator is "finished" */         \
     PyFrameObject *prefix##_frame;                                          \
-    /* True if generator is being executed. */                              \
-    char prefix##_running;                                                  \
     /* The code object backing the generator */                             \
     PyObject *prefix##_code;                                                \
     /* List of weak reference. */                                           \
@@ -43,7 +42,6 @@
     PyObject *name, PyObject *qualname);
 PyAPI_FUNC(int) _PyGen_SetStopIterationValue(PyObject *);
 PyAPI_FUNC(int) _PyGen_FetchStopIterationValue(PyObject **);
-PyAPI_FUNC(PyObject *) _PyGen_Send(PyGenObject *, PyObject *);
 PyObject *_PyGen_yf(PyGenObject *);
 PyAPI_FUNC(void) _PyGen_Finalize(PyObject *self);
 
diff --git a/include/graminit.h b/include/graminit.h
deleted file mode 100644
index d1027b7..0000000
--- a/include/graminit.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* Generated by Parser/pgen */
-
-#define single_input 256
-#define file_input 257
-#define eval_input 258
-#define decorator 259
-#define decorators 260
-#define decorated 261
-#define async_funcdef 262
-#define funcdef 263
-#define parameters 264
-#define typedargslist 265
-#define tfpdef 266
-#define varargslist 267
-#define vfpdef 268
-#define stmt 269
-#define simple_stmt 270
-#define small_stmt 271
-#define expr_stmt 272
-#define annassign 273
-#define testlist_star_expr 274
-#define augassign 275
-#define del_stmt 276
-#define pass_stmt 277
-#define flow_stmt 278
-#define break_stmt 279
-#define continue_stmt 280
-#define return_stmt 281
-#define yield_stmt 282
-#define raise_stmt 283
-#define import_stmt 284
-#define import_name 285
-#define import_from 286
-#define import_as_name 287
-#define dotted_as_name 288
-#define import_as_names 289
-#define dotted_as_names 290
-#define dotted_name 291
-#define global_stmt 292
-#define nonlocal_stmt 293
-#define assert_stmt 294
-#define compound_stmt 295
-#define async_stmt 296
-#define if_stmt 297
-#define while_stmt 298
-#define for_stmt 299
-#define try_stmt 300
-#define with_stmt 301
-#define with_item 302
-#define except_clause 303
-#define suite 304
-#define namedexpr_test 305
-#define test 306
-#define test_nocond 307
-#define lambdef 308
-#define lambdef_nocond 309
-#define or_test 310
-#define and_test 311
-#define not_test 312
-#define comparison 313
-#define comp_op 314
-#define star_expr 315
-#define expr 316
-#define xor_expr 317
-#define and_expr 318
-#define shift_expr 319
-#define arith_expr 320
-#define term 321
-#define factor 322
-#define power 323
-#define atom_expr 324
-#define atom 325
-#define testlist_comp 326
-#define trailer 327
-#define subscriptlist 328
-#define subscript 329
-#define sliceop 330
-#define exprlist 331
-#define testlist 332
-#define dictorsetmaker 333
-#define classdef 334
-#define arglist 335
-#define argument 336
-#define comp_iter 337
-#define sync_comp_for 338
-#define comp_for 339
-#define comp_if 340
-#define encoding_decl 341
-#define yield_expr 342
-#define yield_arg 343
-#define func_body_suite 344
-#define func_type_input 345
-#define func_type 346
-#define typelist 347
diff --git a/include/grammar.h b/include/grammar.h
deleted file mode 100644
index 4b66b1e..0000000
--- a/include/grammar.h
+++ /dev/null
@@ -1,77 +0,0 @@
-
-/* Grammar interface */
-
-#ifndef Py_GRAMMAR_H
-#define Py_GRAMMAR_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "bitset.h" /* Sigh... */
-
-/* A label of an arc */
-
-typedef struct {
-    int          lb_type;
-    const char  *lb_str;
-} label;
-
-#define EMPTY 0         /* Label number 0 is by definition the empty label */
-
-/* A list of labels */
-
-typedef struct {
-    int          ll_nlabels;
-    const label *ll_label;
-} labellist;
-
-/* An arc from one state to another */
-
-typedef struct {
-    short       a_lbl;          /* Label of this arc */
-    short       a_arrow;        /* State where this arc goes to */
-} arc;
-
-/* A state in a DFA */
-
-typedef struct {
-    int          s_narcs;
-    const arc   *s_arc;         /* Array of arcs */
-
-    /* Optional accelerators */
-    int          s_lower;       /* Lowest label index */
-    int          s_upper;       /* Highest label index */
-    int         *s_accel;       /* Accelerator */
-    int          s_accept;      /* Nonzero for accepting state */
-} state;
-
-/* A DFA */
-
-typedef struct {
-    int          d_type;        /* Non-terminal this represents */
-    char        *d_name;        /* For printing */
-    int          d_nstates;
-    state       *d_state;       /* Array of states */
-    bitset       d_first;
-} dfa;
-
-/* A grammar */
-
-typedef struct {
-    int          g_ndfas;
-    const dfa   *g_dfa;         /* Array of DFAs */
-    const labellist g_ll;
-    int          g_start;       /* Start symbol of the grammar */
-    int          g_accel;       /* Set if accelerators present */
-} grammar;
-
-/* FUNCTIONS */
-const dfa *PyGrammar_FindDFA(grammar *g, int type);
-const char *PyGrammar_LabelRepr(label *lb);
-void PyGrammar_AddAccelerators(grammar *g);
-void PyGrammar_RemoveAccelerators(grammar *);
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_GRAMMAR_H */
diff --git a/include/internal/pegen_interface.h b/include/internal/pegen_interface.h
deleted file mode 100644
index ee4c77e..0000000
--- a/include/internal/pegen_interface.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef Py_PEGENINTERFACE
-#define Py_PEGENINTERFACE
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef Py_BUILD_CORE
-#  error "this header requires Py_BUILD_CORE define"
-#endif
-
-#include "Python.h"
-#include "Python-ast.h"
-
-PyAPI_FUNC(mod_ty) PyPegen_ASTFromString(
-    const char *str,
-    const char *filename,
-    int mode,
-    PyCompilerFlags *flags,
-    PyArena *arena);
-PyAPI_FUNC(mod_ty) PyPegen_ASTFromStringObject(
-    const char *str,
-    PyObject* filename,
-    int mode,
-    PyCompilerFlags *flags,
-    PyArena *arena);
-PyAPI_FUNC(mod_ty) PyPegen_ASTFromFileObject(
-    FILE *fp,
-    PyObject *filename_ob,
-    int mode,
-    const char *enc,
-    const char *ps1,
-    const char *ps2,
-    PyCompilerFlags *flags,
-    int *errcode,
-    PyArena *arena);
-PyAPI_FUNC(mod_ty) PyPegen_ASTFromFilename(
-    const char *filename,
-    int mode,
-    PyCompilerFlags *flags,
-    PyArena *arena);
-
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_PEGENINTERFACE*/
diff --git a/include/internal/pycore_asdl.h b/include/internal/pycore_asdl.h
new file mode 100644
index 0000000..c0b07c3
--- /dev/null
+++ b/include/internal/pycore_asdl.h
@@ -0,0 +1,112 @@
+#ifndef Py_INTERNAL_ASDL_H
+#define Py_INTERNAL_ASDL_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_pyarena.h"       // _PyArena_Malloc()
+
+typedef PyObject * identifier;
+typedef PyObject * string;
+typedef PyObject * object;
+typedef PyObject * constant;
+
+/* It would be nice if the code generated by asdl_c.py was completely
+   independent of Python, but it is a goal that requires too much work
+   at this stage.  So, for example, I'll represent identifiers as
+   interned Python strings.
+*/
+
+#define _ASDL_SEQ_HEAD \
+    Py_ssize_t size;   \
+    void **elements;
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+} asdl_seq;
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    void *typed_elements[1];
+} asdl_generic_seq;
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    PyObject *typed_elements[1];
+} asdl_identifier_seq;
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    int typed_elements[1];
+} asdl_int_seq;
+
+asdl_generic_seq *_Py_asdl_generic_seq_new(Py_ssize_t size, PyArena *arena);
+asdl_identifier_seq *_Py_asdl_identifier_seq_new(Py_ssize_t size, PyArena *arena);
+asdl_int_seq *_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena);
+
+
+#define GENERATE_ASDL_SEQ_CONSTRUCTOR(NAME, TYPE) \
+asdl_ ## NAME ## _seq *_Py_asdl_ ## NAME ## _seq_new(Py_ssize_t size, PyArena *arena) \
+{ \
+    asdl_ ## NAME ## _seq *seq = NULL; \
+    size_t n; \
+    /* check size is sane */ \
+    if (size < 0 || \
+        (size && (((size_t)size - 1) > (SIZE_MAX / sizeof(void *))))) { \
+        PyErr_NoMemory(); \
+        return NULL; \
+    } \
+    n = (size ? (sizeof(TYPE *) * (size - 1)) : 0); \
+    /* check if size can be added safely */ \
+    if (n > SIZE_MAX - sizeof(asdl_ ## NAME ## _seq)) { \
+        PyErr_NoMemory(); \
+        return NULL; \
+    } \
+    n += sizeof(asdl_ ## NAME ## _seq); \
+    seq = (asdl_ ## NAME ## _seq *)_PyArena_Malloc(arena, n); \
+    if (!seq) { \
+        PyErr_NoMemory(); \
+        return NULL; \
+    } \
+    memset(seq, 0, n); \
+    seq->size = size; \
+    seq->elements = (void**)seq->typed_elements; \
+    return seq; \
+}
+
+#define asdl_seq_GET_UNTYPED(S, I) (S)->elements[(I)]
+#define asdl_seq_GET(S, I) (S)->typed_elements[(I)]
+#define asdl_seq_LEN(S) ((S) == NULL ? 0 : (S)->size)
+
+#ifdef Py_DEBUG
+#  define asdl_seq_SET(S, I, V) \
+    do { \
+        Py_ssize_t _asdl_i = (I); \
+        assert((S) != NULL); \
+        assert(0 <= _asdl_i && _asdl_i < (S)->size); \
+        (S)->typed_elements[_asdl_i] = (V); \
+    } while (0)
+#else
+#  define asdl_seq_SET(S, I, V) (S)->typed_elements[I] = (V)
+#endif
+
+#ifdef Py_DEBUG
+#  define asdl_seq_SET_UNTYPED(S, I, V) \
+    do { \
+        Py_ssize_t _asdl_i = (I); \
+        assert((S) != NULL); \
+        assert(0 <= _asdl_i && _asdl_i < (S)->size); \
+        (S)->elements[_asdl_i] = (V); \
+    } while (0)
+#else
+#  define asdl_seq_SET_UNTYPED(S, I, V) (S)->elements[I] = (V)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_ASDL_H */
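Editor's note: a sketch of the typed-sequence API above (core-internal; 'arena' is assumed to be an existing PyArena*, and Lt/LtE are cmpop_ty values from pycore_ast.h):

    asdl_int_seq *ops = _Py_asdl_int_seq_new(2, arena);
    if (ops == NULL) {
        return NULL;
    }
    asdl_seq_SET(ops, 0, Lt);   /* bounds-checked under Py_DEBUG */
    asdl_seq_SET(ops, 1, LtE);
    assert(asdl_seq_LEN(ops) == 2);
    assert(asdl_seq_GET(ops, 1) == LtE);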
diff --git a/include/internal/pycore_ast.h b/include/internal/pycore_ast.h
new file mode 100644
index 0000000..ebb6a90
--- /dev/null
+++ b/include/internal/pycore_ast.h
@@ -0,0 +1,855 @@
+// File automatically generated by Parser/asdl_c.py.
+
+#ifndef Py_INTERNAL_AST_H
+#define Py_INTERNAL_AST_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_asdl.h"
+
+typedef struct _mod *mod_ty;
+
+typedef struct _stmt *stmt_ty;
+
+typedef struct _expr *expr_ty;
+
+typedef enum _expr_context { Load=1, Store=2, Del=3 } expr_context_ty;
+
+typedef enum _boolop { And=1, Or=2 } boolop_ty;
+
+typedef enum _operator { Add=1, Sub=2, Mult=3, MatMult=4, Div=5, Mod=6, Pow=7,
+                         LShift=8, RShift=9, BitOr=10, BitXor=11, BitAnd=12,
+                         FloorDiv=13 } operator_ty;
+
+typedef enum _unaryop { Invert=1, Not=2, UAdd=3, USub=4 } unaryop_ty;
+
+typedef enum _cmpop { Eq=1, NotEq=2, Lt=3, LtE=4, Gt=5, GtE=6, Is=7, IsNot=8,
+                      In=9, NotIn=10 } cmpop_ty;
+
+typedef struct _comprehension *comprehension_ty;
+
+typedef struct _excepthandler *excepthandler_ty;
+
+typedef struct _arguments *arguments_ty;
+
+typedef struct _arg *arg_ty;
+
+typedef struct _keyword *keyword_ty;
+
+typedef struct _alias *alias_ty;
+
+typedef struct _withitem *withitem_ty;
+
+typedef struct _match_case *match_case_ty;
+
+typedef struct _pattern *pattern_ty;
+
+typedef struct _type_ignore *type_ignore_ty;
+
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    mod_ty typed_elements[1];
+} asdl_mod_seq;
+
+asdl_mod_seq *_Py_asdl_mod_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    stmt_ty typed_elements[1];
+} asdl_stmt_seq;
+
+asdl_stmt_seq *_Py_asdl_stmt_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    expr_ty typed_elements[1];
+} asdl_expr_seq;
+
+asdl_expr_seq *_Py_asdl_expr_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    comprehension_ty typed_elements[1];
+} asdl_comprehension_seq;
+
+asdl_comprehension_seq *_Py_asdl_comprehension_seq_new(Py_ssize_t size, PyArena
+                                                       *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    excepthandler_ty typed_elements[1];
+} asdl_excepthandler_seq;
+
+asdl_excepthandler_seq *_Py_asdl_excepthandler_seq_new(Py_ssize_t size, PyArena
+                                                       *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    arguments_ty typed_elements[1];
+} asdl_arguments_seq;
+
+asdl_arguments_seq *_Py_asdl_arguments_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    arg_ty typed_elements[1];
+} asdl_arg_seq;
+
+asdl_arg_seq *_Py_asdl_arg_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    keyword_ty typed_elements[1];
+} asdl_keyword_seq;
+
+asdl_keyword_seq *_Py_asdl_keyword_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    alias_ty typed_elements[1];
+} asdl_alias_seq;
+
+asdl_alias_seq *_Py_asdl_alias_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    withitem_ty typed_elements[1];
+} asdl_withitem_seq;
+
+asdl_withitem_seq *_Py_asdl_withitem_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    match_case_ty typed_elements[1];
+} asdl_match_case_seq;
+
+asdl_match_case_seq *_Py_asdl_match_case_seq_new(Py_ssize_t size, PyArena
+                                                 *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    pattern_ty typed_elements[1];
+} asdl_pattern_seq;
+
+asdl_pattern_seq *_Py_asdl_pattern_seq_new(Py_ssize_t size, PyArena *arena);
+
+typedef struct {
+    _ASDL_SEQ_HEAD
+    type_ignore_ty typed_elements[1];
+} asdl_type_ignore_seq;
+
+asdl_type_ignore_seq *_Py_asdl_type_ignore_seq_new(Py_ssize_t size, PyArena
+                                                   *arena);
+
+
+enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3,
+                 FunctionType_kind=4};
+struct _mod {
+    enum _mod_kind kind;
+    union {
+        struct {
+            asdl_stmt_seq *body;
+            asdl_type_ignore_seq *type_ignores;
+        } Module;
+
+        struct {
+            asdl_stmt_seq *body;
+        } Interactive;
+
+        struct {
+            expr_ty body;
+        } Expression;
+
+        struct {
+            asdl_expr_seq *argtypes;
+            expr_ty returns;
+        } FunctionType;
+
+    } v;
+};
+
+enum _stmt_kind {FunctionDef_kind=1, AsyncFunctionDef_kind=2, ClassDef_kind=3,
+                  Return_kind=4, Delete_kind=5, Assign_kind=6,
+                  AugAssign_kind=7, AnnAssign_kind=8, For_kind=9,
+                  AsyncFor_kind=10, While_kind=11, If_kind=12, With_kind=13,
+                  AsyncWith_kind=14, Match_kind=15, Raise_kind=16, Try_kind=17,
+                  Assert_kind=18, Import_kind=19, ImportFrom_kind=20,
+                  Global_kind=21, Nonlocal_kind=22, Expr_kind=23, Pass_kind=24,
+                  Break_kind=25, Continue_kind=26};
+struct _stmt {
+    enum _stmt_kind kind;
+    union {
+        struct {
+            identifier name;
+            arguments_ty args;
+            asdl_stmt_seq *body;
+            asdl_expr_seq *decorator_list;
+            expr_ty returns;
+            string type_comment;
+        } FunctionDef;
+
+        struct {
+            identifier name;
+            arguments_ty args;
+            asdl_stmt_seq *body;
+            asdl_expr_seq *decorator_list;
+            expr_ty returns;
+            string type_comment;
+        } AsyncFunctionDef;
+
+        struct {
+            identifier name;
+            asdl_expr_seq *bases;
+            asdl_keyword_seq *keywords;
+            asdl_stmt_seq *body;
+            asdl_expr_seq *decorator_list;
+        } ClassDef;
+
+        struct {
+            expr_ty value;
+        } Return;
+
+        struct {
+            asdl_expr_seq *targets;
+        } Delete;
+
+        struct {
+            asdl_expr_seq *targets;
+            expr_ty value;
+            string type_comment;
+        } Assign;
+
+        struct {
+            expr_ty target;
+            operator_ty op;
+            expr_ty value;
+        } AugAssign;
+
+        struct {
+            expr_ty target;
+            expr_ty annotation;
+            expr_ty value;
+            int simple;
+        } AnnAssign;
+
+        struct {
+            expr_ty target;
+            expr_ty iter;
+            asdl_stmt_seq *body;
+            asdl_stmt_seq *orelse;
+            string type_comment;
+        } For;
+
+        struct {
+            expr_ty target;
+            expr_ty iter;
+            asdl_stmt_seq *body;
+            asdl_stmt_seq *orelse;
+            string type_comment;
+        } AsyncFor;
+
+        struct {
+            expr_ty test;
+            asdl_stmt_seq *body;
+            asdl_stmt_seq *orelse;
+        } While;
+
+        struct {
+            expr_ty test;
+            asdl_stmt_seq *body;
+            asdl_stmt_seq *orelse;
+        } If;
+
+        struct {
+            asdl_withitem_seq *items;
+            asdl_stmt_seq *body;
+            string type_comment;
+        } With;
+
+        struct {
+            asdl_withitem_seq *items;
+            asdl_stmt_seq *body;
+            string type_comment;
+        } AsyncWith;
+
+        struct {
+            expr_ty subject;
+            asdl_match_case_seq *cases;
+        } Match;
+
+        struct {
+            expr_ty exc;
+            expr_ty cause;
+        } Raise;
+
+        struct {
+            asdl_stmt_seq *body;
+            asdl_excepthandler_seq *handlers;
+            asdl_stmt_seq *orelse;
+            asdl_stmt_seq *finalbody;
+        } Try;
+
+        struct {
+            expr_ty test;
+            expr_ty msg;
+        } Assert;
+
+        struct {
+            asdl_alias_seq *names;
+        } Import;
+
+        struct {
+            identifier module;
+            asdl_alias_seq *names;
+            int level;
+        } ImportFrom;
+
+        struct {
+            asdl_identifier_seq *names;
+        } Global;
+
+        struct {
+            asdl_identifier_seq *names;
+        } Nonlocal;
+
+        struct {
+            expr_ty value;
+        } Expr;
+
+    } v;
+    int lineno;
+    int col_offset;
+    int end_lineno;
+    int end_col_offset;
+};
+
+enum _expr_kind {BoolOp_kind=1, NamedExpr_kind=2, BinOp_kind=3, UnaryOp_kind=4,
+                  Lambda_kind=5, IfExp_kind=6, Dict_kind=7, Set_kind=8,
+                  ListComp_kind=9, SetComp_kind=10, DictComp_kind=11,
+                  GeneratorExp_kind=12, Await_kind=13, Yield_kind=14,
+                  YieldFrom_kind=15, Compare_kind=16, Call_kind=17,
+                  FormattedValue_kind=18, JoinedStr_kind=19, Constant_kind=20,
+                  Attribute_kind=21, Subscript_kind=22, Starred_kind=23,
+                  Name_kind=24, List_kind=25, Tuple_kind=26, Slice_kind=27};
+struct _expr {
+    enum _expr_kind kind;
+    union {
+        struct {
+            boolop_ty op;
+            asdl_expr_seq *values;
+        } BoolOp;
+
+        struct {
+            expr_ty target;
+            expr_ty value;
+        } NamedExpr;
+
+        struct {
+            expr_ty left;
+            operator_ty op;
+            expr_ty right;
+        } BinOp;
+
+        struct {
+            unaryop_ty op;
+            expr_ty operand;
+        } UnaryOp;
+
+        struct {
+            arguments_ty args;
+            expr_ty body;
+        } Lambda;
+
+        struct {
+            expr_ty test;
+            expr_ty body;
+            expr_ty orelse;
+        } IfExp;
+
+        struct {
+            asdl_expr_seq *keys;
+            asdl_expr_seq *values;
+        } Dict;
+
+        struct {
+            asdl_expr_seq *elts;
+        } Set;
+
+        struct {
+            expr_ty elt;
+            asdl_comprehension_seq *generators;
+        } ListComp;
+
+        struct {
+            expr_ty elt;
+            asdl_comprehension_seq *generators;
+        } SetComp;
+
+        struct {
+            expr_ty key;
+            expr_ty value;
+            asdl_comprehension_seq *generators;
+        } DictComp;
+
+        struct {
+            expr_ty elt;
+            asdl_comprehension_seq *generators;
+        } GeneratorExp;
+
+        struct {
+            expr_ty value;
+        } Await;
+
+        struct {
+            expr_ty value;
+        } Yield;
+
+        struct {
+            expr_ty value;
+        } YieldFrom;
+
+        struct {
+            expr_ty left;
+            asdl_int_seq *ops;
+            asdl_expr_seq *comparators;
+        } Compare;
+
+        struct {
+            expr_ty func;
+            asdl_expr_seq *args;
+            asdl_keyword_seq *keywords;
+        } Call;
+
+        struct {
+            expr_ty value;
+            int conversion;
+            expr_ty format_spec;
+        } FormattedValue;
+
+        struct {
+            asdl_expr_seq *values;
+        } JoinedStr;
+
+        struct {
+            constant value;
+            string kind;
+        } Constant;
+
+        struct {
+            expr_ty value;
+            identifier attr;
+            expr_context_ty ctx;
+        } Attribute;
+
+        struct {
+            expr_ty value;
+            expr_ty slice;
+            expr_context_ty ctx;
+        } Subscript;
+
+        struct {
+            expr_ty value;
+            expr_context_ty ctx;
+        } Starred;
+
+        struct {
+            identifier id;
+            expr_context_ty ctx;
+        } Name;
+
+        struct {
+            asdl_expr_seq *elts;
+            expr_context_ty ctx;
+        } List;
+
+        struct {
+            asdl_expr_seq *elts;
+            expr_context_ty ctx;
+        } Tuple;
+
+        struct {
+            expr_ty lower;
+            expr_ty upper;
+            expr_ty step;
+        } Slice;
+
+    } v;
+    int lineno;
+    int col_offset;
+    int end_lineno;
+    int end_col_offset;
+};
+
+struct _comprehension {
+    expr_ty target;
+    expr_ty iter;
+    asdl_expr_seq *ifs;
+    int is_async;
+};
+
+enum _excepthandler_kind {ExceptHandler_kind=1};
+struct _excepthandler {
+    enum _excepthandler_kind kind;
+    union {
+        struct {
+            expr_ty type;
+            identifier name;
+            asdl_stmt_seq *body;
+        } ExceptHandler;
+
+    } v;
+    int lineno;
+    int col_offset;
+    int end_lineno;
+    int end_col_offset;
+};
+
+struct _arguments {
+    asdl_arg_seq *posonlyargs;
+    asdl_arg_seq *args;
+    arg_ty vararg;
+    asdl_arg_seq *kwonlyargs;
+    asdl_expr_seq *kw_defaults;
+    arg_ty kwarg;
+    asdl_expr_seq *defaults;
+};
+
+struct _arg {
+    identifier arg;
+    expr_ty annotation;
+    string type_comment;
+    int lineno;
+    int col_offset;
+    int end_lineno;
+    int end_col_offset;
+};
+
+struct _keyword {
+    identifier arg;
+    expr_ty value;
+    int lineno;
+    int col_offset;
+    int end_lineno;
+    int end_col_offset;
+};
+
+struct _alias {
+    identifier name;
+    identifier asname;
+    int lineno;
+    int col_offset;
+    int end_lineno;
+    int end_col_offset;
+};
+
+struct _withitem {
+    expr_ty context_expr;
+    expr_ty optional_vars;
+};
+
+struct _match_case {
+    pattern_ty pattern;
+    expr_ty guard;
+    asdl_stmt_seq *body;
+};
+
+enum _pattern_kind {MatchValue_kind=1, MatchSingleton_kind=2,
+                     MatchSequence_kind=3, MatchMapping_kind=4,
+                     MatchClass_kind=5, MatchStar_kind=6, MatchAs_kind=7,
+                     MatchOr_kind=8};
+struct _pattern {
+    enum _pattern_kind kind;
+    union {
+        struct {
+            expr_ty value;
+        } MatchValue;
+
+        struct {
+            constant value;
+        } MatchSingleton;
+
+        struct {
+            asdl_pattern_seq *patterns;
+        } MatchSequence;
+
+        struct {
+            asdl_expr_seq *keys;
+            asdl_pattern_seq *patterns;
+            identifier rest;
+        } MatchMapping;
+
+        struct {
+            expr_ty cls;
+            asdl_pattern_seq *patterns;
+            asdl_identifier_seq *kwd_attrs;
+            asdl_pattern_seq *kwd_patterns;
+        } MatchClass;
+
+        struct {
+            identifier name;
+        } MatchStar;
+
+        struct {
+            pattern_ty pattern;
+            identifier name;
+        } MatchAs;
+
+        struct {
+            asdl_pattern_seq *patterns;
+        } MatchOr;
+
+    } v;
+    int lineno;
+    int col_offset;
+    int end_lineno;
+    int end_col_offset;
+};
+
+enum _type_ignore_kind {TypeIgnore_kind=1};
+struct _type_ignore {
+    enum _type_ignore_kind kind;
+    union {
+        struct {
+            int lineno;
+            string tag;
+        } TypeIgnore;
+
+    } v;
+};
+
+
+// Note: these macros affect function definitions, not only call sites.
+mod_ty _PyAST_Module(asdl_stmt_seq * body, asdl_type_ignore_seq * type_ignores,
+                     PyArena *arena);
+mod_ty _PyAST_Interactive(asdl_stmt_seq * body, PyArena *arena);
+mod_ty _PyAST_Expression(expr_ty body, PyArena *arena);
+mod_ty _PyAST_FunctionType(asdl_expr_seq * argtypes, expr_ty returns, PyArena
+                           *arena);
+stmt_ty _PyAST_FunctionDef(identifier name, arguments_ty args, asdl_stmt_seq *
+                           body, asdl_expr_seq * decorator_list, expr_ty
+                           returns, string type_comment, int lineno, int
+                           col_offset, int end_lineno, int end_col_offset,
+                           PyArena *arena);
+stmt_ty _PyAST_AsyncFunctionDef(identifier name, arguments_ty args,
+                                asdl_stmt_seq * body, asdl_expr_seq *
+                                decorator_list, expr_ty returns, string
+                                type_comment, int lineno, int col_offset, int
+                                end_lineno, int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_ClassDef(identifier name, asdl_expr_seq * bases,
+                        asdl_keyword_seq * keywords, asdl_stmt_seq * body,
+                        asdl_expr_seq * decorator_list, int lineno, int
+                        col_offset, int end_lineno, int end_col_offset, PyArena
+                        *arena);
+stmt_ty _PyAST_Return(expr_ty value, int lineno, int col_offset, int
+                      end_lineno, int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Delete(asdl_expr_seq * targets, int lineno, int col_offset, int
+                      end_lineno, int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Assign(asdl_expr_seq * targets, expr_ty value, string
+                      type_comment, int lineno, int col_offset, int end_lineno,
+                      int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_AugAssign(expr_ty target, operator_ty op, expr_ty value, int
+                         lineno, int col_offset, int end_lineno, int
+                         end_col_offset, PyArena *arena);
+stmt_ty _PyAST_AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int
+                         simple, int lineno, int col_offset, int end_lineno,
+                         int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_For(expr_ty target, expr_ty iter, asdl_stmt_seq * body,
+                   asdl_stmt_seq * orelse, string type_comment, int lineno, int
+                   col_offset, int end_lineno, int end_col_offset, PyArena
+                   *arena);
+stmt_ty _PyAST_AsyncFor(expr_ty target, expr_ty iter, asdl_stmt_seq * body,
+                        asdl_stmt_seq * orelse, string type_comment, int
+                        lineno, int col_offset, int end_lineno, int
+                        end_col_offset, PyArena *arena);
+stmt_ty _PyAST_While(expr_ty test, asdl_stmt_seq * body, asdl_stmt_seq *
+                     orelse, int lineno, int col_offset, int end_lineno, int
+                     end_col_offset, PyArena *arena);
+stmt_ty _PyAST_If(expr_ty test, asdl_stmt_seq * body, asdl_stmt_seq * orelse,
+                  int lineno, int col_offset, int end_lineno, int
+                  end_col_offset, PyArena *arena);
+stmt_ty _PyAST_With(asdl_withitem_seq * items, asdl_stmt_seq * body, string
+                    type_comment, int lineno, int col_offset, int end_lineno,
+                    int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_AsyncWith(asdl_withitem_seq * items, asdl_stmt_seq * body,
+                         string type_comment, int lineno, int col_offset, int
+                         end_lineno, int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Match(expr_ty subject, asdl_match_case_seq * cases, int lineno,
+                     int col_offset, int end_lineno, int end_col_offset,
+                     PyArena *arena);
+stmt_ty _PyAST_Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset,
+                     int end_lineno, int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Try(asdl_stmt_seq * body, asdl_excepthandler_seq * handlers,
+                   asdl_stmt_seq * orelse, asdl_stmt_seq * finalbody, int
+                   lineno, int col_offset, int end_lineno, int end_col_offset,
+                   PyArena *arena);
+stmt_ty _PyAST_Assert(expr_ty test, expr_ty msg, int lineno, int col_offset,
+                      int end_lineno, int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Import(asdl_alias_seq * names, int lineno, int col_offset, int
+                      end_lineno, int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_ImportFrom(identifier module, asdl_alias_seq * names, int level,
+                          int lineno, int col_offset, int end_lineno, int
+                          end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Global(asdl_identifier_seq * names, int lineno, int col_offset,
+                      int end_lineno, int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Nonlocal(asdl_identifier_seq * names, int lineno, int
+                        col_offset, int end_lineno, int end_col_offset, PyArena
+                        *arena);
+stmt_ty _PyAST_Expr(expr_ty value, int lineno, int col_offset, int end_lineno,
+                    int end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Pass(int lineno, int col_offset, int end_lineno, int
+                    end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Break(int lineno, int col_offset, int end_lineno, int
+                     end_col_offset, PyArena *arena);
+stmt_ty _PyAST_Continue(int lineno, int col_offset, int end_lineno, int
+                        end_col_offset, PyArena *arena);
+expr_ty _PyAST_BoolOp(boolop_ty op, asdl_expr_seq * values, int lineno, int
+                      col_offset, int end_lineno, int end_col_offset, PyArena
+                      *arena);
+expr_ty _PyAST_NamedExpr(expr_ty target, expr_ty value, int lineno, int
+                         col_offset, int end_lineno, int end_col_offset,
+                         PyArena *arena);
+expr_ty _PyAST_BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno,
+                     int col_offset, int end_lineno, int end_col_offset,
+                     PyArena *arena);
+expr_ty _PyAST_UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int
+                       col_offset, int end_lineno, int end_col_offset, PyArena
+                       *arena);
+expr_ty _PyAST_Lambda(arguments_ty args, expr_ty body, int lineno, int
+                      col_offset, int end_lineno, int end_col_offset, PyArena
+                      *arena);
+expr_ty _PyAST_IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno,
+                     int col_offset, int end_lineno, int end_col_offset,
+                     PyArena *arena);
+expr_ty _PyAST_Dict(asdl_expr_seq * keys, asdl_expr_seq * values, int lineno,
+                    int col_offset, int end_lineno, int end_col_offset, PyArena
+                    *arena);
+expr_ty _PyAST_Set(asdl_expr_seq * elts, int lineno, int col_offset, int
+                   end_lineno, int end_col_offset, PyArena *arena);
+expr_ty _PyAST_ListComp(expr_ty elt, asdl_comprehension_seq * generators, int
+                        lineno, int col_offset, int end_lineno, int
+                        end_col_offset, PyArena *arena);
+expr_ty _PyAST_SetComp(expr_ty elt, asdl_comprehension_seq * generators, int
+                       lineno, int col_offset, int end_lineno, int
+                       end_col_offset, PyArena *arena);
+expr_ty _PyAST_DictComp(expr_ty key, expr_ty value, asdl_comprehension_seq *
+                        generators, int lineno, int col_offset, int end_lineno,
+                        int end_col_offset, PyArena *arena);
+expr_ty _PyAST_GeneratorExp(expr_ty elt, asdl_comprehension_seq * generators,
+                            int lineno, int col_offset, int end_lineno, int
+                            end_col_offset, PyArena *arena);
+expr_ty _PyAST_Await(expr_ty value, int lineno, int col_offset, int end_lineno,
+                     int end_col_offset, PyArena *arena);
+expr_ty _PyAST_Yield(expr_ty value, int lineno, int col_offset, int end_lineno,
+                     int end_col_offset, PyArena *arena);
+expr_ty _PyAST_YieldFrom(expr_ty value, int lineno, int col_offset, int
+                         end_lineno, int end_col_offset, PyArena *arena);
+expr_ty _PyAST_Compare(expr_ty left, asdl_int_seq * ops, asdl_expr_seq *
+                       comparators, int lineno, int col_offset, int end_lineno,
+                       int end_col_offset, PyArena *arena);
+expr_ty _PyAST_Call(expr_ty func, asdl_expr_seq * args, asdl_keyword_seq *
+                    keywords, int lineno, int col_offset, int end_lineno, int
+                    end_col_offset, PyArena *arena);
+expr_ty _PyAST_FormattedValue(expr_ty value, int conversion, expr_ty
+                              format_spec, int lineno, int col_offset, int
+                              end_lineno, int end_col_offset, PyArena *arena);
+expr_ty _PyAST_JoinedStr(asdl_expr_seq * values, int lineno, int col_offset,
+                         int end_lineno, int end_col_offset, PyArena *arena);
+expr_ty _PyAST_Constant(constant value, string kind, int lineno, int
+                        col_offset, int end_lineno, int end_col_offset, PyArena
+                        *arena);
+expr_ty _PyAST_Attribute(expr_ty value, identifier attr, expr_context_ty ctx,
+                         int lineno, int col_offset, int end_lineno, int
+                         end_col_offset, PyArena *arena);
+expr_ty _PyAST_Subscript(expr_ty value, expr_ty slice, expr_context_ty ctx, int
+                         lineno, int col_offset, int end_lineno, int
+                         end_col_offset, PyArena *arena);
+expr_ty _PyAST_Starred(expr_ty value, expr_context_ty ctx, int lineno, int
+                       col_offset, int end_lineno, int end_col_offset, PyArena
+                       *arena);
+expr_ty _PyAST_Name(identifier id, expr_context_ty ctx, int lineno, int
+                    col_offset, int end_lineno, int end_col_offset, PyArena
+                    *arena);
+expr_ty _PyAST_List(asdl_expr_seq * elts, expr_context_ty ctx, int lineno, int
+                    col_offset, int end_lineno, int end_col_offset, PyArena
+                    *arena);
+expr_ty _PyAST_Tuple(asdl_expr_seq * elts, expr_context_ty ctx, int lineno, int
+                     col_offset, int end_lineno, int end_col_offset, PyArena
+                     *arena);
+expr_ty _PyAST_Slice(expr_ty lower, expr_ty upper, expr_ty step, int lineno,
+                     int col_offset, int end_lineno, int end_col_offset,
+                     PyArena *arena);
+comprehension_ty _PyAST_comprehension(expr_ty target, expr_ty iter,
+                                      asdl_expr_seq * ifs, int is_async,
+                                      PyArena *arena);
+excepthandler_ty _PyAST_ExceptHandler(expr_ty type, identifier name,
+                                      asdl_stmt_seq * body, int lineno, int
+                                      col_offset, int end_lineno, int
+                                      end_col_offset, PyArena *arena);
+arguments_ty _PyAST_arguments(asdl_arg_seq * posonlyargs, asdl_arg_seq * args,
+                              arg_ty vararg, asdl_arg_seq * kwonlyargs,
+                              asdl_expr_seq * kw_defaults, arg_ty kwarg,
+                              asdl_expr_seq * defaults, PyArena *arena);
+arg_ty _PyAST_arg(identifier arg, expr_ty annotation, string type_comment, int
+                  lineno, int col_offset, int end_lineno, int end_col_offset,
+                  PyArena *arena);
+keyword_ty _PyAST_keyword(identifier arg, expr_ty value, int lineno, int
+                          col_offset, int end_lineno, int end_col_offset,
+                          PyArena *arena);
+alias_ty _PyAST_alias(identifier name, identifier asname, int lineno, int
+                      col_offset, int end_lineno, int end_col_offset, PyArena
+                      *arena);
+withitem_ty _PyAST_withitem(expr_ty context_expr, expr_ty optional_vars,
+                            PyArena *arena);
+match_case_ty _PyAST_match_case(pattern_ty pattern, expr_ty guard,
+                                asdl_stmt_seq * body, PyArena *arena);
+pattern_ty _PyAST_MatchValue(expr_ty value, int lineno, int col_offset, int
+                             end_lineno, int end_col_offset, PyArena *arena);
+pattern_ty _PyAST_MatchSingleton(constant value, int lineno, int col_offset,
+                                 int end_lineno, int end_col_offset, PyArena
+                                 *arena);
+pattern_ty _PyAST_MatchSequence(asdl_pattern_seq * patterns, int lineno, int
+                                col_offset, int end_lineno, int end_col_offset,
+                                PyArena *arena);
+pattern_ty _PyAST_MatchMapping(asdl_expr_seq * keys, asdl_pattern_seq *
+                               patterns, identifier rest, int lineno, int
+                               col_offset, int end_lineno, int end_col_offset,
+                               PyArena *arena);
+pattern_ty _PyAST_MatchClass(expr_ty cls, asdl_pattern_seq * patterns,
+                             asdl_identifier_seq * kwd_attrs, asdl_pattern_seq
+                             * kwd_patterns, int lineno, int col_offset, int
+                             end_lineno, int end_col_offset, PyArena *arena);
+pattern_ty _PyAST_MatchStar(identifier name, int lineno, int col_offset, int
+                            end_lineno, int end_col_offset, PyArena *arena);
+pattern_ty _PyAST_MatchAs(pattern_ty pattern, identifier name, int lineno, int
+                          col_offset, int end_lineno, int end_col_offset,
+                          PyArena *arena);
+pattern_ty _PyAST_MatchOr(asdl_pattern_seq * patterns, int lineno, int
+                          col_offset, int end_lineno, int end_col_offset,
+                          PyArena *arena);
+type_ignore_ty _PyAST_TypeIgnore(int lineno, string tag, PyArena *arena);
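+
+/* Illustrative sketch (not part of the generated header): these constructors
+   compose to build AST nodes. For example, the statement "x = 1" at line 1,
+   columns 0..5, could be built as follows, assuming an arena created with
+   _PyArena_New() and hypothetical interned objects name_x and one (error
+   handling omitted):
+
+       expr_ty target = _PyAST_Name(name_x, Store, 1, 0, 1, 1, arena);
+       expr_ty value = _PyAST_Constant(one, NULL, 1, 4, 1, 5, arena);
+       asdl_expr_seq *targets = _Py_asdl_expr_seq_new(1, arena);
+       asdl_seq_SET(targets, 0, target);
+       stmt_ty assign = _PyAST_Assign(targets, value, NULL, 1, 0, 1, 5, arena);
+*/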
+
+
+PyObject* PyAST_mod2obj(mod_ty t);
+mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);
+int PyAST_Check(PyObject* obj);
+
+extern int _PyAST_Validate(mod_ty);
+
+/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */
+extern PyObject* _PyAST_ExprAsUnicode(expr_ty);
+
+/* Return the borrowed reference to the first literal string in the
+   sequence of statements or NULL if it doesn't start from a literal string.
+   Doesn't set exception. */
+extern PyObject* _PyAST_GetDocString(asdl_stmt_seq *);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_AST_H */
diff --git a/include/internal/pycore_ast_state.h b/include/internal/pycore_ast_state.h
new file mode 100644
index 0000000..882cd09
--- /dev/null
+++ b/include/internal/pycore_ast_state.h
@@ -0,0 +1,255 @@
+// File automatically generated by Parser/asdl_c.py.
+
+#ifndef Py_INTERNAL_AST_STATE_H
+#define Py_INTERNAL_AST_STATE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+struct ast_state {
+    int initialized;
+    PyObject *AST_type;
+    PyObject *Add_singleton;
+    PyObject *Add_type;
+    PyObject *And_singleton;
+    PyObject *And_type;
+    PyObject *AnnAssign_type;
+    PyObject *Assert_type;
+    PyObject *Assign_type;
+    PyObject *AsyncFor_type;
+    PyObject *AsyncFunctionDef_type;
+    PyObject *AsyncWith_type;
+    PyObject *Attribute_type;
+    PyObject *AugAssign_type;
+    PyObject *Await_type;
+    PyObject *BinOp_type;
+    PyObject *BitAnd_singleton;
+    PyObject *BitAnd_type;
+    PyObject *BitOr_singleton;
+    PyObject *BitOr_type;
+    PyObject *BitXor_singleton;
+    PyObject *BitXor_type;
+    PyObject *BoolOp_type;
+    PyObject *Break_type;
+    PyObject *Call_type;
+    PyObject *ClassDef_type;
+    PyObject *Compare_type;
+    PyObject *Constant_type;
+    PyObject *Continue_type;
+    PyObject *Del_singleton;
+    PyObject *Del_type;
+    PyObject *Delete_type;
+    PyObject *DictComp_type;
+    PyObject *Dict_type;
+    PyObject *Div_singleton;
+    PyObject *Div_type;
+    PyObject *Eq_singleton;
+    PyObject *Eq_type;
+    PyObject *ExceptHandler_type;
+    PyObject *Expr_type;
+    PyObject *Expression_type;
+    PyObject *FloorDiv_singleton;
+    PyObject *FloorDiv_type;
+    PyObject *For_type;
+    PyObject *FormattedValue_type;
+    PyObject *FunctionDef_type;
+    PyObject *FunctionType_type;
+    PyObject *GeneratorExp_type;
+    PyObject *Global_type;
+    PyObject *GtE_singleton;
+    PyObject *GtE_type;
+    PyObject *Gt_singleton;
+    PyObject *Gt_type;
+    PyObject *IfExp_type;
+    PyObject *If_type;
+    PyObject *ImportFrom_type;
+    PyObject *Import_type;
+    PyObject *In_singleton;
+    PyObject *In_type;
+    PyObject *Interactive_type;
+    PyObject *Invert_singleton;
+    PyObject *Invert_type;
+    PyObject *IsNot_singleton;
+    PyObject *IsNot_type;
+    PyObject *Is_singleton;
+    PyObject *Is_type;
+    PyObject *JoinedStr_type;
+    PyObject *LShift_singleton;
+    PyObject *LShift_type;
+    PyObject *Lambda_type;
+    PyObject *ListComp_type;
+    PyObject *List_type;
+    PyObject *Load_singleton;
+    PyObject *Load_type;
+    PyObject *LtE_singleton;
+    PyObject *LtE_type;
+    PyObject *Lt_singleton;
+    PyObject *Lt_type;
+    PyObject *MatMult_singleton;
+    PyObject *MatMult_type;
+    PyObject *MatchAs_type;
+    PyObject *MatchClass_type;
+    PyObject *MatchMapping_type;
+    PyObject *MatchOr_type;
+    PyObject *MatchSequence_type;
+    PyObject *MatchSingleton_type;
+    PyObject *MatchStar_type;
+    PyObject *MatchValue_type;
+    PyObject *Match_type;
+    PyObject *Mod_singleton;
+    PyObject *Mod_type;
+    PyObject *Module_type;
+    PyObject *Mult_singleton;
+    PyObject *Mult_type;
+    PyObject *Name_type;
+    PyObject *NamedExpr_type;
+    PyObject *Nonlocal_type;
+    PyObject *NotEq_singleton;
+    PyObject *NotEq_type;
+    PyObject *NotIn_singleton;
+    PyObject *NotIn_type;
+    PyObject *Not_singleton;
+    PyObject *Not_type;
+    PyObject *Or_singleton;
+    PyObject *Or_type;
+    PyObject *Pass_type;
+    PyObject *Pow_singleton;
+    PyObject *Pow_type;
+    PyObject *RShift_singleton;
+    PyObject *RShift_type;
+    PyObject *Raise_type;
+    PyObject *Return_type;
+    PyObject *SetComp_type;
+    PyObject *Set_type;
+    PyObject *Slice_type;
+    PyObject *Starred_type;
+    PyObject *Store_singleton;
+    PyObject *Store_type;
+    PyObject *Sub_singleton;
+    PyObject *Sub_type;
+    PyObject *Subscript_type;
+    PyObject *Try_type;
+    PyObject *Tuple_type;
+    PyObject *TypeIgnore_type;
+    PyObject *UAdd_singleton;
+    PyObject *UAdd_type;
+    PyObject *USub_singleton;
+    PyObject *USub_type;
+    PyObject *UnaryOp_type;
+    PyObject *While_type;
+    PyObject *With_type;
+    PyObject *YieldFrom_type;
+    PyObject *Yield_type;
+    PyObject *__dict__;
+    PyObject *__doc__;
+    PyObject *__match_args__;
+    PyObject *__module__;
+    PyObject *_attributes;
+    PyObject *_fields;
+    PyObject *alias_type;
+    PyObject *annotation;
+    PyObject *arg;
+    PyObject *arg_type;
+    PyObject *args;
+    PyObject *argtypes;
+    PyObject *arguments_type;
+    PyObject *asname;
+    PyObject *ast;
+    PyObject *attr;
+    PyObject *bases;
+    PyObject *body;
+    PyObject *boolop_type;
+    PyObject *cases;
+    PyObject *cause;
+    PyObject *cls;
+    PyObject *cmpop_type;
+    PyObject *col_offset;
+    PyObject *comparators;
+    PyObject *comprehension_type;
+    PyObject *context_expr;
+    PyObject *conversion;
+    PyObject *ctx;
+    PyObject *decorator_list;
+    PyObject *defaults;
+    PyObject *elt;
+    PyObject *elts;
+    PyObject *end_col_offset;
+    PyObject *end_lineno;
+    PyObject *exc;
+    PyObject *excepthandler_type;
+    PyObject *expr_context_type;
+    PyObject *expr_type;
+    PyObject *finalbody;
+    PyObject *format_spec;
+    PyObject *func;
+    PyObject *generators;
+    PyObject *guard;
+    PyObject *handlers;
+    PyObject *id;
+    PyObject *ifs;
+    PyObject *is_async;
+    PyObject *items;
+    PyObject *iter;
+    PyObject *key;
+    PyObject *keys;
+    PyObject *keyword_type;
+    PyObject *keywords;
+    PyObject *kind;
+    PyObject *kw_defaults;
+    PyObject *kwarg;
+    PyObject *kwd_attrs;
+    PyObject *kwd_patterns;
+    PyObject *kwonlyargs;
+    PyObject *left;
+    PyObject *level;
+    PyObject *lineno;
+    PyObject *lower;
+    PyObject *match_case_type;
+    PyObject *mod_type;
+    PyObject *module;
+    PyObject *msg;
+    PyObject *name;
+    PyObject *names;
+    PyObject *op;
+    PyObject *operand;
+    PyObject *operator_type;
+    PyObject *ops;
+    PyObject *optional_vars;
+    PyObject *orelse;
+    PyObject *pattern;
+    PyObject *pattern_type;
+    PyObject *patterns;
+    PyObject *posonlyargs;
+    PyObject *rest;
+    PyObject *returns;
+    PyObject *right;
+    PyObject *simple;
+    PyObject *slice;
+    PyObject *step;
+    PyObject *stmt_type;
+    PyObject *subject;
+    PyObject *tag;
+    PyObject *target;
+    PyObject *targets;
+    PyObject *test;
+    PyObject *type;
+    PyObject *type_comment;
+    PyObject *type_ignore_type;
+    PyObject *type_ignores;
+    PyObject *unaryop_type;
+    PyObject *upper;
+    PyObject *value;
+    PyObject *values;
+    PyObject *vararg;
+    PyObject *withitem_type;
+};
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_AST_STATE_H */
+
diff --git a/include/internal/pycore_atomic.h b/include/internal/pycore_atomic.h
index 1d5c562..3d42e54 100644
--- a/include/internal/pycore_atomic.h
+++ b/include/internal/pycore_atomic.h
@@ -11,8 +11,8 @@
 #include "dynamic_annotations.h"   /* _Py_ANNOTATE_MEMORY_ORDER */
 #include "pyconfig.h"
 
-#if defined(HAVE_STD_ATOMIC)
-#include <stdatomic.h>
+#ifdef HAVE_STD_ATOMIC
+#  include <stdatomic.h>
 #endif
 
 
@@ -62,7 +62,7 @@
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
     atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
 
-/* Use builtin atomic operations in GCC >= 4.7 */
+// Use builtin atomic operations in GCC >= 4.7 and clang
 #elif defined(HAVE_BUILTIN_ATOMIC)
 
 typedef enum _Py_memory_order {
diff --git a/include/internal/pycore_atomic_funcs.h b/include/internal/pycore_atomic_funcs.h
new file mode 100644
index 0000000..a708789
--- /dev/null
+++ b/include/internal/pycore_atomic_funcs.h
@@ -0,0 +1,94 @@
+/* Atomic functions: similar to pycore_atomic.h, but callers don't need
+   to declare their variables as atomic.
+
+   Py_ssize_t type:
+
+   * value = _Py_atomic_size_get(&var)
+   * _Py_atomic_size_set(&var, value)
+
+   These functions use sequentially-consistent ordering (the __ATOMIC_SEQ_CST
+   memory order): they enforce a total ordering with all other atomic
+   functions.
+*/
+#ifndef Py_ATOMIC_FUNC_H
+#define Py_ATOMIC_FUNC_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+#if defined(_MSC_VER)
+#  include <intrin.h>             // _InterlockedExchange()
+#endif
+
+
+// Use builtin atomic operations in GCC >= 4.7 and clang
+#ifdef HAVE_BUILTIN_ATOMIC
+
+static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
+{
+    return __atomic_load_n(var, __ATOMIC_SEQ_CST);
+}
+
+static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
+{
+    __atomic_store_n(var, value, __ATOMIC_SEQ_CST);
+}
+
+#elif defined(_MSC_VER)
+
+static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
+{
+#if SIZEOF_VOID_P == 8
+    Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
+    volatile __int64 *volatile_var = (volatile __int64 *)var;
+    __int64 old;
+    do {
+        old = *volatile_var;
+    } while(_InterlockedCompareExchange64(volatile_var, old, old) != old);
+#else
+    Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
+    volatile long *volatile_var = (volatile long *)var;
+    long old;
+    do {
+        old = *volatile_var;
+    } while(_InterlockedCompareExchange(volatile_var, old, old) != old);
+#endif
+    return old;
+}
+
+static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
+{
+#if SIZEOF_VOID_P == 8
+    Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
+    volatile __int64 *volatile_var = (volatile __int64 *)var;
+    _InterlockedExchange64(volatile_var, value);
+#else
+    Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
+    volatile long *volatile_var = (volatile long *)var;
+    _InterlockedExchange(volatile_var, value);
+#endif
+}
+
+#else
+// Fallback implementation using volatile
+
+static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
+{
+    volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
+    return *volatile_var;
+}
+
+static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
+{
+    volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
+    *volatile_var = value;
+}
+#endif
+
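+/* Usage sketch (illustrative): the variable is a plain Py_ssize_t, no
+   special atomic type is required:
+
+       static Py_ssize_t counter;
+       _Py_atomic_size_set(&counter, 1);                  // SEQ_CST store
+       Py_ssize_t value = _Py_atomic_size_get(&counter);  // SEQ_CST load
+*/
+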
+#ifdef __cplusplus
+}
+#endif
+#endif  /* Py_ATOMIC_FUNC_H */
diff --git a/include/internal/pycore_bitutils.h b/include/internal/pycore_bitutils.h
new file mode 100644
index 0000000..e4aa7a3
--- /dev/null
+++ b/include/internal/pycore_bitutils.h
@@ -0,0 +1,176 @@
+/* Bit and bytes utilities.
+
+   Bytes swap functions, reverse order of bytes:
+
+   - _Py_bswap16(uint16_t)
+   - _Py_bswap32(uint32_t)
+   - _Py_bswap64(uint64_t)
+*/
+
+#ifndef Py_INTERNAL_BITUTILS_H
+#define Py_INTERNAL_BITUTILS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+#if defined(__GNUC__) \
+      && ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
+   /* __builtin_bswap16() is available since GCC 4.8,
+      __builtin_bswap32() is available since GCC 4.3,
+      __builtin_bswap64() is available since GCC 4.3. */
+#  define _PY_HAVE_BUILTIN_BSWAP
+#endif
+
+#ifdef _MSC_VER
+   /* Get _byteswap_ushort(), _byteswap_ulong(), _byteswap_uint64() */
+#  include <intrin.h>
+#endif
+
+static inline uint16_t
+_Py_bswap16(uint16_t word)
+{
+#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap16)
+    return __builtin_bswap16(word);
+#elif defined(_MSC_VER)
+    Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned short));
+    return _byteswap_ushort(word);
+#else
+    // Portable implementation which doesn't rely on circular bit shift
+    return ( ((word & UINT16_C(0x00FF)) << 8)
+           | ((word & UINT16_C(0xFF00)) >> 8));
+#endif
+}
+
+static inline uint32_t
+_Py_bswap32(uint32_t word)
+{
+#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap32)
+    return __builtin_bswap32(word);
+#elif defined(_MSC_VER)
+    Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned long));
+    return _byteswap_ulong(word);
+#else
+    // Portable implementation which doesn't rely on circular bit shift
+    return ( ((word & UINT32_C(0x000000FF)) << 24)
+           | ((word & UINT32_C(0x0000FF00)) <<  8)
+           | ((word & UINT32_C(0x00FF0000)) >>  8)
+           | ((word & UINT32_C(0xFF000000)) >> 24));
+#endif
+}
+
+static inline uint64_t
+_Py_bswap64(uint64_t word)
+{
+#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap64)
+    return __builtin_bswap64(word);
+#elif defined(_MSC_VER)
+    return _byteswap_uint64(word);
+#else
+    // Portable implementation which doesn't rely on circular bit shift
+    return ( ((word & UINT64_C(0x00000000000000FF)) << 56)
+           | ((word & UINT64_C(0x000000000000FF00)) << 40)
+           | ((word & UINT64_C(0x0000000000FF0000)) << 24)
+           | ((word & UINT64_C(0x00000000FF000000)) <<  8)
+           | ((word & UINT64_C(0x000000FF00000000)) >>  8)
+           | ((word & UINT64_C(0x0000FF0000000000)) >> 24)
+           | ((word & UINT64_C(0x00FF000000000000)) >> 40)
+           | ((word & UINT64_C(0xFF00000000000000)) >> 56));
+#endif
+}
+
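+// Worked example (illustrative): the byte order is fully reversed, e.g.
+// _Py_bswap16(0x1234) == 0x3412 and _Py_bswap32(0x12345678) == 0x78563412.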
+
+// Population count: count the number of 1's in 'x'
+// (number of bits set to 1), also known as the Hamming weight.
+//
+// Implementation note: CPUID is not used to test whether the x86 POPCNT
+// instruction can be used, in order to keep the implementation simple. For
+// example, Visual Studio's __popcnt() is not used for this reason. The clang
+// and GCC builtin functions can use the x86 POPCNT instruction if the target
+// architecture has SSE4a or newer.
+static inline int
+_Py_popcount32(uint32_t x)
+{
+#if (defined(__clang__) || defined(__GNUC__))
+
+#if SIZEOF_INT >= 4
+    Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned int));
+    return __builtin_popcount(x);
+#else
+    // The C standard guarantees that unsigned long will always be big enough
+    // to hold a uint32_t value without losing information.
+    Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned long));
+    return __builtin_popcountl(x);
+#endif
+
+#else
+    // 32-bit SWAR (SIMD Within A Register) popcount
+
+    // Binary: 0 1 0 1 ...
+    const uint32_t M1 = 0x55555555;
+    // Binary: 00 11 00 11 ...
+    const uint32_t M2 = 0x33333333;
+    // Binary: 0000 1111 0000 1111 ...
+    const uint32_t M4 = 0x0F0F0F0F;
+    // 256**3 + 256**2 + 256**1 + 256**0 (one 1 in each byte)
+    const uint32_t SUM = 0x01010101;
+
+    // Put count of each 2 bits into those 2 bits
+    x = x - ((x >> 1) & M1);
+    // Put count of each 4 bits into those 4 bits
+    x = (x & M2) + ((x >> 2) & M2);
+    // Put count of each 8 bits into those 8 bits
+    x = (x + (x >> 4)) & M4;
+    // Sum of the 4 byte counts
+    return (uint32_t)((uint64_t)x * (uint64_t)SUM) >> 24;
+#endif
+}
+
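+// Worked example (illustrative): _Py_popcount32(0) == 0,
+// _Py_popcount32(0x000000FF) == 8, and _Py_popcount32(UINT32_MAX) == 32.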
+
+// Return the bit length of 'x': one more than the zero-based index of its
+// most significant 1 bit. This is the smallest integer k such that x < 2**k,
+// i.e. floor(log2(x)) + 1 for x != 0, and 0 for x == 0.
+static inline int
+_Py_bit_length(unsigned long x)
+{
+#if (defined(__clang__) || defined(__GNUC__))
+    if (x != 0) {
+        // __builtin_clzl() is available since GCC 3.4.
+        // Undefined behavior for x == 0.
+        return (int)sizeof(unsigned long) * 8 - __builtin_clzl(x);
+    }
+    else {
+        return 0;
+    }
+#elif defined(_MSC_VER)
+    // _BitScanReverse() is documented to search 32 bits.
+    Py_BUILD_ASSERT(sizeof(unsigned long) <= 4);
+    unsigned long msb;
+    if (_BitScanReverse(&msb, x)) {
+        return (int)msb + 1;
+    }
+    else {
+        return 0;
+    }
+#else
+    const int BIT_LENGTH_TABLE[32] = {
+        0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+        5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+    };
+    int msb = 0;
+    while (x >= 32) {
+        msb += 6;
+        x >>= 6;
+    }
+    msb += BIT_LENGTH_TABLE[x];
+    return msb;
+#endif
+}
+
+
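+// Worked example (illustrative): _Py_bit_length(0) == 0,
+// _Py_bit_length(1) == 1, _Py_bit_length(5) == 3, _Py_bit_length(256) == 9.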
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_BITUTILS_H */
diff --git a/include/internal/pycore_blocks_output_buffer.h b/include/internal/pycore_blocks_output_buffer.h
new file mode 100644
index 0000000..28cf6fb
--- /dev/null
+++ b/include/internal/pycore_blocks_output_buffer.h
@@ -0,0 +1,317 @@
+/*
+   _BlocksOutputBuffer is used to maintain an output buffer
+   whose size is not predictable in advance. It is suitable for
+   compression/decompression APIs (bz2/lzma/zlib) that use
+   stream->next_out and stream->avail_out:
+
+        stream->next_out:  points to the next output position.
+        stream->avail_out: the number of available bytes left in the buffer.
+
+   It maintains a list of bytes objects, so there is no overhead from
+   resizing the buffer.
+
+   Usage (a worked sketch follows the function definitions below):
+
+   1. Initialize the struct instance like this:
+        _BlocksOutputBuffer buffer = {.list = NULL};
+      Setting .list to NULL lets _BlocksOutputBuffer_OnError() run safely.
+
+   2. Initialize the buffer using one of these functions:
+        _BlocksOutputBuffer_InitAndGrow()
+        _BlocksOutputBuffer_InitWithSize()
+
+   3. If (avail_out == 0), grow the buffer:
+        _BlocksOutputBuffer_Grow()
+
+   4. Get the current output data size:
+        _BlocksOutputBuffer_GetDataSize()
+
+   5. Finish the buffer, and return a bytes object:
+        _BlocksOutputBuffer_Finish()
+
+   6. Clean up the buffer when an error occurs:
+        _BlocksOutputBuffer_OnError()
+*/
+
+#ifndef Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
+#define Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "Python.h"
+
+typedef struct {
+    // List of bytes objects
+    PyObject *list;
+    // Total allocated size of all blocks
+    Py_ssize_t allocated;
+    // Max length of the buffer, negative number means unlimited length.
+    Py_ssize_t max_length;
+} _BlocksOutputBuffer;
+
+static const char unable_allocate_msg[] = "Unable to allocate output buffer.";
+
+/* In a 32-bit build, the max block size should be <= INT32_MAX. */
+#define OUTPUT_BUFFER_MAX_BLOCK_SIZE (256*1024*1024)
+
+/* Block size sequence */
+#define KB (1024)
+#define MB (1024*1024)
+static const Py_ssize_t BUFFER_BLOCK_SIZE[] =
+    { 32*KB, 64*KB, 256*KB, 1*MB, 4*MB, 8*MB, 16*MB, 16*MB,
+      32*MB, 32*MB, 32*MB, 32*MB, 64*MB, 64*MB, 128*MB, 128*MB,
+      OUTPUT_BUFFER_MAX_BLOCK_SIZE };
+#undef KB
+#undef MB
+
+/* According to the block sizes defined by BUFFER_BLOCK_SIZE, the whole
+   allocated size growth step is:
+    1   32 KB       +32 KB
+    2   96 KB       +64 KB
+    3   352 KB      +256 KB
+    4   1.34 MB     +1 MB
+    5   5.34 MB     +4 MB
+    6   13.34 MB    +8 MB
+    7   29.34 MB    +16 MB
+    8   45.34 MB    +16 MB
+    9   77.34 MB    +32 MB
+    10  109.34 MB   +32 MB
+    11  141.34 MB   +32 MB
+    12  173.34 MB   +32 MB
+    13  237.34 MB   +64 MB
+    14  301.34 MB   +64 MB
+    15  429.34 MB   +128 MB
+    16  557.34 MB   +128 MB
+    17  813.34 MB   +256 MB
+    18  1069.34 MB  +256 MB
+    19  1325.34 MB  +256 MB
+    20  1581.34 MB  +256 MB
+    21  1837.34 MB  +256 MB
+    22  2093.34 MB  +256 MB
+    ...
+*/
+
+/* Initialize the buffer, and grow the buffer.
+
+   max_length: Max length of the buffer, -1 for unlimited length.
+
+   On success, return allocated size (>=0)
+   On failure, return -1
+*/
+static inline Py_ssize_t
+_BlocksOutputBuffer_InitAndGrow(_BlocksOutputBuffer *buffer,
+                                const Py_ssize_t max_length,
+                                void **next_out)
+{
+    PyObject *b;
+    Py_ssize_t block_size;
+
+    // ensure .list was set to NULL
+    assert(buffer->list == NULL);
+
+    // get block size
+    if (0 <= max_length && max_length < BUFFER_BLOCK_SIZE[0]) {
+        block_size = max_length;
+    } else {
+        block_size = BUFFER_BLOCK_SIZE[0];
+    }
+
+    // the first block
+    b = PyBytes_FromStringAndSize(NULL, block_size);
+    if (b == NULL) {
+        return -1;
+    }
+
+    // create the list
+    buffer->list = PyList_New(1);
+    if (buffer->list == NULL) {
+        Py_DECREF(b);
+        return -1;
+    }
+    PyList_SET_ITEM(buffer->list, 0, b);
+
+    // set variables
+    buffer->allocated = block_size;
+    buffer->max_length = max_length;
+
+    *next_out = PyBytes_AS_STRING(b);
+    return block_size;
+}
+
+/* Initialize the buffer, with an initial size.
+
+   The block size limit should be checked in the outer wrapper function. For
+   example, if a library accepts at most UINT32_MAX as the block size, then
+   init_size should be <= UINT32_MAX.
+
+   On success, return allocated size (>=0)
+   On failure, return -1
+*/
+static inline Py_ssize_t
+_BlocksOutputBuffer_InitWithSize(_BlocksOutputBuffer *buffer,
+                                 const Py_ssize_t init_size,
+                                 void **next_out)
+{
+    PyObject *b;
+
+    // ensure .list was set to NULL
+    assert(buffer->list == NULL);
+
+    // the first block
+    b = PyBytes_FromStringAndSize(NULL, init_size);
+    if (b == NULL) {
+        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
+        return -1;
+    }
+
+    // create the list
+    buffer->list = PyList_New(1);
+    if (buffer->list == NULL) {
+        Py_DECREF(b);
+        return -1;
+    }
+    PyList_SET_ITEM(buffer->list, 0, b);
+
+    // set variables
+    buffer->allocated = init_size;
+    buffer->max_length = -1;
+
+    *next_out = PyBytes_AS_STRING(b);
+    return init_size;
+}
+
+/* Grow the buffer. avail_out must be 0; check this before calling.
+
+   On success, return allocated size (>=0)
+   On failure, return -1
+*/
+static inline Py_ssize_t
+_BlocksOutputBuffer_Grow(_BlocksOutputBuffer *buffer,
+                         void **next_out,
+                         const Py_ssize_t avail_out)
+{
+    PyObject *b;
+    const Py_ssize_t list_len = Py_SIZE(buffer->list);
+    Py_ssize_t block_size;
+
+    // ensure no gaps in the data
+    if (avail_out != 0) {
+        PyErr_SetString(PyExc_SystemError,
+                        "avail_out is non-zero in _BlocksOutputBuffer_Grow().");
+        return -1;
+    }
+
+    // get block size
+    if (list_len < (Py_ssize_t) Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE)) {
+        block_size = BUFFER_BLOCK_SIZE[list_len];
+    } else {
+        block_size = BUFFER_BLOCK_SIZE[Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE) - 1];
+    }
+
+    // check max_length
+    if (buffer->max_length >= 0) {
+        // if rest == 0, the buffer must not be grown further
+        Py_ssize_t rest = buffer->max_length - buffer->allocated;
+        assert(rest > 0);
+
+        // block_size of the last block
+        if (block_size > rest) {
+            block_size = rest;
+        }
+    }
+
+    // check buffer->allocated overflow
+    if (block_size > PY_SSIZE_T_MAX - buffer->allocated) {
+        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
+        return -1;
+    }
+
+    // create the block
+    b = PyBytes_FromStringAndSize(NULL, block_size);
+    if (b == NULL) {
+        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
+        return -1;
+    }
+    if (PyList_Append(buffer->list, b) < 0) {
+        Py_DECREF(b);
+        return -1;
+    }
+    Py_DECREF(b);
+
+    // set variables
+    buffer->allocated += block_size;
+
+    *next_out = PyBytes_AS_STRING(b);
+    return block_size;
+}
+
+/* Return the current output data size. */
+static inline Py_ssize_t
+_BlocksOutputBuffer_GetDataSize(_BlocksOutputBuffer *buffer,
+                                const Py_ssize_t avail_out)
+{
+    return buffer->allocated - avail_out;
+}
+
+/* Finish the buffer.
+
+   Return a bytes object on success
+   Return NULL on failure
+*/
+static inline PyObject *
+_BlocksOutputBuffer_Finish(_BlocksOutputBuffer *buffer,
+                           const Py_ssize_t avail_out)
+{
+    PyObject *result, *block;
+    const Py_ssize_t list_len = Py_SIZE(buffer->list);
+
+    // fast path for single block
+    if ((list_len == 1 && avail_out == 0) ||
+        (list_len == 2 && Py_SIZE(PyList_GET_ITEM(buffer->list, 1)) == avail_out))
+    {
+        block = PyList_GET_ITEM(buffer->list, 0);
+        Py_INCREF(block);
+
+        Py_CLEAR(buffer->list);
+        return block;
+    }
+
+    // final bytes object
+    result = PyBytes_FromStringAndSize(NULL, buffer->allocated - avail_out);
+    if (result == NULL) {
+        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
+        return NULL;
+    }
+
+    // memory copy
+    if (list_len > 0) {
+        char *posi = PyBytes_AS_STRING(result);
+
+        // blocks except the last one
+        Py_ssize_t i = 0;
+        for (; i < list_len-1; i++) {
+            block = PyList_GET_ITEM(buffer->list, i);
+            memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block));
+            posi += Py_SIZE(block);
+        }
+        // the last block
+        block = PyList_GET_ITEM(buffer->list, i);
+        memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block) - avail_out);
+    } else {
+        assert(Py_SIZE(result) == 0);
+    }
+
+    Py_CLEAR(buffer->list);
+    return result;
+}
+
+/* Clean up the buffer when an error occurred. */
+static inline void
+_BlocksOutputBuffer_OnError(_BlocksOutputBuffer *buffer)
+{
+    Py_CLEAR(buffer->list);
+}
+
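+/* Usage sketch (illustrative only, not part of this header): a minimal
+   decompression-style loop over a stream with next_out/avail_out fields;
+   have_more_output() and run_one_step() are hypothetical helpers:
+
+       _BlocksOutputBuffer buffer = {.list = NULL};
+       Py_ssize_t allocated = _BlocksOutputBuffer_InitAndGrow(
+           &buffer, -1, (void**) &stream.next_out);
+       if (allocated < 0) {
+           goto error;
+       }
+       stream.avail_out = allocated;
+       while (have_more_output(&stream)) {
+           if (stream.avail_out == 0) {
+               allocated = _BlocksOutputBuffer_Grow(
+                   &buffer, (void**) &stream.next_out, stream.avail_out);
+               if (allocated < 0) {
+                   goto error;
+               }
+               stream.avail_out = allocated;
+           }
+           run_one_step(&stream);
+       }
+       PyObject *result = _BlocksOutputBuffer_Finish(&buffer, stream.avail_out);
+       if (result == NULL) {
+           goto error;
+       }
+       return result;
+
+   error:
+       _BlocksOutputBuffer_OnError(&buffer);
+       return NULL;
+*/
+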
+#ifdef __cplusplus
+}
+#endif
+#endif /* Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H */
\ No newline at end of file
diff --git a/include/internal/pycore_byteswap.h b/include/internal/pycore_byteswap.h
deleted file mode 100644
index 2b20fc6..0000000
--- a/include/internal/pycore_byteswap.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Bytes swap functions, reverse order of bytes:
-
-   - _Py_bswap16(uint16_t)
-   - _Py_bswap32(uint32_t)
-   - _Py_bswap64(uint64_t)
-*/
-
-#ifndef Py_INTERNAL_BSWAP_H
-#define Py_INTERNAL_BSWAP_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef Py_BUILD_CORE
-#  error "this header requires Py_BUILD_CORE define"
-#endif
-
-#if defined(__GNUC__) \
-      && ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
-   /* __builtin_bswap16() is available since GCC 4.8,
-      __builtin_bswap32() is available since GCC 4.3,
-      __builtin_bswap64() is available since GCC 4.3. */
-#  define _PY_HAVE_BUILTIN_BSWAP
-#endif
-
-#ifdef _MSC_VER
-   /* Get _byteswap_ushort(), _byteswap_ulong(), _byteswap_uint64() */
-#  include <intrin.h>
-#endif
-
-static inline uint16_t
-_Py_bswap16(uint16_t word)
-{
-#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap16)
-    return __builtin_bswap16(word);
-#elif defined(_MSC_VER)
-    Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned short));
-    return _byteswap_ushort(word);
-#else
-    // Portable implementation which doesn't rely on circular bit shift
-    return ( ((word & UINT16_C(0x00FF)) << 8)
-           | ((word & UINT16_C(0xFF00)) >> 8));
-#endif
-}
-
-static inline uint32_t
-_Py_bswap32(uint32_t word)
-{
-#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap32)
-    return __builtin_bswap32(word);
-#elif defined(_MSC_VER)
-    Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned long));
-    return _byteswap_ulong(word);
-#else
-    // Portable implementation which doesn't rely on circular bit shift
-    return ( ((word & UINT32_C(0x000000FF)) << 24)
-           | ((word & UINT32_C(0x0000FF00)) <<  8)
-           | ((word & UINT32_C(0x00FF0000)) >>  8)
-           | ((word & UINT32_C(0xFF000000)) >> 24));
-#endif
-}
-
-static inline uint64_t
-_Py_bswap64(uint64_t word)
-{
-#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap64)
-    return __builtin_bswap64(word);
-#elif defined(_MSC_VER)
-    return _byteswap_uint64(word);
-#else
-    // Portable implementation which doesn't rely on circular bit shift
-    return ( ((word & UINT64_C(0x00000000000000FF)) << 56)
-           | ((word & UINT64_C(0x000000000000FF00)) << 40)
-           | ((word & UINT64_C(0x0000000000FF0000)) << 24)
-           | ((word & UINT64_C(0x00000000FF000000)) <<  8)
-           | ((word & UINT64_C(0x000000FF00000000)) >>  8)
-           | ((word & UINT64_C(0x0000FF0000000000)) >> 24)
-           | ((word & UINT64_C(0x00FF000000000000)) >> 40)
-           | ((word & UINT64_C(0xFF00000000000000)) >> 56));
-#endif
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_INTERNAL_BSWAP_H */
-
diff --git a/include/internal/pycore_ceval.h b/include/internal/pycore_ceval.h
index 18c8f02..f573c3e 100644
--- a/include/internal/pycore_ceval.h
+++ b/include/internal/pycore_ceval.h
@@ -23,44 +23,50 @@
     PyInterpreterState *interp,
     int (*func)(void *),
     void *arg);
-PyAPI_FUNC(void) _PyEval_SignalAsyncExc(PyThreadState *tstate);
+PyAPI_FUNC(void) _PyEval_SignalAsyncExc(PyInterpreterState *interp);
 #ifdef HAVE_FORK
-extern void _PyEval_ReInitThreads(struct pyruntimestate *runtime);
+extern PyStatus _PyEval_ReInitThreads(PyThreadState *tstate);
 #endif
 PyAPI_FUNC(void) _PyEval_SetCoroutineOriginTrackingDepth(
     PyThreadState *tstate,
     int new_depth);
 
-/* Private function */
 void _PyEval_Fini(void);
 
+
+extern PyObject* _PyEval_GetBuiltins(PyThreadState *tstate);
+extern PyObject *_PyEval_BuiltinsFromGlobals(
+    PyThreadState *tstate,
+    PyObject *globals);
+
+
 static inline PyObject*
 _PyEval_EvalFrame(PyThreadState *tstate, PyFrameObject *f, int throwflag)
 {
     return tstate->interp->eval_frame(tstate, f, throwflag);
 }
 
-extern PyObject *_PyEval_EvalCode(
-    PyThreadState *tstate,
-    PyObject *_co, PyObject *globals, PyObject *locals,
-    PyObject *const *args, Py_ssize_t argcount,
-    PyObject *const *kwnames, PyObject *const *kwargs,
-    Py_ssize_t kwcount, int kwstep,
-    PyObject *const *defs, Py_ssize_t defcount,
-    PyObject *kwdefs, PyObject *closure,
-    PyObject *name, PyObject *qualname);
+extern PyObject *
+_PyEval_Vector(PyThreadState *tstate,
+            PyFrameConstructor *desc, PyObject *locals,
+            PyObject* const* args, size_t argcount,
+            PyObject *kwnames);
 
+#ifdef EXPERIMENTAL_ISOLATED_SUBINTERPRETERS
+extern int _PyEval_ThreadsInitialized(PyInterpreterState *interp);
+#else
 extern int _PyEval_ThreadsInitialized(struct pyruntimestate *runtime);
+#endif
 extern PyStatus _PyEval_InitGIL(PyThreadState *tstate);
-extern void _PyEval_FiniGIL(PyThreadState *tstate);
+extern void _PyEval_FiniGIL(PyInterpreterState *interp);
 
 extern void _PyEval_ReleaseLock(PyThreadState *tstate);
 
+extern void _PyEval_DeactivateOpCache(void);
+
 
 /* --- _Py_EnterRecursiveCall() ----------------------------------------- */
 
-PyAPI_DATA(int) _Py_CheckRecursionLimit;
-
 #ifdef USE_STACKCHECK
 /* With USE_STACKCHECK macro defined, trigger stack checks in
    _Py_CheckRecursiveCall() on every 64th call to Py_EnterRecursiveCall. */
@@ -90,24 +96,8 @@
 
 #define Py_EnterRecursiveCall(where) _Py_EnterRecursiveCall_inline(where)
 
-/* Compute the "lower-water mark" for a recursion limit. When
- * Py_LeaveRecursiveCall() is called with a recursion depth below this mark,
- * the overflowed flag is reset to 0. */
-static inline int _Py_RecursionLimitLowerWaterMark(int limit) {
-    if (limit > 200) {
-        return (limit - 50);
-    }
-    else {
-        return (3 * (limit >> 2));
-    }
-}
-
 static inline void _Py_LeaveRecursiveCall(PyThreadState *tstate)  {
     tstate->recursion_depth--;
-    int limit = tstate->interp->ceval.recursion_limit;
-    if (tstate->recursion_depth < _Py_RecursionLimitLowerWaterMark(limit)) {
-        tstate->overflowed = 0;
-    }
 }
 
 static inline void _Py_LeaveRecursiveCall_inline(void)  {
diff --git a/include/internal/pycore_code.h b/include/internal/pycore_code.h
index 88956f1..8ff1863 100644
--- a/include/internal/pycore_code.h
+++ b/include/internal/pycore_code.h
@@ -3,16 +3,23 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
- 
+
 typedef struct {
     PyObject *ptr;  /* Cached pointer (borrowed reference) */
     uint64_t globals_ver;  /* ma_version of global dict */
     uint64_t builtins_ver; /* ma_version of builtin dict */
 } _PyOpcache_LoadGlobal;
 
+typedef struct {
+    PyTypeObject *type;
+    Py_ssize_t hint;
+    unsigned int tp_version_tag;
+} _PyOpCodeOpt_LoadAttr;
+
 struct _PyOpcache {
     union {
         _PyOpcache_LoadGlobal lg;
+        _PyOpCodeOpt_LoadAttr la;
     } u;
     char optimized;
 };
diff --git a/include/internal/pycore_compile.h b/include/internal/pycore_compile.h
new file mode 100644
index 0000000..06a6082
--- /dev/null
+++ b/include/internal/pycore_compile.h
@@ -0,0 +1,44 @@
+#ifndef Py_INTERNAL_COMPILE_H
+#define Py_INTERNAL_COMPILE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+struct _arena;   // Type defined in pycore_pyarena.h
+struct _mod;     // Type defined in pycore_ast.h
+
+// Export the symbol for test_peg_generator (built as a library)
+PyAPI_FUNC(PyCodeObject*) _PyAST_Compile(
+    struct _mod *mod,
+    PyObject *filename,
+    PyCompilerFlags *flags,
+    int optimize,
+    struct _arena *arena);
+extern PyFutureFeatures* _PyFuture_FromAST(
+    struct _mod * mod,
+    PyObject *filename
+    );
+
+extern PyObject* _Py_Mangle(PyObject *p, PyObject *name);
+
+typedef struct {
+    int optimize;
+    int ff_features;
+
+    int recursion_depth;            /* current recursion depth */
+    int recursion_limit;            /* recursion limit */
+} _PyASTOptimizeState;
+
+extern int _PyAST_Optimize(
+    struct _mod *,
+    struct _arena *arena,
+    _PyASTOptimizeState *state);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_COMPILE_H */
diff --git a/include/internal/pycore_context.h b/include/internal/pycore_context.h
index f665ad5..a482dd4 100644
--- a/include/internal/pycore_context.h
+++ b/include/internal/pycore_context.h
@@ -37,6 +37,6 @@
 
 
 int _PyContext_Init(void);
-void _PyContext_Fini(void);
+void _PyContext_Fini(PyInterpreterState *interp);
 
 #endif /* !Py_INTERNAL_CONTEXT_H */
diff --git a/include/internal/pycore_fileutils.h b/include/internal/pycore_fileutils.h
index bbee586..c1c9244 100644
--- a/include/internal/pycore_fileutils.h
+++ b/include/internal/pycore_fileutils.h
@@ -48,6 +48,23 @@
     PyObject **decimal_point,
     PyObject **thousands_sep);
 
+PyAPI_FUNC(void) _Py_closerange(int first, int last);
+
+PyAPI_FUNC(wchar_t*) _Py_GetLocaleEncoding(void);
+PyAPI_FUNC(PyObject*) _Py_GetLocaleEncodingObject(void);
+
+#ifdef HAVE_NON_UNICODE_WCHAR_T_REPRESENTATION
+extern int _Py_LocaleUsesNonUnicodeWchar(void);
+
+extern wchar_t* _Py_DecodeNonUnicodeWchar(
+    const wchar_t* native,
+    Py_ssize_t size);
+
+extern int _Py_EncodeNonUnicodeWchar_InPlace(
+    wchar_t* unicode,
+    Py_ssize_t size);
+#endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/internal/pycore_format.h b/include/internal/pycore_format.h
new file mode 100644
index 0000000..1b8d575
--- /dev/null
+++ b/include/internal/pycore_format.h
@@ -0,0 +1,27 @@
+#ifndef Py_INTERNAL_FORMAT_H
+#define Py_INTERNAL_FORMAT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+/* Format codes
+ * F_LJUST      '-'
+ * F_SIGN       '+'
+ * F_BLANK      ' '
+ * F_ALT        '#'
+ * F_ZERO       '0'
+ */
+#define F_LJUST (1<<0)
+#define F_SIGN  (1<<1)
+#define F_BLANK (1<<2)
+#define F_ALT   (1<<3)
+#define F_ZERO  (1<<4)
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_FORMAT_H */
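
The F_* bits mirror printf-style conversion flags shared by the str/bytes formatters. A minimal sketch of how a scanner might accumulate them (hypothetical helper, not part of this header):

    /* Sketch: fold '-', '+', ' ', '#', '0' into an F_* bitmask while
       advancing past the flag characters of a format spec. */
    static int
    parse_format_flags(const char **pf)
    {
        int flags = 0;
        for (;; (*pf)++) {
            switch (**pf) {
                case '-': flags |= F_LJUST; continue;
                case '+': flags |= F_SIGN;  continue;
                case ' ': flags |= F_BLANK; continue;
                case '#': flags |= F_ALT;   continue;
                case '0': flags |= F_ZERO;  continue;
            }
            break;
        }
        return flags;
    }
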
diff --git a/include/internal/pycore_gc.h b/include/internal/pycore_gc.h
index 0511eea..9db4a47 100644
--- a/include/internal/pycore_gc.h
+++ b/include/internal/pycore_gc.h
@@ -161,17 +161,19 @@
     Py_ssize_t long_lived_pending;
 };
 
-PyAPI_FUNC(void) _PyGC_InitState(struct _gc_runtime_state *);
+extern void _PyGC_InitState(struct _gc_runtime_state *);
+
+extern Py_ssize_t _PyGC_CollectNoFail(PyThreadState *tstate);
 
 
 // Functions to clear types free lists
-extern void _PyFrame_ClearFreeList(void);
-extern void _PyTuple_ClearFreeList(void);
-extern void _PyFloat_ClearFreeList(void);
-extern void _PyList_ClearFreeList(void);
-extern void _PyDict_ClearFreeList(void);
-extern void _PyAsyncGen_ClearFreeLists(void);
-extern void _PyContext_ClearFreeList(void);
+extern void _PyFrame_ClearFreeList(PyInterpreterState *interp);
+extern void _PyTuple_ClearFreeList(PyInterpreterState *interp);
+extern void _PyFloat_ClearFreeList(PyInterpreterState *interp);
+extern void _PyList_ClearFreeList(PyInterpreterState *interp);
+extern void _PyDict_ClearFreeList(PyInterpreterState *interp);
+extern void _PyAsyncGen_ClearFreeLists(PyInterpreterState *interp);
+extern void _PyContext_ClearFreeList(PyInterpreterState *interp);
 
 #ifdef __cplusplus
 }
diff --git a/include/internal/pycore_import.h b/include/internal/pycore_import.h
index b011ea4..e21ed0a 100644
--- a/include/internal/pycore_import.h
+++ b/include/internal/pycore_import.h
@@ -5,15 +5,10 @@
 extern "C" {
 #endif
 
-PyAPI_FUNC(PyObject *) _PyImport_FindBuiltin(
-    PyThreadState *tstate,
-    const char *name             /* UTF-8 encoded string */
-    );
-
 #ifdef HAVE_FORK
-extern void _PyImport_ReInitLock(void);
+extern PyStatus _PyImport_ReInitLock(void);
 #endif
-extern void _PyImport_Cleanup(PyThreadState *tstate);
+extern PyObject* _PyImport_BootstrapImp(PyThreadState *tstate);
 
 #ifdef __cplusplus
 }
diff --git a/include/internal/pycore_initconfig.h b/include/internal/pycore_initconfig.h
index 457a005..4b009e8 100644
--- a/include/internal/pycore_initconfig.h
+++ b/include/internal/pycore_initconfig.h
@@ -44,6 +44,8 @@
 #define _PyStatus_UPDATE_FUNC(err) \
     do { err.func = _PyStatus_GET_FUNC(); } while (0)
 
+PyObject* _PyErr_SetFromPyStatus(PyStatus status);
+
 /* --- PyWideStringList ------------------------------------------------ */
 
 #define _PyWideStringList_INIT (PyWideStringList){.length = 0, .items = NULL}
@@ -100,6 +102,7 @@
     int isolated;             /* -I option */
     int use_environment;      /* -E option */
     int dev_mode;             /* -X dev and PYTHONDEVMODE */
+    int warn_default_encoding;     /* -X warn_default_encoding and PYTHONWARNDEFAULTENCODING */
 } _PyPreCmdline;
 
 #define _PyPreCmdline_INIT \
@@ -149,13 +152,19 @@
 extern PyStatus _PyConfig_Copy(
     PyConfig *config,
     const PyConfig *config2);
-extern PyStatus _PyConfig_InitPathConfig(PyConfig *config);
+extern PyStatus _PyConfig_InitPathConfig(
+    PyConfig *config,
+    int compute_path_config);
+extern PyStatus _PyConfig_Read(PyConfig *config, int compute_path_config);
 extern PyStatus _PyConfig_Write(const PyConfig *config,
     struct pyruntimestate *runtime);
 extern PyStatus _PyConfig_SetPyArgv(
     PyConfig *config,
     const _PyArgv *args);
 
+PyAPI_FUNC(PyObject*) _PyConfig_AsDict(const PyConfig *config);
+PyAPI_FUNC(int) _PyConfig_FromDict(PyConfig *config, PyObject *dict);
+
 
 /* --- Function used for testing ---------------------------------- */
 
diff --git a/include/internal/pycore_interp.h b/include/internal/pycore_interp.h
index 551ad83..4307b61 100644
--- a/include/internal/pycore_interp.h
+++ b/include/internal/pycore_interp.h
@@ -8,12 +8,11 @@
 #  error "this header requires Py_BUILD_CORE define"
 #endif
 
-#include "pycore_atomic.h"    /* _Py_atomic_address */
-#include "pycore_gil.h"       /* struct _gil_runtime_state  */
-#include "pycore_gc.h"        /* struct _gc_runtime_state */
-#include "pycore_warnings.h"  /* struct _warnings_runtime_state */
-
-/* ceval state */
+#include "pycore_atomic.h"        // _Py_atomic_address
+#include "pycore_ast_state.h"     // struct ast_state
+#include "pycore_gil.h"           // struct _gil_runtime_state
+#include "pycore_gc.h"            // struct _gc_runtime_state
+#include "pycore_warnings.h"      // struct _warnings_runtime_state
 
 struct _pending_calls {
     PyThread_type_lock lock;
@@ -34,18 +33,15 @@
 
 struct _ceval_state {
     int recursion_limit;
-    /* Records whether tracing is on for any thread.  Counts the number
-       of threads for which tstate->c_tracefunc is non-NULL, so if the
-       value is 0, we know we don't have to check this thread's
-       c_tracefunc.  This speeds up the if statement in
-       _PyEval_EvalFrameDefault() after fast_next_opcode. */
-    int tracing_possible;
     /* This single variable consolidates all requests to break out of
        the fast path in the eval loop. */
     _Py_atomic_int eval_breaker;
     /* Request for dropping the GIL */
     _Py_atomic_int gil_drop_request;
     struct _pending_calls pending;
+#ifdef EXPERIMENTAL_ISOLATED_SUBINTERPRETERS
+    struct _gil_runtime_state gil;
+#endif
 };
 
 /* fs_codec.encoding is initialized to NULL.
@@ -57,8 +53,150 @@
     _Py_error_handler error_handler;
 };
 
+struct _Py_bytes_state {
+    PyObject *empty_string;
+    PyBytesObject *characters[256];
+};
+
+struct _Py_unicode_ids {
+    Py_ssize_t size;
+    PyObject **array;
+};
+
 struct _Py_unicode_state {
+    // The empty Unicode object is a singleton to improve performance.
+    PyObject *empty_string;
+    /* Single character Unicode strings in the Latin-1 range are being
+       shared as well. */
+    PyObject *latin1[256];
     struct _Py_unicode_fs_codec fs_codec;
+
+    // Unused member kept for ABI backward compatibility with Python 3.10.0:
+    // see bpo-46006.
+    PyObject *unused_interned;
+
+    // Unicode identifiers (_Py_Identifier): see _PyUnicode_FromId()
+    struct _Py_unicode_ids ids;
+};
+
+struct _Py_float_state {
+    /* Special free list
+       free_list is a singly-linked list of available PyFloatObjects,
+       linked via abuse of their ob_type members. */
+    int numfree;
+    PyFloatObject *free_list;
+};
+
+/* Speed optimization to avoid frequent malloc/free of small tuples */
+#ifndef PyTuple_MAXSAVESIZE
+   // Largest tuple to save on free list
+#  define PyTuple_MAXSAVESIZE 20
+#endif
+#ifndef PyTuple_MAXFREELIST
+   // Maximum number of tuples of each size to save
+#  define PyTuple_MAXFREELIST 2000
+#endif
+
+struct _Py_tuple_state {
+#if PyTuple_MAXSAVESIZE > 0
+    /* Entries 1 up to PyTuple_MAXSAVESIZE are free lists,
+       entry 0 is the empty tuple () of which at most one instance
+       will be allocated. */
+    PyTupleObject *free_list[PyTuple_MAXSAVESIZE];
+    int numfree[PyTuple_MAXSAVESIZE];
+#endif
+};
+
+/* Empty list reuse scheme to save calls to malloc and free */
+#ifndef PyList_MAXFREELIST
+#  define PyList_MAXFREELIST 80
+#endif
+
+struct _Py_list_state {
+    PyListObject *free_list[PyList_MAXFREELIST];
+    int numfree;
+};
+
+#ifndef PyDict_MAXFREELIST
+#  define PyDict_MAXFREELIST 80
+#endif
+
+struct _Py_dict_state {
+    /* Dictionary reuse scheme to save calls to malloc and free */
+    PyDictObject *free_list[PyDict_MAXFREELIST];
+    int numfree;
+    PyDictKeysObject *keys_free_list[PyDict_MAXFREELIST];
+    int keys_numfree;
+};
+
+struct _Py_frame_state {
+    PyFrameObject *free_list;
+    /* number of frames currently in free_list */
+    int numfree;
+};
+
+#ifndef _PyAsyncGen_MAXFREELIST
+#  define _PyAsyncGen_MAXFREELIST 80
+#endif
+
+struct _Py_async_gen_state {
+    /* Freelists boost performance 6-10%; they also reduce memory
+       fragmentation, as _PyAsyncGenWrappedValue and PyAsyncGenASend
+       are short-living objects that are instantiated for every
+       __anext__() call. */
+    struct _PyAsyncGenWrappedValue* value_freelist[_PyAsyncGen_MAXFREELIST];
+    int value_numfree;
+
+    struct PyAsyncGenASend* asend_freelist[_PyAsyncGen_MAXFREELIST];
+    int asend_numfree;
+};
+
+struct _Py_context_state {
+    // List of free PyContext objects
+    PyContext *freelist;
+    int numfree;
+};
+
+struct _Py_exc_state {
+    // The dict mapping from errno codes to OSError subclasses
+    PyObject *errnomap;
+    PyBaseExceptionObject *memerrors_freelist;
+    int memerrors_numfree;
+};
+
+
+// atexit state
+typedef struct {
+    PyObject *func;
+    PyObject *args;
+    PyObject *kwargs;
+} atexit_callback;
+
+struct atexit_state {
+    atexit_callback **callbacks;
+    int ncallbacks;
+    int callback_len;
+};
+
+
+// Type attribute lookup cache: speed up attribute and method lookups,
+// see _PyType_Lookup().
+struct type_cache_entry {
+    unsigned int version;  // initialized from type->tp_version_tag
+    PyObject *name;        // reference to exactly a str or None
+    PyObject *value;       // borrowed reference or NULL
+};
+
+#define MCACHE_SIZE_EXP 12
+#define MCACHE_STATS 0
+
+struct type_cache {
+    struct type_cache_entry hashtable[1 << MCACHE_SIZE_EXP];
+#if MCACHE_STATS
+    size_t hits;
+    size_t misses;
+    size_t collisions;
+#endif
 };
 
 
@@ -67,6 +205,11 @@
 #define _PY_NSMALLPOSINTS           257
 #define _PY_NSMALLNEGINTS           5
 
+// _PyLong_GetZero() and _PyLong_GetOne() must always be available
+#if _PY_NSMALLPOSINTS < 2
+#  error "_PY_NSMALLPOSINTS must be greater than 1"
+#endif
+
 // The PyInterpreterState typedef is in Include/pystate.h.
 struct _is {
 
@@ -88,10 +231,14 @@
     struct _ceval_state ceval;
     struct _gc_runtime_state gc;
 
+    // sys.modules dictionary
     PyObject *modules;
     PyObject *modules_by_index;
+    // Dictionary of the sys module
     PyObject *sysdict;
+    // Dictionary of the builtins module
     PyObject *builtins;
+    // importlib module
     PyObject *importlib;
 
     /* Used in Modules/_threadmodule.c. */
@@ -107,8 +254,6 @@
     PyObject *codec_error_registry;
     int codecs_initialized;
 
-    struct _Py_unicode_state unicode;
-
     PyConfig config;
 #ifdef HAVE_DLOPEN
     int dlopenflags;
@@ -118,7 +263,7 @@
 
     PyObject *builtins_copy;
     PyObject *import_func;
-    /* Initialized to PyEval_EvalFrameDefault(). */
+    // Initialized to _PyEval_EvalFrameDefault().
     _PyFrameEvalFunction eval_frame;
 
     Py_ssize_t co_extra_user_count;
@@ -129,40 +274,41 @@
     PyObject *after_forkers_parent;
     PyObject *after_forkers_child;
 #endif
-    /* AtExit module */
-    void (*pyexitfunc)(PyObject *);
-    PyObject *pyexitmodule;
 
     uint64_t tstate_next_unique_id;
 
     struct _warnings_runtime_state warnings;
+    struct atexit_state atexit;
 
     PyObject *audit_hooks;
 
-    struct {
-        struct {
-            int level;
-            int atbol;
-        } listnode;
-    } parser;
-
-#if _PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS > 0
     /* Small integers are preallocated in this array so that they
        can be shared.
        The integers that are preallocated are those in the range
        -_PY_NSMALLNEGINTS (inclusive) to _PY_NSMALLPOSINTS (not inclusive).
     */
     PyLongObject* small_ints[_PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS];
-#endif
+    struct _Py_bytes_state bytes;
+    struct _Py_unicode_state unicode;
+    struct _Py_float_state float_state;
+    /* Using a cache is very effective since typically only a single slice is
+       created and then deleted again. */
+    PySliceObject *slice_cache;
+
+    struct _Py_tuple_state tuple;
+    struct _Py_list_state list;
+    struct _Py_dict_state dict_state;
+    struct _Py_frame_state frame;
+    struct _Py_async_gen_state async_gen;
+    struct _Py_context_state context;
+    struct _Py_exc_state exc_state;
+
+    struct ast_state ast;
+    struct type_cache type_cache;
 };
 
-/* Used by _PyImport_Cleanup() */
 extern void _PyInterpreterState_ClearModules(PyInterpreterState *interp);
-
-extern PyStatus _PyInterpreterState_SetConfig(
-    PyInterpreterState *interp,
-    const PyConfig *config);
-
+extern void _PyInterpreterState_Clear(PyThreadState *tstate);
 
 
 /* cross-interpreter data registry */
@@ -182,11 +328,10 @@
 PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(int64_t);
 
 PyAPI_FUNC(int) _PyInterpreterState_IDInitref(struct _is *);
-PyAPI_FUNC(void) _PyInterpreterState_IDIncref(struct _is *);
+PyAPI_FUNC(int) _PyInterpreterState_IDIncref(struct _is *);
 PyAPI_FUNC(void) _PyInterpreterState_IDDecref(struct _is *);
 
 #ifdef __cplusplus
 }
 #endif
 #endif /* !Py_INTERNAL_INTERP_H */
-
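
With the free lists moved from process globals into PyInterpreterState, the clear functions declared in pycore_gc.h now receive the interpreter explicitly. A hedged sketch of the shape such a function takes (hypothetical name; the real implementations live in the Objects/ sources):

    /* Sketch: walk this interpreter's float free list.  Per the struct
       comment above, the list is linked through the ob_type member. */
    static void
    clear_float_free_list(PyInterpreterState *interp)
    {
        struct _Py_float_state *state = &interp->float_state;
        PyFloatObject *f = state->free_list;
        while (f != NULL) {
            PyFloatObject *next = (PyFloatObject *)Py_TYPE(f);
            PyObject_Free(f);
            f = next;
        }
        state->free_list = NULL;
        state->numfree = 0;
    }
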
diff --git a/include/internal/pycore_list.h b/include/internal/pycore_list.h
new file mode 100644
index 0000000..f18fb05
--- /dev/null
+++ b/include/internal/pycore_list.h
@@ -0,0 +1,20 @@
+#ifndef Py_INTERNAL_LIST_H
+#define Py_INTERNAL_LIST_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "listobject.h"           // _PyList_CAST()
+
+
+#define _PyList_ITEMS(op) (_PyList_CAST(op)->ob_item)
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif   /* !Py_INTERNAL_LIST_H */
diff --git a/include/internal/pycore_long.h b/include/internal/pycore_long.h
new file mode 100644
index 0000000..2bea3a5
--- /dev/null
+++ b/include/internal/pycore_long.h
@@ -0,0 +1,40 @@
+#ifndef Py_INTERNAL_LONG_H
+#define Py_INTERNAL_LONG_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_interp.h"        // PyInterpreterState.small_ints
+#include "pycore_pystate.h"       // _PyThreadState_GET()
+
+// Don't call this function but _PyLong_GetZero() and _PyLong_GetOne()
+static inline PyObject* __PyLong_GetSmallInt_internal(int value)
+{
+    PyInterpreterState *interp = _PyInterpreterState_GET();
+    assert(-_PY_NSMALLNEGINTS <= value && value < _PY_NSMALLPOSINTS);
+    size_t index = _PY_NSMALLNEGINTS + value;
+    PyObject *obj = (PyObject*)interp->small_ints[index];
+    // _PyLong_GetZero(), _PyLong_GetOne() and get_small_int() must not be
+    // called before _PyLong_Init() nor after _PyLong_Fini().
+    assert(obj != NULL);
+    return obj;
+}
+
+// Return a borrowed reference to the zero singleton.
+// The function cannot return NULL.
+static inline PyObject* _PyLong_GetZero(void)
+{ return __PyLong_GetSmallInt_internal(0); }
+
+// Return a borrowed reference to the one singleton.
+// The function cannot return NULL.
+static inline PyObject* _PyLong_GetOne(void)
+{ return __PyLong_GetSmallInt_internal(1); }
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_LONG_H */
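
These accessors replace the removed _PyLong_Zero/_PyLong_One globals (see the longobject.h hunk below). A minimal sketch of the intended use in a Py_BUILD_CORE translation unit:

    /* Sketch: borrow the small-int singleton instead of allocating.
       The reference is borrowed; INCREF only if it will be stored. */
    static PyObject *
    counter_start(int from_one)
    {
        PyObject *n = from_one ? _PyLong_GetOne() : _PyLong_GetZero();
        Py_INCREF(n);
        return n;
    }
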
diff --git a/include/internal/pycore_moduleobject.h b/include/internal/pycore_moduleobject.h
new file mode 100644
index 0000000..e9978ab
--- /dev/null
+++ b/include/internal/pycore_moduleobject.h
@@ -0,0 +1,42 @@
+#ifndef Py_INTERNAL_MODULEOBJECT_H
+#define Py_INTERNAL_MODULEOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+typedef struct {
+    PyObject_HEAD
+    PyObject *md_dict;
+    struct PyModuleDef *md_def;
+    void *md_state;
+    PyObject *md_weaklist;
+    // for logging purposes after md_dict is cleared
+    PyObject *md_name;
+} PyModuleObject;
+
+static inline PyModuleDef* _PyModule_GetDef(PyObject *mod) {
+    assert(PyModule_Check(mod));
+    return ((PyModuleObject *)mod)->md_def;
+}
+
+static inline void* _PyModule_GetState(PyObject* mod) {
+    assert(PyModule_Check(mod));
+    return ((PyModuleObject *)mod)->md_state;
+}
+
+static inline PyObject* _PyModule_GetDict(PyObject *mod) {
+    assert(PyModule_Check(mod));
+    PyObject *dict = ((PyModuleObject *)mod)->md_dict;
+    // _PyModule_GetDict(mod) must not be used after calling module_clear(mod)
+    assert(dict != NULL);
+    return dict;
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_MODULEOBJECT_H */
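
The inline accessors skip the argument checking of the public PyModule_GetDef()/PyModule_GetState(). A minimal sketch of a core module method using them (hypothetical module, Py_BUILD_CORE assumed):

    typedef struct {
        PyObject *cache;
    } example_state;   /* hypothetical per-module state */

    static PyObject *
    example_get_cache(PyObject *module, PyObject *Py_UNUSED(ignored))
    {
        example_state *state = _PyModule_GetState(module);
        Py_INCREF(state->cache);
        return state->cache;
    }
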
diff --git a/include/internal/pycore_object.h b/include/internal/pycore_object.h
index 32e86d0..90d9813 100644
--- a/include/internal/pycore_object.h
+++ b/include/internal/pycore_object.h
@@ -8,15 +8,59 @@
 #  error "this header requires Py_BUILD_CORE define"
 #endif
 
-#include "pycore_gc.h"         // _PyObject_GC_IS_TRACKED()
-#include "pycore_interp.h"     // PyInterpreterState.gc
-#include "pycore_pystate.h"    // _PyThreadState_GET()
+#include "pycore_gc.h"            // _PyObject_GC_IS_TRACKED()
+#include "pycore_interp.h"        // PyInterpreterState.gc
+#include "pycore_pystate.h"       // _PyInterpreterState_GET()
 
 PyAPI_FUNC(int) _PyType_CheckConsistency(PyTypeObject *type);
 PyAPI_FUNC(int) _PyDict_CheckConsistency(PyObject *mp, int check_content);
 
+/* Update the Python traceback of an object. This function must be called
+   when a memory block is reused from a free list.
+
+   Internal function called by _Py_NewReference(). */
+extern int _PyTraceMalloc_NewReference(PyObject *op);
+
+// Fast inlined version of PyType_HasFeature()
+static inline int
+_PyType_HasFeature(PyTypeObject *type, unsigned long feature) {
+    return ((type->tp_flags & feature) != 0);
+}
+
+extern void _PyType_InitCache(PyInterpreterState *interp);
+
+/* Only private in Python 3.10 and 3.9.8+; public in 3.11 */
+extern PyObject *_PyType_GetQualName(PyTypeObject *type);
+
+/* Inline functions trading binary compatibility for speed:
+   _PyObject_Init() is the fast version of PyObject_Init(), and
+   _PyObject_InitVar() is the fast version of PyObject_InitVar().
+
+   These inline functions must not be called with op=NULL. */
+static inline void
+_PyObject_Init(PyObject *op, PyTypeObject *typeobj)
+{
+    assert(op != NULL);
+    Py_SET_TYPE(op, typeobj);
+    if (_PyType_HasFeature(typeobj, Py_TPFLAGS_HEAPTYPE)) {
+        Py_INCREF(typeobj);
+    }
+    _Py_NewReference(op);
+}
+
+static inline void
+_PyObject_InitVar(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
+{
+    assert(op != NULL);
+    Py_SET_SIZE(op, size);
+    _PyObject_Init((PyObject *)op, typeobj);
+}
+
+
 /* Tell the GC to track this object.
  *
+ * The object must not be tracked by the GC.
+ *
  * NB: While the object is tracked by the collector, it must be safe to call the
  * ob_traverse method.
  *
@@ -24,23 +68,27 @@
  * because it's not object header.  So we don't use _PyGCHead_PREV() and
  * _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
  *
- * The PyObject_GC_Track() function is the public version of this macro.
+ * See also the public PyObject_GC_Track() function.
  */
-static inline void _PyObject_GC_TRACK_impl(const char *filename, int lineno,
-                                           PyObject *op)
+static inline void _PyObject_GC_TRACK(
+// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
+#ifndef NDEBUG
+    const char *filename, int lineno,
+#endif
+    PyObject *op)
 {
     _PyObject_ASSERT_FROM(op, !_PyObject_GC_IS_TRACKED(op),
                           "object already tracked by the garbage collector",
-                          filename, lineno, "_PyObject_GC_TRACK");
+                          filename, lineno, __func__);
 
     PyGC_Head *gc = _Py_AS_GC(op);
     _PyObject_ASSERT_FROM(op,
                           (gc->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0,
                           "object is in generation which is garbage collected",
-                          filename, lineno, "_PyObject_GC_TRACK");
+                          filename, lineno, __func__);
 
-    PyThreadState *tstate = _PyThreadState_GET();
-    PyGC_Head *generation0 = tstate->interp->gc.generation0;
+    PyInterpreterState *interp = _PyInterpreterState_GET();
+    PyGC_Head *generation0 = interp->gc.generation0;
     PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
     _PyGCHead_SET_NEXT(last, gc);
     _PyGCHead_SET_PREV(gc, last);
@@ -48,9 +96,6 @@
     generation0->_gc_prev = (uintptr_t)gc;
 }
 
-#define _PyObject_GC_TRACK(op) \
-    _PyObject_GC_TRACK_impl(__FILE__, __LINE__, _PyObject_CAST(op))
-
 /* Tell the GC to stop tracking this object.
  *
  * Internal note: This may be called while GC. So _PyGC_PREV_MASK_COLLECTING
@@ -58,14 +103,19 @@
  *
  * The object must be tracked by the GC.
  *
- * The PyObject_GC_UnTrack() function is the public version of this macro.
+ * See also the public PyObject_GC_UnTrack() which accept an object which is
+ * not tracked.
  */
-static inline void _PyObject_GC_UNTRACK_impl(const char *filename, int lineno,
-                                             PyObject *op)
+static inline void _PyObject_GC_UNTRACK(
+// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
+#ifndef NDEBUG
+    const char *filename, int lineno,
+#endif
+    PyObject *op)
 {
     _PyObject_ASSERT_FROM(op, _PyObject_GC_IS_TRACKED(op),
                           "object not tracked by the garbage collector",
-                          filename, lineno, "_PyObject_GC_UNTRACK");
+                          filename, lineno, __func__);
 
     PyGC_Head *gc = _Py_AS_GC(op);
     PyGC_Head *prev = _PyGCHead_PREV(gc);
@@ -76,8 +126,20 @@
     gc->_gc_prev &= _PyGC_PREV_MASK_FINALIZED;
 }
 
-#define _PyObject_GC_UNTRACK(op) \
-    _PyObject_GC_UNTRACK_impl(__FILE__, __LINE__, _PyObject_CAST(op))
+// Macros to accept any type for the parameter, and to automatically pass
+// the filename and the line number (if NDEBUG is not defined) where the
+// macro is called.
+#ifdef NDEBUG
+#  define _PyObject_GC_TRACK(op) \
+        _PyObject_GC_TRACK(_PyObject_CAST(op))
+#  define _PyObject_GC_UNTRACK(op) \
+        _PyObject_GC_UNTRACK(_PyObject_CAST(op))
+#else
+#  define _PyObject_GC_TRACK(op) \
+        _PyObject_GC_TRACK(__FILE__, __LINE__, _PyObject_CAST(op))
+#  define _PyObject_GC_UNTRACK(op) \
+        _PyObject_GC_UNTRACK(__FILE__, __LINE__, _PyObject_CAST(op))
+#endif
 
 #ifdef Py_REF_DEBUG
 extern void _PyDebug_PrintTotalRefs(void);
@@ -96,12 +158,6 @@
     return (PyObject **)((char *)op + offset);
 }
 
-// Fast inlined version of PyType_HasFeature()
-static inline int
-_PyType_HasFeature(PyTypeObject *type, unsigned long feature) {
-    return ((type->tp_flags & feature) != 0);
-}
-
 // Fast inlined version of PyObject_IS_GC()
 static inline int
 _PyObject_IS_GC(PyObject *obj)
@@ -114,6 +170,16 @@
 // Fast inlined version of PyType_IS_GC()
 #define _PyType_IS_GC(t) _PyType_HasFeature((t), Py_TPFLAGS_HAVE_GC)
 
+// Usage: assert(_Py_CheckSlotResult(obj, "__getitem__", result != NULL));
+extern int _Py_CheckSlotResult(
+    PyObject *obj,
+    const char *slot_name,
+    int success);
+
+// PyType_Ready() must be called if _PyType_IsReady() is false.
+// See also the Py_TPFLAGS_READY flag.
+#define _PyType_IsReady(type) ((type)->tp_dict != NULL)
+
 #ifdef __cplusplus
 }
 #endif
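
A hedged sketch of the fast-path initializers above, for a non-GC internal allocation (hypothetical type; _PyObject_SIZE comes from objimpl.h):

    /* Sketch: allocate and initialize an instance of a non-GC type with
       the inline fast path instead of PyObject_Init(). */
    static PyObject *
    alloc_instance(PyTypeObject *tp)
    {
        PyObject *op = PyObject_Malloc(_PyObject_SIZE(tp));
        if (op == NULL) {
            return PyErr_NoMemory();
        }
        _PyObject_Init(op, tp);
        return op;
    }
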
diff --git a/include/internal/pycore_parser.h b/include/internal/pycore_parser.h
new file mode 100644
index 0000000..e2de24e
--- /dev/null
+++ b/include/internal/pycore_parser.h
@@ -0,0 +1,31 @@
+#ifndef Py_INTERNAL_PARSER_H
+#define Py_INTERNAL_PARSER_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+extern struct _mod* _PyParser_ASTFromString(
+    const char *str,
+    PyObject* filename,
+    int mode,
+    PyCompilerFlags *flags,
+    PyArena *arena);
+extern struct _mod* _PyParser_ASTFromFile(
+    FILE *fp,
+    PyObject *filename_ob,
+    const char *enc,
+    int mode,
+    const char *ps1,
+    const char *ps2,
+    PyCompilerFlags *flags,
+    int *errcode,
+    PyArena *arena);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_PARSER_H */
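
Together with _PyAST_Compile() from the new pycore_compile.h above, these entry points form the internal string-to-code pipeline. A hedged sketch (Py_BUILD_CORE, abbreviated error handling):

    static PyCodeObject *
    compile_string(const char *src, PyObject *filename, PyCompilerFlags *flags)
    {
        PyArena *arena = _PyArena_New();
        if (arena == NULL) {
            return NULL;
        }
        struct _mod *mod = _PyParser_ASTFromString(src, filename, Py_file_input,
                                                   flags, arena);
        PyCodeObject *co = NULL;
        if (mod != NULL) {
            co = _PyAST_Compile(mod, filename, flags, -1, arena);
        }
        _PyArena_Free(arena);   /* frees the AST; co survives independently */
        return co;
    }
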
diff --git a/include/internal/pycore_pathconfig.h b/include/internal/pycore_pathconfig.h
index 42d61b1..15447f5 100644
--- a/include/internal/pycore_pathconfig.h
+++ b/include/internal/pycore_pathconfig.h
@@ -65,6 +65,7 @@
 
 extern PyStatus _PyConfig_WritePathConfig(const PyConfig *config);
 extern void _Py_DumpPathConfig(PyThreadState *tstate);
+extern PyObject* _PyPathConfig_AsDict(void);
 
 #ifdef __cplusplus
 }
diff --git a/include/internal/pycore_pyarena.h b/include/internal/pycore_pyarena.h
new file mode 100644
index 0000000..d78972a
--- /dev/null
+++ b/include/internal/pycore_pyarena.h
@@ -0,0 +1,64 @@
+/* An arena-like memory interface for the compiler.
+ */
+
+#ifndef Py_INTERNAL_PYARENA_H
+#define Py_INTERNAL_PYARENA_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+typedef struct _arena PyArena;
+
+/* _PyArena_New() and _PyArena_Free() create a new arena and free it,
+   respectively.  Once an arena has been created, it can be used
+   to allocate memory via _PyArena_Malloc().  Pointers to PyObject can
+   also be registered with the arena via _PyArena_AddPyObject(), and the
+   arena will ensure that the PyObjects stay alive at least until
+   _PyArena_Free() is called.  When an arena is freed, all the memory it
+   allocated is freed, the arena releases internal references to registered
+   PyObject*, and none of its pointers are valid.
+   XXX (tim) What does "none of its pointers are valid" mean?  Does it
+   XXX mean that pointers previously obtained via _PyArena_Malloc() are
+   XXX no longer valid?  (That's clearly true, but not sure that's what
+   XXX the text is trying to say.)
+
+   _PyArena_New() returns an arena pointer.  On error, it
+   returns a negative number and sets an exception.
+   XXX (tim):  Not true.  On error, _PyArena_New() actually returns NULL,
+   XXX and looks like it may or may not set an exception (e.g., if the
+   XXX internal PyList_New(0) returns NULL, _PyArena_New() passes that on
+   XXX and an exception is set; OTOH, if the internal
+   XXX block_new(DEFAULT_BLOCK_SIZE) returns NULL, that's passed on but
+   XXX an exception is not set in that case).
+*/
+PyAPI_FUNC(PyArena*) _PyArena_New(void);
+PyAPI_FUNC(void) _PyArena_Free(PyArena *);
+
+/* Mostly like malloc(), return the address of a block of memory spanning
+ * `size` bytes, or return NULL (without setting an exception) if enough
+ * new memory can't be obtained.  Unlike malloc(0), _PyArena_Malloc() with
+ * size=0 does not guarantee to return a unique pointer (the pointer
+ * returned may equal one or more other pointers obtained from
+ * _PyArena_Malloc()).
+ * Note that pointers obtained via _PyArena_Malloc() must never be passed to
+ * the system free() or realloc(), or to any of Python's similar memory-
+ * management functions.  _PyArena_Malloc()-obtained pointers remain valid
+ * until _PyArena_Free(ar) is called, at which point all pointers obtained
+ * from the arena `ar` become invalid simultaneously.
+ */
+PyAPI_FUNC(void*) _PyArena_Malloc(PyArena *, size_t size);
+
+/* This routine isn't a proper arena allocation routine.  It takes
+ * a PyObject* and records it so that it can be DECREFed when the
+ * arena is freed.
+ */
+PyAPI_FUNC(int) _PyArena_AddPyObject(PyArena *, PyObject *);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_PYARENA_H */
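
A minimal sketch of the object-registration pattern, assuming _PyArena_AddPyObject() takes over the caller's reference on success (the comment above says the arena DECREFs registered objects when it is freed):

    static PyObject *
    arena_filename(PyArena *arena, const char *name)
    {
        PyObject *filename = PyUnicode_FromString(name);
        if (filename == NULL) {
            return NULL;
        }
        if (_PyArena_AddPyObject(arena, filename) < 0) {
            Py_DECREF(filename);
            return NULL;
        }
        /* borrowed from here on: the arena keeps it alive until
           _PyArena_Free() */
        return filename;
    }
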
diff --git a/include/internal/pycore_pyerrors.h b/include/internal/pycore_pyerrors.h
index 2cf1160..a5e97fe 100644
--- a/include/internal/pycore_pyerrors.h
+++ b/include/internal/pycore_pyerrors.h
@@ -84,6 +84,12 @@
 
 PyAPI_FUNC(int) _PyErr_CheckSignalsTstate(PyThreadState *tstate);
 
+PyAPI_FUNC(void) _Py_DumpExtensionModules(int fd, PyInterpreterState *interp);
+
+extern PyObject* _Py_Offer_Suggestions(PyObject* exception);
+PyAPI_FUNC(Py_ssize_t) _Py_UTF8_Edit_Cost(PyObject *str_a, PyObject *str_b,
+                                          Py_ssize_t max_cost);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/internal/pycore_pylifecycle.h b/include/internal/pycore_pylifecycle.h
index 50ab645..524be9d 100644
--- a/include/internal/pycore_pylifecycle.h
+++ b/include/internal/pycore_pylifecycle.h
@@ -8,6 +8,24 @@
 #  error "this header requires Py_BUILD_CORE define"
 #endif
 
+#ifdef HAVE_SIGNAL_H
+#include <signal.h>
+#endif
+
+#include "pycore_runtime.h"       // _PyRuntimeState
+
+#ifndef NSIG
+# if defined(_NSIG)
+#  define NSIG _NSIG            /* For BSD/SysV */
+# elif defined(_SIGMAX)
+#  define NSIG (_SIGMAX + 1)    /* For QNX */
+# elif defined(SIGMAX)
+#  define NSIG (SIGMAX + 1)     /* For djgpp */
+# else
+#  define NSIG 64               /* Use a reasonable default value */
+# endif
+#endif
+
 /* Forward declarations */
 struct _PyArgv;
 struct pyruntimestate;
@@ -31,64 +49,71 @@
 
 /* Various one-time initializers */
 
-extern PyStatus _PyUnicode_Init(void);
+extern PyStatus _PyUnicode_Init(PyInterpreterState *interp);
+extern PyStatus _PyUnicode_InitTypes(void);
+extern PyStatus _PyBytes_Init(PyInterpreterState *interp);
 extern int _PyStructSequence_Init(void);
-extern int _PyLong_Init(PyThreadState *tstate);
+extern int _PyLong_Init(PyInterpreterState *interp);
+extern int _PyLong_InitTypes(void);
+extern PyStatus _PyTuple_Init(PyInterpreterState *interp);
 extern PyStatus _PyFaulthandler_Init(int enable);
 extern int _PyTraceMalloc_Init(int enable);
-extern PyObject * _PyBuiltin_Init(PyThreadState *tstate);
+extern PyObject * _PyBuiltin_Init(PyInterpreterState *interp);
 extern PyStatus _PySys_Create(
     PyThreadState *tstate,
     PyObject **sysmod_p);
 extern PyStatus _PySys_ReadPreinitWarnOptions(PyWideStringList *options);
 extern PyStatus _PySys_ReadPreinitXOptions(PyConfig *config);
-extern int _PySys_InitMain(PyThreadState *tstate);
-extern PyStatus _PyExc_Init(void);
-extern PyStatus _PyErr_Init(void);
+extern int _PySys_UpdateConfig(PyThreadState *tstate);
+extern PyStatus _PyExc_Init(PyInterpreterState *interp);
+extern PyStatus _PyErr_InitTypes(void);
 extern PyStatus _PyBuiltins_AddExceptions(PyObject * bltinmod);
-extern PyStatus _PyImportHooks_Init(PyThreadState *tstate);
-extern int _PyFloat_Init(void);
+extern void _PyFloat_Init(void);
+extern int _PyFloat_InitTypes(void);
 extern PyStatus _Py_HashRandomization_Init(const PyConfig *);
 
 extern PyStatus _PyTypes_Init(void);
 extern PyStatus _PyTypes_InitSlotDefs(void);
 extern PyStatus _PyImportZip_Init(PyThreadState *tstate);
-extern PyStatus _PyGC_Init(PyThreadState *tstate);
+extern PyStatus _PyGC_Init(PyInterpreterState *interp);
+extern PyStatus _PyAtExit_Init(PyInterpreterState *interp);
 
 
 /* Various internal finalizers */
 
-extern void _PyFrame_Fini(void);
-extern void _PyDict_Fini(void);
-extern void _PyTuple_Fini(void);
-extern void _PyList_Fini(void);
-extern void _PySet_Fini(void);
-extern void _PyBytes_Fini(void);
-extern void _PyFloat_Fini(void);
-extern void _PySlice_Fini(void);
-extern void _PyAsyncGen_Fini(void);
+extern void _PyFrame_Fini(PyInterpreterState *interp);
+extern void _PyDict_Fini(PyInterpreterState *interp);
+extern void _PyTuple_Fini(PyInterpreterState *interp);
+extern void _PyList_Fini(PyInterpreterState *interp);
+extern void _PyBytes_Fini(PyInterpreterState *interp);
+extern void _PyFloat_Fini(PyInterpreterState *interp);
+extern void _PySlice_Fini(PyInterpreterState *interp);
+extern void _PyAsyncGen_Fini(PyInterpreterState *interp);
 
 extern int _PySignal_Init(int install_signal_handlers);
-extern void PyOS_FiniInterrupts(void);
+extern void _PySignal_Fini(void);
 
-extern void _PyExc_Fini(void);
+extern void _PyExc_Fini(PyInterpreterState *interp);
 extern void _PyImport_Fini(void);
 extern void _PyImport_Fini2(void);
-extern void _PyGC_Fini(PyThreadState *tstate);
-extern void _PyType_Fini(void);
+extern void _PyGC_Fini(PyInterpreterState *interp);
+extern void _PyType_Fini(PyInterpreterState *interp);
 extern void _Py_HashRandomization_Fini(void);
-extern void _PyUnicode_Fini(PyThreadState *tstate);
-extern void _PyLong_Fini(PyThreadState *tstate);
+extern void _PyUnicode_Fini(PyInterpreterState *interp);
+extern void _PyUnicode_ClearInterned(PyInterpreterState *interp);
+extern void _PyLong_Fini(PyInterpreterState *interp);
 extern void _PyFaulthandler_Fini(void);
 extern void _PyHash_Fini(void);
 extern void _PyTraceMalloc_Fini(void);
 extern void _PyWarnings_Fini(PyInterpreterState *interp);
-extern void _PyAST_Fini(void);
+extern void _PyAST_Fini(PyInterpreterState *interp);
+extern void _PyAtExit_Fini(PyInterpreterState *interp);
 
-extern PyStatus _PyGILState_Init(PyThreadState *tstate);
-extern void _PyGILState_Fini(PyThreadState *tstate);
+extern PyStatus _PyGILState_Init(_PyRuntimeState *runtime);
+extern PyStatus _PyGILState_SetTstate(PyThreadState *tstate);
+extern void _PyGILState_Fini(PyInterpreterState *interp);
 
-PyAPI_FUNC(void) _PyGC_DumpShutdownStats(PyThreadState *tstate);
+PyAPI_FUNC(void) _PyGC_DumpShutdownStats(PyInterpreterState *interp);
 
 PyAPI_FUNC(PyStatus) _Py_PreInitializeFromPyArgv(
     const PyPreConfig *src_config,
@@ -108,6 +133,8 @@
 
 PyAPI_FUNC(void) _PyThreadState_DeleteCurrent(PyThreadState *tstate);
 
+extern void _PyAtExit_Call(PyInterpreterState *interp);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/internal/pycore_pymem.h b/include/internal/pycore_pymem.h
index 3d925e2..49af171 100644
--- a/include/internal/pycore_pymem.h
+++ b/include/internal/pycore_pymem.h
@@ -42,7 +42,7 @@
    fills newly allocated memory with CLEANBYTE (0xCD) and newly freed memory
    with DEADBYTE (0xDD). Detect also "untouchable bytes" marked
    with FORBIDDENBYTE (0xFD). */
-static inline int _PyMem_IsPtrFreed(void *ptr)
+static inline int _PyMem_IsPtrFreed(const void *ptr)
 {
     uintptr_t value = (uintptr_t)ptr;
 #if SIZEOF_VOID_P == 8
@@ -69,9 +69,6 @@
    PYMEM_ALLOCATOR_NOT_SET does nothing. */
 PyAPI_FUNC(int) _PyMem_SetupAllocators(PyMemAllocatorName allocator);
 
-/* bpo-35053: Expose _Py_tracemalloc_config for _Py_NewReference()
-   which access directly _Py_tracemalloc_config.tracing for best
-   performances. */
 struct _PyTraceMalloc_Config {
     /* Module initialized?
        Variable protected by the GIL */
diff --git a/include/internal/pycore_pystate.h b/include/internal/pycore_pystate.h
index 835d6e0..4b894f3 100644
--- a/include/internal/pycore_pystate.h
+++ b/include/internal/pycore_pystate.h
@@ -22,11 +22,11 @@
 
 
 static inline int
-_Py_IsMainInterpreter(PyThreadState* tstate)
+_Py_IsMainInterpreter(PyInterpreterState *interp)
 {
     /* Use directly _PyRuntime rather than tstate->interp->runtime, since
        this function is used in performance critical code path (ceval) */
-    return (tstate->interp == _PyRuntime.interpreters.main);
+    return (interp == _PyRuntime.interpreters.main);
 }
 
 
@@ -49,10 +49,18 @@
 /* Variable and macro for in-line access to current thread
    and interpreter state */
 
+#ifdef EXPERIMENTAL_ISOLATED_SUBINTERPRETERS
+PyAPI_FUNC(PyThreadState*) _PyThreadState_GetTSS(void);
+#endif
+
 static inline PyThreadState*
 _PyRuntimeState_GetThreadState(_PyRuntimeState *runtime)
 {
+#ifdef EXPERIMENTAL_ISOLATED_SUBINTERPRETERS
+    return _PyThreadState_GetTSS();
+#else
     return (PyThreadState*)_Py_atomic_load_relaxed(&runtime->gilstate.tstate_current);
+#endif
 }
 
 /* Get the current Python thread state.
@@ -67,7 +75,11 @@
 static inline PyThreadState*
 _PyThreadState_GET(void)
 {
+#ifdef EXPERIMENTAL_ISOLATED_SUBINTERPRETERS
+    return _PyThreadState_GetTSS();
+#else
     return _PyRuntimeState_GetThreadState(&_PyRuntime);
+#endif
 }
 
 /* Redefine PyThreadState_GET() as an alias to _PyThreadState_GET() */
@@ -119,9 +131,12 @@
     PyThreadState *newts);
 
 PyAPI_FUNC(PyStatus) _PyInterpreterState_Enable(_PyRuntimeState *runtime);
-PyAPI_FUNC(void) _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime);
 
-PyAPI_FUNC(void) _PyGILState_Reinit(_PyRuntimeState *runtime);
+#ifdef HAVE_FORK
+extern PyStatus _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime);
+extern PyStatus _PyGILState_Reinit(_PyRuntimeState *runtime);
+extern void _PySignal_AfterFork(void);
+#endif
 
 
 PyAPI_FUNC(int) _PyState_AddModule(
diff --git a/include/internal/pycore_runtime.h b/include/internal/pycore_runtime.h
index 34eb492..bcd710c 100644
--- a/include/internal/pycore_runtime.h
+++ b/include/internal/pycore_runtime.h
@@ -19,7 +19,9 @@
        the main thread of the main interpreter can handle signals: see
        _Py_ThreadCanHandleSignals(). */
     _Py_atomic_int signals_pending;
+#ifndef EXPERIMENTAL_ISOLATED_SUBINTERPRETERS
     struct _gil_runtime_state gil;
+#endif
 };
 
 /* GIL state */
@@ -47,6 +49,13 @@
     void *userData;
 } _Py_AuditHookEntry;
 
+struct _Py_unicode_runtime_ids {
+    PyThread_type_lock lock;
+    // next_index value must be preserved when Py_Initialize()/Py_Finalize()
+    // is called multiple times: see _PyUnicode_FromId() implementation.
+    Py_ssize_t next_index;
+};
+
 /* Full Python runtime state */
 
 typedef struct pyruntimestate {
@@ -100,10 +109,14 @@
 
     PyPreConfig preconfig;
 
+    // Audit values must be preserved when Py_Initialize()/Py_Finalize()
+    // is called multiple times.
     Py_OpenCodeHookFunction open_code_hook;
     void *open_code_userdata;
     _Py_AuditHookEntry *audit_hook_head;
 
+    struct _Py_unicode_runtime_ids unicode_ids;
+
     // XXX Consolidate globals found via the check-c-globals script.
 } _PyRuntimeState;
 
@@ -118,7 +131,7 @@
 PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *runtime);
 
 #ifdef HAVE_FORK
-PyAPI_FUNC(void) _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime);
+extern PyStatus _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime);
 #endif
 
 /* Initialize _PyRuntimeState.
diff --git a/include/internal/pycore_structseq.h b/include/internal/pycore_structseq.h
new file mode 100644
index 0000000..84c8d47
--- /dev/null
+++ b/include/internal/pycore_structseq.h
@@ -0,0 +1,21 @@
+#ifndef Py_INTERNAL_STRUCTSEQ_H
+#define Py_INTERNAL_STRUCTSEQ_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+
+PyAPI_FUNC(int) _PyStructSequence_InitType(
+    PyTypeObject *type,
+    PyStructSequence_Desc *desc,
+    unsigned long tp_flags);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_STRUCTSEQ_H */
diff --git a/include/symtable.h b/include/internal/pycore_symtable.h
similarity index 80%
rename from include/symtable.h
rename to include/internal/pycore_symtable.h
index abd19a7..a2e520b 100644
--- a/include/symtable.h
+++ b/include/internal/pycore_symtable.h
@@ -1,19 +1,25 @@
-#ifndef Py_LIMITED_API
-#ifndef Py_SYMTABLE_H
-#define Py_SYMTABLE_H
+#ifndef Py_INTERNAL_SYMTABLE_H
+#define Py_INTERNAL_SYMTABLE_H
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#include "Python-ast.h"   /* mod_ty */
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
 
-/* XXX(ncoghlan): This is a weird mix of public names and interpreter internal
- *                names.
- */
+struct _mod;   // Type defined in pycore_ast.h
 
-typedef enum _block_type { FunctionBlock, ClassBlock, ModuleBlock }
+typedef enum _block_type { FunctionBlock, ClassBlock, ModuleBlock, AnnotationBlock }
     _Py_block_ty;
 
+typedef enum _comprehension_type {
+    NoComprehension = 0,
+    ListComprehension = 1,
+    DictComprehension = 2,
+    SetComprehension = 3,
+    GeneratorExpression = 4 } _Py_comprehension_ty;
+
 struct _symtable_entry;
 
 struct symtable {
@@ -43,14 +49,14 @@
     PyObject *ste_varnames;  /* list of function parameters */
     PyObject *ste_children;  /* list of child blocks */
     PyObject *ste_directives;/* locations of global and nonlocal statements */
-    _Py_block_ty ste_type;   /* module, class, or function */
+    _Py_block_ty ste_type;   /* module, class or function */
     int ste_nested;      /* true if block is nested */
     unsigned ste_free : 1;        /* true if block has free variables */
     unsigned ste_child_free : 1;  /* true if a child block has free vars,
                                      including free refs to globals */
     unsigned ste_generator : 1;   /* true if namespace is a generator */
     unsigned ste_coroutine : 1;   /* true if namespace is a coroutine */
-    unsigned ste_comprehension : 1; /* true if namespace is a list comprehension */
+    _Py_comprehension_ty ste_comprehension;  /* Kind of comprehension (if any) */
     unsigned ste_varargs : 1;     /* true if block has varargs */
     unsigned ste_varkeywords : 1; /* true if block has varkeywords */
     unsigned ste_returns_value : 1;  /* true if namespace uses return with
@@ -62,28 +68,26 @@
     int ste_comp_iter_expr; /* non-zero if visiting a comprehension range expression */
     int ste_lineno;          /* first line of block */
     int ste_col_offset;      /* offset of first line of block */
+    int ste_end_lineno;      /* end line of block */
+    int ste_end_col_offset;  /* end column offset of block */
     int ste_opt_lineno;      /* lineno of last exec or import * */
     int ste_opt_col_offset;  /* offset of last exec or import * */
     struct symtable *ste_table;
 } PySTEntryObject;
 
-PyAPI_DATA(PyTypeObject) PySTEntry_Type;
+extern PyTypeObject PySTEntry_Type;
 
 #define PySTEntry_Check(op) Py_IS_TYPE(op, &PySTEntry_Type)
 
-PyAPI_FUNC(int) PyST_GetScope(PySTEntryObject *, PyObject *);
+extern int _PyST_GetScope(PySTEntryObject *, PyObject *);
 
-PyAPI_FUNC(struct symtable *) PySymtable_Build(
-    mod_ty mod,
-    const char *filename,       /* decoded from the filesystem encoding */
-    PyFutureFeatures *future);
-PyAPI_FUNC(struct symtable *) PySymtable_BuildObject(
-    mod_ty mod,
+extern struct symtable* _PySymtable_Build(
+    struct _mod *mod,
     PyObject *filename,
     PyFutureFeatures *future);
 PyAPI_FUNC(PySTEntryObject *) PySymtable_Lookup(struct symtable *, void *);
 
-PyAPI_FUNC(void) PySymtable_Free(struct symtable *);
+extern void _PySymtable_Free(struct symtable *);
 
 /* Flags for def-use information */
 
@@ -116,8 +120,14 @@
 #define GENERATOR 1
 #define GENERATOR_EXPRESSION 2
 
+// Used by symtablemodule.c
+extern struct symtable* _Py_SymtableStringObjectFlags(
+    const char *str,
+    PyObject *filename,
+    int start,
+    PyCompilerFlags *flags);
+
 #ifdef __cplusplus
 }
 #endif
-#endif /* !Py_SYMTABLE_H */
-#endif /* !Py_LIMITED_API */
+#endif /* !Py_INTERNAL_SYMTABLE_H */
diff --git a/include/internal/pycore_traceback.h b/include/internal/pycore_traceback.h
index 1f09241..c01a476 100644
--- a/include/internal/pycore_traceback.h
+++ b/include/internal/pycore_traceback.h
@@ -51,7 +51,7 @@
    _PyGILState_GetInterpreterStateUnsafe() in last resort.
 
    It is better to pass NULL to interp and current_tstate, the function tries
-   different options to retrieve these informations.
+   different options to retrieve this information.
 
    This function is signal safe. */
 
@@ -74,17 +74,13 @@
    This function is signal safe. */
 PyAPI_FUNC(void) _Py_DumpDecimal(
     int fd,
-    unsigned long value);
+    size_t value);
 
-/* Format an integer as hexadecimal into the file descriptor fd with at least
-   width digits.
-
-   The maximum width is sizeof(unsigned long)*2 digits.
-
-   This function is signal safe. */
+/* Format an integer as hexadecimal, with at least 'width' digits, into the
+   file descriptor 'fd'.  The function is signal safe. */
 PyAPI_FUNC(void) _Py_DumpHexadecimal(
     int fd,
-    unsigned long value,
+    uintptr_t value,
     Py_ssize_t width);
 
 PyAPI_FUNC(PyObject*) _PyTraceBack_FromFrame(
diff --git a/include/internal/pycore_tupleobject.h b/include/internal/pycore_tuple.h
similarity index 74%
rename from include/internal/pycore_tupleobject.h
rename to include/internal/pycore_tuple.h
index f95f16c..5353e18 100644
--- a/include/internal/pycore_tupleobject.h
+++ b/include/internal/pycore_tuple.h
@@ -1,5 +1,5 @@
-#ifndef Py_INTERNAL_TUPLEOBJECT_H
-#define Py_INTERNAL_TUPLEOBJECT_H
+#ifndef Py_INTERNAL_TUPLE_H
+#define Py_INTERNAL_TUPLE_H
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -11,9 +11,10 @@
 #include "tupleobject.h"   /* _PyTuple_CAST() */
 
 #define _PyTuple_ITEMS(op) (_PyTuple_CAST(op)->ob_item)
+
 PyAPI_FUNC(PyObject *) _PyTuple_FromArray(PyObject *const *, Py_ssize_t);
 
 #ifdef __cplusplus
 }
 #endif
-#endif   /* !Py_INTERNAL_TUPLEOBJECT_H */
+#endif   /* !Py_INTERNAL_TUPLE_H */
diff --git a/include/internal/pycore_ucnhash.h b/include/internal/pycore_ucnhash.h
new file mode 100644
index 0000000..187dd68
--- /dev/null
+++ b/include/internal/pycore_ucnhash.h
@@ -0,0 +1,34 @@
+/* Unicode name database interface */
+#ifndef Py_INTERNAL_UCNHASH_H
+#define Py_INTERNAL_UCNHASH_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+/* revised ucnhash CAPI interface (exported through a "wrapper") */
+
+#define PyUnicodeData_CAPSULE_NAME "unicodedata._ucnhash_CAPI"
+
+typedef struct {
+
+    /* Get name for a given character code.
+       Returns non-zero on success, zero on failure.
+       Does not set Python exceptions. */
+    int (*getname)(Py_UCS4 code, char* buffer, int buflen,
+                   int with_alias_and_seq);
+
+    /* Get character code for a given name.
+       Same error handling as for getname(). */
+    int (*getcode)(const char* name, int namelen, Py_UCS4* code,
+                   int with_named_seq);
+
+} _PyUnicode_Name_CAPI;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_UCNHASH_H */
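
A minimal sketch of consuming the capsule (hypothetical caller; PyCapsule_Import() sets an exception on failure):

    #include <string.h>   // strlen

    static int
    lookup_named_char(const char *name, Py_UCS4 *code)
    {
        _PyUnicode_Name_CAPI *api =
            PyCapsule_Import(PyUnicodeData_CAPSULE_NAME, 0);
        if (api == NULL) {
            return 0;
        }
        return api->getcode(name, (int)strlen(name), code, 1);
    }
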
diff --git a/include/internal/pycore_unionobject.h b/include/internal/pycore_unionobject.h
new file mode 100644
index 0000000..9962f57
--- /dev/null
+++ b/include/internal/pycore_unionobject.h
@@ -0,0 +1,22 @@
+#ifndef Py_INTERNAL_UNIONOBJECT_H
+#define Py_INTERNAL_UNIONOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+#  error "this header requires Py_BUILD_CORE define"
+#endif
+
+extern PyTypeObject _PyUnion_Type;
+#define _PyUnion_Check(op) Py_IS_TYPE(op, &_PyUnion_Type)
+extern PyObject *_Py_union_type_or(PyObject *, PyObject *);
+
+#define _PyGenericAlias_Check(op) PyObject_TypeCheck(op, &Py_GenericAliasType)
+extern PyObject *_Py_subs_parameters(PyObject *, PyObject *, PyObject *, PyObject *);
+extern PyObject *_Py_make_parameters(PyObject *);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_UNIONOBJECT_H */
diff --git a/include/internal/pycore_warnings.h b/include/internal/pycore_warnings.h
index cafe305..f728ec3 100644
--- a/include/internal/pycore_warnings.h
+++ b/include/internal/pycore_warnings.h
@@ -17,7 +17,7 @@
     long filters_version;
 };
 
-extern PyStatus _PyWarnings_InitState(PyThreadState *tstate);
+extern int _PyWarnings_InitState(PyInterpreterState *interp);
 
 #ifdef __cplusplus
 }
diff --git a/include/intrcheck.h b/include/intrcheck.h
index e5bf5a8..b8cc656 100644
--- a/include/intrcheck.h
+++ b/include/intrcheck.h
@@ -1,4 +1,3 @@
-
 #ifndef Py_INTRCHECK_H
 #define Py_INTRCHECK_H
 #ifdef __cplusplus
@@ -6,7 +5,6 @@
 #endif
 
 PyAPI_FUNC(int) PyOS_InterruptOccurred(void);
-PyAPI_FUNC(void) PyOS_InitInterrupts(void);
 #ifdef HAVE_FORK
 #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000
 PyAPI_FUNC(void) PyOS_BeforeFork(void);
@@ -19,7 +17,6 @@
 
 #ifndef Py_LIMITED_API
 PyAPI_FUNC(int) _PyOS_IsMainThread(void);
-PyAPI_FUNC(void) _PySignal_AfterFork(void);
 
 #ifdef MS_WINDOWS
 /* windows.h is not included by Python.h so use void* instead of HANDLE */
diff --git a/include/iterobject.h b/include/iterobject.h
index 51139bf..6454611 100644
--- a/include/iterobject.h
+++ b/include/iterobject.h
@@ -7,6 +7,9 @@
 
 PyAPI_DATA(PyTypeObject) PySeqIter_Type;
 PyAPI_DATA(PyTypeObject) PyCallIter_Type;
+#ifdef Py_BUILD_CORE
+extern PyTypeObject _PyAnextAwaitable_Type;
+#endif
 
 #define PySeqIter_Check(op) Py_IS_TYPE(op, &PySeqIter_Type)
 
diff --git a/include/longobject.h b/include/longobject.h
index 1b28809..e2301d7 100644
--- a/include/longobject.h
+++ b/include/longobject.h
@@ -102,8 +102,6 @@
 
 PyAPI_FUNC(PyObject *) PyLong_FromString(const char *, char **, int);
 #ifndef Py_LIMITED_API
-Py_DEPRECATED(3.3)
-PyAPI_FUNC(PyObject *) PyLong_FromUnicode(Py_UNICODE*, Py_ssize_t, int);
 PyAPI_FUNC(PyObject *) PyLong_FromUnicodeObject(PyObject *u, int base);
 PyAPI_FUNC(PyObject *) _PyLong_FromBytes(const char *, Py_ssize_t, int);
 #endif
@@ -173,23 +171,6 @@
     unsigned char* bytes, size_t n,
     int little_endian, int is_signed);
 
-/* _PyLong_FromNbInt: Convert the given object to a PyLongObject
-   using the nb_int slot, if available.  Raise TypeError if either the
-   nb_int slot is not available or the result of the call to nb_int
-   returns something not of type int.
-*/
-PyAPI_FUNC(PyObject *) _PyLong_FromNbInt(PyObject *);
-
-/* Convert the given object to a PyLongObject using the nb_index or
-   nb_int slots, if available (the latter is deprecated).
-   Raise TypeError if either nb_index and nb_int slots are not
-   available or the result of the call to nb_index or nb_int
-   returns something not of type int.
-   Should be replaced with PyNumber_Index after the end of the
-   deprecation period.
-*/
-PyAPI_FUNC(PyObject *) _PyLong_FromNbIndexOrNbInt(PyObject *);
-
 /* _PyLong_Format: Convert the long to a string object with given base,
    appending a base prefix of 0[box] if base is 2, 8 or 16. */
 PyAPI_FUNC(PyObject *) _PyLong_Format(PyObject *obj, int base);
@@ -229,9 +210,6 @@
 #endif /* !Py_LIMITED_API */
 
 #ifndef Py_LIMITED_API
-PyAPI_DATA(PyObject *) _PyLong_Zero;
-PyAPI_DATA(PyObject *) _PyLong_One;
-
 PyAPI_FUNC(PyObject *) _PyLong_Rshift(PyObject *, size_t);
 PyAPI_FUNC(PyObject *) _PyLong_Lshift(PyObject *, size_t);
 #endif
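
As the removed comment itself notes, PyNumber_Index() is the replacement for _PyLong_FromNbIndexOrNbInt(). A minimal sketch of the migration (hypothetical helper):

    static long
    as_c_long(PyObject *obj, int *overflow)
    {
        PyObject *index = PyNumber_Index(obj);   /* uses the nb_index slot */
        if (index == NULL) {
            return -1;   /* TypeError already set */
        }
        long value = PyLong_AsLongAndOverflow(index, overflow);
        Py_DECREF(index);
        return value;
    }
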
diff --git a/include/methodobject.h b/include/methodobject.h
index 12e049b..9ffe8e1 100644
--- a/include/methodobject.h
+++ b/include/methodobject.h
@@ -41,7 +41,13 @@
 };
 typedef struct PyMethodDef PyMethodDef;
 
+/* PyCFunction_New is declared as a function for the stable ABI (the
+ * declaration is needed for e.g. GCC with -fvisibility=hidden), but it is
+ * redefined as a macro that calls PyCFunction_NewEx. */
+PyAPI_FUNC(PyObject *) PyCFunction_New(PyMethodDef *, PyObject *);
 #define PyCFunction_New(ML, SELF) PyCFunction_NewEx((ML), (SELF), NULL)
+
+/* PyCFunction_NewEx is similar: on 3.9+, this calls PyCMethod_New. */
 PyAPI_FUNC(PyObject *) PyCFunction_NewEx(PyMethodDef *, PyObject *,
                                          PyObject *);
 
@@ -73,15 +79,15 @@
 
 #define METH_COEXIST   0x0040
 
-#ifndef Py_LIMITED_API
-#define METH_FASTCALL  0x0080
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030a0000
+#  define METH_FASTCALL  0x0080
 #endif
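
METH_FASTCALL becomes part of the limited API from 3.10 on. A minimal sketch of the calling convention it selects (hypothetical method):

    /* Sketch: a METH_FASTCALL method receives a C array of arguments
       instead of an argument tuple. */
    static PyObject *
    example_sum2(PyObject *module, PyObject *const *args, Py_ssize_t nargs)
    {
        if (nargs != 2) {
            PyErr_SetString(PyExc_TypeError, "expected exactly 2 arguments");
            return NULL;
        }
        return PyNumber_Add(args[0], args[1]);
    }

    static PyMethodDef example_methods[] = {
        {"sum2", (PyCFunction)(void (*)(void))example_sum2, METH_FASTCALL, NULL},
        {NULL, NULL, 0, NULL},
    };
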
 
 /* This bit is preserved for Stackless Python */
 #ifdef STACKLESS
-#define METH_STACKLESS 0x0100
+#  define METH_STACKLESS 0x0100
 #else
-#define METH_STACKLESS 0x0000
+#  define METH_STACKLESS 0x0000
 #endif
 
 /* METH_METHOD means the function stores an
diff --git a/include/modsupport.h b/include/modsupport.h
index 4c4aab6..f009d58 100644
--- a/include/modsupport.h
+++ b/include/modsupport.h
@@ -136,7 +136,15 @@
 void _PyArg_Fini(void);
 #endif   /* Py_LIMITED_API */
 
-PyAPI_FUNC(int) PyModule_AddObject(PyObject *, const char *, PyObject *);
+// Add an attribute with name 'name' and value 'value' to the module 'mod'.
+// On success, return 0.
+// On error, raise an exception and return -1.
+PyAPI_FUNC(int) PyModule_AddObjectRef(PyObject *mod, const char *name, PyObject *value);
+
+// Similar to PyModule_AddObjectRef(), but steals a reference to 'value'
+// (Py_DECREF(value)) on success (when it returns 0).
+PyAPI_FUNC(int) PyModule_AddObject(PyObject *mod, const char *name, PyObject *value);
+
 PyAPI_FUNC(int) PyModule_AddIntConstant(PyObject *, const char *, long);
 PyAPI_FUNC(int) PyModule_AddStringConstant(PyObject *, const char *, const char *);
 #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
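
A minimal sketch of why the non-stealing variant is easier to use correctly (hypothetical module-init snippet):

    static int
    add_version(PyObject *mod)
    {
        PyObject *v = PyUnicode_FromString("1.0");
        if (v == NULL) {
            return -1;
        }
        /* No reference is stolen, so the cleanup path is uniform:
           always Py_DECREF(v), whether or not the call succeeded. */
        int rc = PyModule_AddObjectRef(mod, "version", v);
        Py_DECREF(v);
        return rc;
    }
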
diff --git a/include/moduleobject.h b/include/moduleobject.h
index cf9ad40..49b116c 100644
--- a/include/moduleobject.h
+++ b/include/moduleobject.h
@@ -84,6 +84,12 @@
   freefunc m_free;
 } PyModuleDef;
 
+
+// Internal C API
+#ifdef Py_BUILD_CORE
+extern int _PyModule_IsExtension(PyObject *obj);
+#endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/node.h b/include/node.h
deleted file mode 100644
index ca24f28..0000000
--- a/include/node.h
+++ /dev/null
@@ -1,47 +0,0 @@
-
-/* Parse tree node interface */
-
-#ifndef Py_NODE_H
-#define Py_NODE_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct _node {
-    short               n_type;
-    char                *n_str;
-    int                 n_lineno;
-    int                 n_col_offset;
-    int                 n_nchildren;
-    struct _node        *n_child;
-    int                 n_end_lineno;
-    int                 n_end_col_offset;
-} node;
-
-PyAPI_FUNC(node *) PyNode_New(int type);
-PyAPI_FUNC(int) PyNode_AddChild(node *n, int type,
-                                char *str, int lineno, int col_offset,
-                                int end_lineno, int end_col_offset);
-PyAPI_FUNC(void) PyNode_Free(node *n);
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(Py_ssize_t) _PyNode_SizeOf(node *n);
-#endif
-
-/* Node access functions */
-#define NCH(n)          ((n)->n_nchildren)
-
-#define CHILD(n, i)     (&(n)->n_child[i])
-#define TYPE(n)         ((n)->n_type)
-#define STR(n)          ((n)->n_str)
-#define LINENO(n)       ((n)->n_lineno)
-
-/* Assert that the type of a node is what we expect */
-#define REQ(n, type) assert(TYPE(n) == (type))
-
-PyAPI_FUNC(void) PyNode_ListTree(node *);
-void _PyNode_FinalizeEndPos(node *n);  // helper also used in parsetok.c
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_NODE_H */
diff --git a/include/object.h b/include/object.h
index 9c1a7f4..61e638c 100644
--- a/include/object.h
+++ b/include/object.h
@@ -54,11 +54,11 @@
 
 /* Py_DEBUG implies Py_REF_DEBUG. */
 #if defined(Py_DEBUG) && !defined(Py_REF_DEBUG)
-#define Py_REF_DEBUG
+#  define Py_REF_DEBUG
 #endif
 
-#if defined(Py_LIMITED_API) && defined(Py_REF_DEBUG)
-#error Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, and Py_REF_DEBUG
+#if defined(Py_LIMITED_API) && defined(Py_TRACE_REFS)
+#  error Py_LIMITED_API is incompatible with Py_TRACE_REFS
 #endif
 
 /* PyTypeObject structure is defined in cpython/object.h.
@@ -74,8 +74,8 @@
 #define _PyObject_EXTRA_INIT 0, 0,
 
 #else
-#define _PyObject_HEAD_EXTRA
-#define _PyObject_EXTRA_INIT
+#  define _PyObject_HEAD_EXTRA
+#  define _PyObject_EXTRA_INIT
 #endif
 
 /* PyObject_HEAD defines the initial segment of every PyObject. */
@@ -119,26 +119,47 @@
 
 /* Cast argument to PyVarObject* type. */
 #define _PyVarObject_CAST(op) ((PyVarObject*)(op))
+#define _PyVarObject_CAST_CONST(op) ((const PyVarObject*)(op))
 
-#define Py_REFCNT(ob)           (_PyObject_CAST(ob)->ob_refcnt)
+
+// Test if the 'x' object is the 'y' object, the same as "x is y" in Python.
+PyAPI_FUNC(int) Py_Is(PyObject *x, PyObject *y);
+#define Py_Is(x, y) ((x) == (y))
+
+
+static inline Py_ssize_t _Py_REFCNT(const PyObject *ob) {
+    return ob->ob_refcnt;
+}
+#define Py_REFCNT(ob) _Py_REFCNT(_PyObject_CAST_CONST(ob))
+
+
+// bpo-39573: The Py_SET_TYPE() function must be used to set an object type.
 #define Py_TYPE(ob)             (_PyObject_CAST(ob)->ob_type)
+
+// bpo-39573: The Py_SET_SIZE() function must be used to set an object size.
 #define Py_SIZE(ob)             (_PyVarObject_CAST(ob)->ob_size)
 
+
 static inline int _Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) {
+    // bpo-44378: Don't use Py_TYPE() since Py_TYPE() requires a non-const
+    // object.
     return ob->ob_type == type;
 }
 #define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST_CONST(ob), type)
 
+
 static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt) {
     ob->ob_refcnt = refcnt;
 }
 #define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT(_PyObject_CAST(ob), refcnt)
 
+
 static inline void _Py_SET_TYPE(PyObject *ob, PyTypeObject *type) {
     ob->ob_type = type;
 }
 #define Py_SET_TYPE(ob, type) _Py_SET_TYPE(_PyObject_CAST(ob), type)
 
+
 static inline void _Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size) {
     ob->ob_size = size;
 }
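
Py_Is() gives C code a direct spelling for Python's `is` operator. A minimal sketch (hypothetical helper):

    static int
    is_none_or_ellipsis(PyObject *obj)
    {
        return Py_Is(obj, Py_None) || Py_Is(obj, Py_Ellipsis);
    }
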
@@ -221,8 +242,11 @@
 
 /* Generic type check */
 PyAPI_FUNC(int) PyType_IsSubtype(PyTypeObject *, PyTypeObject *);
-#define PyObject_TypeCheck(ob, tp) \
-    (Py_IS_TYPE(ob, tp) || PyType_IsSubtype(Py_TYPE(ob), (tp)))
+
+static inline int _PyObject_TypeCheck(PyObject *ob, PyTypeObject *type) {
+    return Py_IS_TYPE(ob, type) || PyType_IsSubtype(Py_TYPE(ob), type);
+}
+#define PyObject_TypeCheck(ob, type) _PyObject_TypeCheck(_PyObject_CAST(ob), type)
 
 PyAPI_DATA(PyTypeObject) PyType_Type; /* built-in 'type' */
 PyAPI_DATA(PyTypeObject) PyBaseObject_Type; /* built-in 'object' */
@@ -298,6 +322,20 @@
 given type object has a specified feature.
 */
 
+#ifndef Py_LIMITED_API
+/* Set if instances of the type object are treated as sequences for pattern matching */
+#define Py_TPFLAGS_SEQUENCE (1 << 5)
+/* Set if instances of the type object are treated as mappings for pattern matching */
+#define Py_TPFLAGS_MAPPING (1 << 6)
+#endif
+
+/* Disallow creating instances of the type: set tp_new to NULL and don't create
+ * the "__new__" key in the type dictionary. */
+#define Py_TPFLAGS_DISALLOW_INSTANTIATION (1UL << 7)
+
+/* Set if the type object is immutable: type attributes cannot be set nor deleted */
+#define Py_TPFLAGS_IMMUTABLETYPE (1UL << 8)
+
 /* Set if the type object is dynamically allocated */
 #define Py_TPFLAGS_HEAPTYPE (1UL << 9)
 
@@ -330,13 +368,17 @@
 /* Objects behave like an unbound method */
 #define Py_TPFLAGS_METHOD_DESCRIPTOR (1UL << 17)
 
-/* Objects support type attribute cache */
-#define Py_TPFLAGS_HAVE_VERSION_TAG   (1UL << 18)
+/* Object has up-to-date type attribute cache */
 #define Py_TPFLAGS_VALID_VERSION_TAG  (1UL << 19)
 
 /* Type is abstract and cannot be instantiated */
 #define Py_TPFLAGS_IS_ABSTRACT (1UL << 20)
 
+// This undocumented flag gives certain built-ins their unique pattern-matching
+// behavior, which allows a single positional subpattern to match against the
+// subject itself (rather than a mapped attribute on it):
+#define _Py_TPFLAGS_MATCH_SELF (1UL << 22)
+
 /* These flags are used to determine if a type is a subclass. */
 #define Py_TPFLAGS_LONG_SUBCLASS        (1UL << 24)
 #define Py_TPFLAGS_LIST_SUBCLASS        (1UL << 25)
@@ -349,19 +391,23 @@
 
 #define Py_TPFLAGS_DEFAULT  ( \
                  Py_TPFLAGS_HAVE_STACKLESS_EXTENSION | \
-                 Py_TPFLAGS_HAVE_VERSION_TAG | \
                 0)
 
-/* NOTE: The following flags reuse lower bits (removed as part of the
+/* NOTE: Some of the following flags reuse lower bits (removed as part of the
  * Python 3.0 transition). */
 
-/* The following flag is kept for compatibility. Starting with 3.8,
- * binary compatibility of C extensions across feature releases of
- * Python is not supported anymore, except when using the stable ABI.
+/* The following flags are kept for compatibility; in previous
+ * versions they indicated presence of newer tp_* fields on the
+ * type struct.
+ * Starting with 3.8, binary compatibility of C extensions across
+ * feature releases of Python is not supported anymore (except when
+ * using the stable ABI, in which all classes are created dynamically,
+ * using the interpreter's memory layout.)
+ * Note that older extensions using the stable ABI set these flags,
+ * so the bits must not be repurposed.
  */
-
-/* Type structure has tp_finalize member (3.4) */
 #define Py_TPFLAGS_HAVE_FINALIZE (1UL << 0)
+#define Py_TPFLAGS_HAVE_VERSION_TAG   (1UL << 18)
 
 
 /*
@@ -400,22 +446,46 @@
 
 PyAPI_FUNC(void) _Py_Dealloc(PyObject *);
 
+/*
+These are provided as conveniences to Python runtime embedders, so that
+they can have object code that is not dependent on Python compilation flags.
+*/
+PyAPI_FUNC(void) Py_IncRef(PyObject *);
+PyAPI_FUNC(void) Py_DecRef(PyObject *);
+
+// Similar to Py_IncRef() and Py_DecRef() but the argument must be non-NULL.
+// Private functions used by Py_INCREF() and Py_DECREF().
+PyAPI_FUNC(void) _Py_IncRef(PyObject *);
+PyAPI_FUNC(void) _Py_DecRef(PyObject *);
+
 static inline void _Py_INCREF(PyObject *op)
 {
+#if defined(Py_REF_DEBUG) && defined(Py_LIMITED_API) && Py_LIMITED_API+0 >= 0x030A0000
+    // Stable ABI for Python 3.10 built in debug mode.
+    _Py_IncRef(op);
+#else
+    // The non-limited C API and the limited C API for Python 3.9 and older
+    // access PyObject.ob_refcnt directly.
 #ifdef Py_REF_DEBUG
     _Py_RefTotal++;
 #endif
     op->ob_refcnt++;
+#endif
 }
-
 #define Py_INCREF(op) _Py_INCREF(_PyObject_CAST(op))
 
 static inline void _Py_DECREF(
-#ifdef Py_REF_DEBUG
+#if defined(Py_REF_DEBUG) && !(defined(Py_LIMITED_API) && Py_LIMITED_API+0 >= 0x030A0000)
     const char *filename, int lineno,
 #endif
     PyObject *op)
 {
+#if defined(Py_REF_DEBUG) && defined(Py_LIMITED_API) && Py_LIMITED_API+0 >= 0x030A0000
+    // Stable ABI for Python 3.10 built in debug mode.
+    _Py_DecRef(op);
+#else
+    // The non-limited C API and the limited C API for Python 3.9 and older
+    // access PyObject.ob_refcnt directly.
 #ifdef Py_REF_DEBUG
     _Py_RefTotal--;
 #endif
@@ -429,9 +499,9 @@
     else {
         _Py_Dealloc(op);
     }
+#endif
 }
-
-#ifdef Py_REF_DEBUG
+#if defined(Py_REF_DEBUG) && !(defined(Py_LIMITED_API) && Py_LIMITED_API+0 >= 0x030A0000)
 #  define Py_DECREF(op) _Py_DECREF(__FILE__, __LINE__, _PyObject_CAST(op))
 #else
 #  define Py_DECREF(op) _Py_DECREF(_PyObject_CAST(op))
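
The reworked Py_INCREF()/Py_DECREF() above mean that a stable-ABI extension targeting 3.10+ calls the exported _Py_IncRef()/_Py_DecRef() whenever the headers define Py_REF_DEBUG, instead of poking ob_refcnt and _Py_RefTotal itself; that is what lets such wheels load on a debug interpreter. Opting in is just a define, as in this sketch:

    /* Sketch: target the 3.10 stable ABI; under a debug build's headers the
     * refcount macros above now route through _Py_IncRef()/_Py_DecRef(). */
    #define Py_LIMITED_API 0x030A0000
    #include <Python.h>
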
@@ -500,12 +570,31 @@
 
 #define Py_XDECREF(op) _Py_XDECREF(_PyObject_CAST(op))
 
-/*
-These are provided as conveniences to Python runtime embedders, so that
-they can have object code that is not dependent on Python compilation flags.
-*/
-PyAPI_FUNC(void) Py_IncRef(PyObject *);
-PyAPI_FUNC(void) Py_DecRef(PyObject *);
+// Create a new strong reference to an object:
+// increment the reference count of the object and return the object.
+PyAPI_FUNC(PyObject*) Py_NewRef(PyObject *obj);
+
+// Similar to Py_NewRef(), but the object can be NULL.
+PyAPI_FUNC(PyObject*) Py_XNewRef(PyObject *obj);
+
+static inline PyObject* _Py_NewRef(PyObject *obj)
+{
+    Py_INCREF(obj);
+    return obj;
+}
+
+static inline PyObject* _Py_XNewRef(PyObject *obj)
+{
+    Py_XINCREF(obj);
+    return obj;
+}
+
+// Py_NewRef() and Py_XNewRef() are exported as functions for the stable ABI.
+// The names are overridden with macros wrapping static inline functions for
+// best performance.
+#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
+#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
+
 
 /*
 _Py_NoneStruct is an object of undefined type which can be used in contexts
@@ -516,8 +605,12 @@
 PyAPI_DATA(PyObject) _Py_NoneStruct; /* Don't use this directly */
 #define Py_None (&_Py_NoneStruct)
 
+// Test if an object is the None singleton, the same as "x is None" in Python.
+PyAPI_FUNC(int) Py_IsNone(PyObject *x);
+#define Py_IsNone(x) Py_Is((x), Py_None)
+
 /* Macro for returning Py_None from a function */
-#define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None
+#define Py_RETURN_NONE return Py_NewRef(Py_None)
 
 /*
 Py_NotImplemented is a singleton used to signal that an operation is
@@ -527,8 +620,7 @@
 #define Py_NotImplemented (&_Py_NotImplementedStruct)
 
 /* Macro for returning Py_NotImplemented from a function */
-#define Py_RETURN_NOTIMPLEMENTED \
-    return Py_INCREF(Py_NotImplemented), Py_NotImplemented
+#define Py_RETURN_NOTIMPLEMENTED return Py_NewRef(Py_NotImplemented)
 
 /* Rich comparison opcodes */
 #define Py_LT 0
@@ -538,6 +630,15 @@
 #define Py_GT 4
 #define Py_GE 5
 
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000
+/* Result of calling PyIter_Send */
+typedef enum {
+    PYGEN_RETURN = 0,
+    PYGEN_ERROR = -1,
+    PYGEN_NEXT = 1,
+} PySendResult;
+#endif
+
 /*
  * Macro for implementing rich comparisons
  *
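
PySendResult is the return type of PyIter_Send(), also new in 3.10, and pairs naturally with Py_NewRef() from the hunks above. A hedged sketch (drive_generator is hypothetical, error handling abbreviated):

    #include <Python.h>

    /* Sketch: drive a generator with PyIter_Send(), whose return type is the
     * PySendResult enum added above. */
    static PyObject *
    drive_generator(PyObject *gen)
    {
        PyObject *value = Py_NewRef(Py_None);     /* first send(None) */
        for (;;) {
            PyObject *result = NULL;
            PySendResult status = PyIter_Send(gen, value, &result);
            Py_DECREF(value);
            switch (status) {
            case PYGEN_NEXT:      /* a value was yielded; echo it back */
                value = result;
                break;
            case PYGEN_RETURN:    /* StopIteration: result is the return value */
                return result;
            case PYGEN_ERROR:     /* an exception is set, result is NULL */
            default:
                return NULL;
            }
        }
    }
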
diff --git a/include/objimpl.h b/include/objimpl.h
index 030d7ee..450befa 100644
--- a/include/objimpl.h
+++ b/include/objimpl.h
@@ -38,7 +38,7 @@
    object with room for n items.  In addition to the refcount and type pointer
    fields, this also fills in the ob_size field.
 
- - PyObject_Del(op) releases the memory allocated for an object.  It does not
+ - PyObject_Free(op) releases the memory allocated for an object.  It does not
    run a destructor -- it only frees the memory.  PyObject_Free is identical.
 
  - PyObject_Init(op, typeobj) and PyObject_InitVar(op, typeobj, n) don't
@@ -48,8 +48,8 @@
 
 Note that objects created with PyObject_{New, NewVar} are allocated using the
 specialized Python allocator (implemented in obmalloc.c), if WITH_PYMALLOC is
-enabled.  In addition, a special debugging allocator is used if PYMALLOC_DEBUG
-is also #defined.
+enabled.  In addition, a special debugging allocator is used if the Py_DEBUG
+macro is also defined.
 
 In case a specific form of memory management is needed (for example, if you
 must use the platform malloc heap(s), or shared memory, or C++ local storage or
@@ -102,7 +102,9 @@
 PyAPI_FUNC(void) PyObject_Free(void *ptr);
 
 
-/* Macros */
+// Deprecated aliases only kept for backward compatibility.
+// PyObject_Del and PyObject_DEL are defined without parameters so that they
+// can be used as function pointers (e.g., tp_free = PyObject_Del).
 #define PyObject_MALLOC         PyObject_Malloc
 #define PyObject_REALLOC        PyObject_Realloc
 #define PyObject_FREE           PyObject_Free
@@ -118,7 +120,14 @@
 /* Functions */
 PyAPI_FUNC(PyObject *) PyObject_Init(PyObject *, PyTypeObject *);
 PyAPI_FUNC(PyVarObject *) PyObject_InitVar(PyVarObject *,
-                                                 PyTypeObject *, Py_ssize_t);
+                                           PyTypeObject *, Py_ssize_t);
+
+#define PyObject_INIT(op, typeobj) \
+    PyObject_Init(_PyObject_CAST(op), (typeobj))
+#define PyObject_INIT_VAR(op, typeobj, size) \
+    PyObject_InitVar(_PyVarObject_CAST(op), (typeobj), (size))
+
+
 PyAPI_FUNC(PyObject *) _PyObject_New(PyTypeObject *);
 PyAPI_FUNC(PyVarObject *) _PyObject_NewVar(PyTypeObject *, Py_ssize_t);
 
@@ -131,31 +140,22 @@
 #define PyObject_NewVar(type, typeobj, n) \
                 ( (type *) _PyObject_NewVar((typeobj), (n)) )
 
-// Alias to PyObject_New(). In Python 3.8, PyObject_NEW() called directly
-// PyObject_MALLOC() with _PyObject_VAR_SIZE().
+// Alias to PyObject_NewVar(). In Python 3.8, PyObject_NEW_VAR() called
+// PyObject_MALLOC() directly with _PyObject_VAR_SIZE().
 #define PyObject_NEW_VAR(type, typeobj, n) PyObject_NewVar(type, typeobj, n)
 
 
-#ifdef Py_LIMITED_API
-/* Define PyObject_INIT() and PyObject_INIT_VAR() as aliases to PyObject_Init()
-   and PyObject_InitVar() in the limited C API for compatibility with the
-   CPython C API. */
-#  define PyObject_INIT(op, typeobj) \
-        PyObject_Init(_PyObject_CAST(op), (typeobj))
-#  define PyObject_INIT_VAR(op, typeobj, size) \
-        PyObject_InitVar(_PyVarObject_CAST(op), (typeobj), (size))
-#else
-/* PyObject_INIT() and PyObject_INIT_VAR() are defined in cpython/objimpl.h */
-#endif
-
-
 /*
  * Garbage Collection Support
  * ==========================
  */
 
-/* C equivalent of gc.collect() which ignores the state of gc.enabled. */
+/* C equivalent of gc.collect(). */
 PyAPI_FUNC(Py_ssize_t) PyGC_Collect(void);
+/* C API for controlling the state of the garbage collector */
+PyAPI_FUNC(int) PyGC_Enable(void);
+PyAPI_FUNC(int) PyGC_Disable(void);
+PyAPI_FUNC(int) PyGC_IsEnabled(void);
 
 /* Test if a type has a GC head */
 #define PyType_IS_GC(t) PyType_HasFeature((t), Py_TPFLAGS_HAVE_GC)
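
PyGC_Enable(), PyGC_Disable() and PyGC_IsEnabled() finally expose gc.enable()/gc.disable()/gc.isenabled() to C; the first two return the previous state. A small sketch (with_gc_paused is hypothetical):

    #include <Python.h>

    /* Sketch: pause the cyclic GC around an allocation-heavy section and
     * restore the previous state afterwards. */
    static void
    with_gc_paused(void (*work)(void))
    {
        int was_enabled = PyGC_IsEnabled();
        if (was_enabled) {
            PyGC_Disable();        /* returns the previous state */
        }
        work();
        if (was_enabled) {
            PyGC_Enable();
        }
    }
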
diff --git a/include/opcode.h b/include/opcode.h
index 19944fa..5203975 100644
--- a/include/opcode.h
+++ b/include/opcode.h
@@ -30,7 +30,11 @@
 #define BINARY_TRUE_DIVIDE       27
 #define INPLACE_FLOOR_DIVIDE     28
 #define INPLACE_TRUE_DIVIDE      29
-#define RERAISE                  48
+#define GET_LEN                  30
+#define MATCH_MAPPING            31
+#define MATCH_SEQUENCE           32
+#define MATCH_KEYS               33
+#define COPY_DICT_WITHOUT_KEYS   34
 #define WITH_EXCEPT_START        49
 #define GET_AITER                50
 #define GET_ANEXT                51
@@ -77,6 +81,7 @@
 #define DELETE_ATTR              96
 #define STORE_GLOBAL             97
 #define DELETE_GLOBAL            98
+#define ROT_N                    99
 #define LOAD_CONST              100
 #define LOAD_NAME               101
 #define BUILD_TUPLE             102
@@ -96,11 +101,13 @@
 #define LOAD_GLOBAL             116
 #define IS_OP                   117
 #define CONTAINS_OP             118
+#define RERAISE                 119
 #define JUMP_IF_NOT_EXC_MATCH   121
 #define SETUP_FINALLY           122
 #define LOAD_FAST               124
 #define STORE_FAST              125
 #define DELETE_FAST             126
+#define GEN_START               129
 #define RAISE_VARARGS           130
 #define CALL_FUNCTION           131
 #define MAKE_FUNCTION           132
@@ -117,6 +124,7 @@
 #define SET_ADD                 146
 #define MAP_ADD                 147
 #define LOAD_CLASSDEREF         148
+#define MATCH_CLASS             152
 #define SETUP_ASYNC_WITH        154
 #define FORMAT_VALUE            155
 #define BUILD_CONST_KEY_MAP     156
@@ -127,6 +135,28 @@
 #define SET_UPDATE              163
 #define DICT_MERGE              164
 #define DICT_UPDATE             165
+#ifdef NEED_OPCODE_JUMP_TABLES
+static uint32_t _PyOpcode_RelativeJump[8] = {
+    0U,
+    0U,
+    536870912U,
+    67125248U,
+    67141632U,
+    0U,
+    0U,
+    0U,
+};
+static uint32_t _PyOpcode_Jump[8] = {
+    0U,
+    0U,
+    536870912U,
+    101695488U,
+    67141632U,
+    0U,
+    0U,
+    0U,
+};
+#endif /* NEED_OPCODE_JUMP_TABLES */
 
 /* EXCEPT_HANDLER is a special, implicit block type which is created when
    entering an except handler. It is not an opcode but we define it here
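
The two tables above are 256-bit sets (8 x 32 bits, one bit per opcode) that let tooling classify jump opcodes without a switch; they only materialize when NEED_OPCODE_JUMP_TABLES is defined before inclusion. A sketch of the indexing, which appears to be word = op/32, bit = op%32 (is_jump_opcode is hypothetical):

    #include <stdint.h>
    #define NEED_OPCODE_JUMP_TABLES
    #include "opcode.h"

    /* Sketch: probe the bitsets; e.g. FOR_ITER (93) sets bit 29 of word 2
     * (536870912 == 1 << 29) in both tables above. */
    static int
    is_jump_opcode(unsigned op)
    {
        return op < 256 && ((_PyOpcode_Jump[op >> 5] >> (op & 31)) & 1);
    }
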
diff --git a/include/parsetok.h b/include/parsetok.h
deleted file mode 100644
index 935d733..0000000
--- a/include/parsetok.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* Parser-tokenizer link interface */
-
-#ifndef Py_LIMITED_API
-#ifndef Py_PARSETOK_H
-#define Py_PARSETOK_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "grammar.h"      /* grammar */
-#include "node.h"         /* node */
-
-typedef struct {
-    int error;
-    PyObject *filename;
-    int lineno;
-    int offset;
-    char *text;                 /* UTF-8-encoded string */
-    int token;
-    int expected;
-} perrdetail;
-
-#if 0
-#define PyPARSE_YIELD_IS_KEYWORD        0x0001
-#endif
-
-#define PyPARSE_DONT_IMPLY_DEDENT       0x0002
-
-#if 0
-#define PyPARSE_WITH_IS_KEYWORD         0x0003
-#define PyPARSE_PRINT_IS_FUNCTION       0x0004
-#define PyPARSE_UNICODE_LITERALS        0x0008
-#endif
-
-#define PyPARSE_IGNORE_COOKIE 0x0010
-#define PyPARSE_BARRY_AS_BDFL 0x0020
-#define PyPARSE_TYPE_COMMENTS 0x0040
-#define PyPARSE_ASYNC_HACKS   0x0080
-
-PyAPI_FUNC(node *) PyParser_ParseString(const char *, grammar *, int,
-                                              perrdetail *);
-PyAPI_FUNC(node *) PyParser_ParseFile (FILE *, const char *, grammar *, int,
-                                             const char *, const char *,
-                                             perrdetail *);
-
-PyAPI_FUNC(node *) PyParser_ParseStringFlags(const char *, grammar *, int,
-                                              perrdetail *, int);
-PyAPI_FUNC(node *) PyParser_ParseFileFlags(
-    FILE *fp,
-    const char *filename,       /* decoded from the filesystem encoding */
-    const char *enc,
-    grammar *g,
-    int start,
-    const char *ps1,
-    const char *ps2,
-    perrdetail *err_ret,
-    int flags);
-PyAPI_FUNC(node *) PyParser_ParseFileFlagsEx(
-    FILE *fp,
-    const char *filename,       /* decoded from the filesystem encoding */
-    const char *enc,
-    grammar *g,
-    int start,
-    const char *ps1,
-    const char *ps2,
-    perrdetail *err_ret,
-    int *flags);
-PyAPI_FUNC(node *) PyParser_ParseFileObject(
-    FILE *fp,
-    PyObject *filename,
-    const char *enc,
-    grammar *g,
-    int start,
-    const char *ps1,
-    const char *ps2,
-    perrdetail *err_ret,
-    int *flags);
-
-PyAPI_FUNC(node *) PyParser_ParseStringFlagsFilename(
-    const char *s,
-    const char *filename,       /* decoded from the filesystem encoding */
-    grammar *g,
-    int start,
-    perrdetail *err_ret,
-    int flags);
-PyAPI_FUNC(node *) PyParser_ParseStringFlagsFilenameEx(
-    const char *s,
-    const char *filename,       /* decoded from the filesystem encoding */
-    grammar *g,
-    int start,
-    perrdetail *err_ret,
-    int *flags);
-PyAPI_FUNC(node *) PyParser_ParseStringObject(
-    const char *s,
-    PyObject *filename,
-    grammar *g,
-    int start,
-    perrdetail *err_ret,
-    int *flags);
-
-/* Note that the following functions are defined in pythonrun.c,
-   not in parsetok.c */
-PyAPI_FUNC(void) PyParser_SetError(perrdetail *);
-PyAPI_FUNC(void) PyParser_ClearError(perrdetail *);
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_PARSETOK_H */
-#endif /* !Py_LIMITED_API */
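
This whole parser-tokenizer surface goes away with the old parser (PEP 617 landed the PEG parser in 3.9; 3.10 drops the pgen machinery, along with node.h and grammar.h). Embedders that went through PyParser_* typically migrate to Py_CompileString(), which remains available, plus ordinary code-object execution. A hedged sketch (compile_and_run is hypothetical, error handling abbreviated):

    #include <Python.h>

    /* Sketch: the post-PEG replacement for parse-then-run via the removed
     * PyParser_* API. */
    static PyObject *
    compile_and_run(const char *source)
    {
        PyObject *code = Py_CompileString(source, "<embedded>", Py_file_input);
        if (code == NULL) {
            return NULL;                    /* SyntaxError already set */
        }
        PyObject *globals = PyDict_New();
        if (globals == NULL || PyDict_SetItemString(globals, "__builtins__",
                                                    PyEval_GetBuiltins()) < 0) {
            Py_XDECREF(globals);
            Py_DECREF(code);
            return NULL;
        }
        PyObject *result = PyEval_EvalCode(code, globals, globals);
        Py_DECREF(globals);
        Py_DECREF(code);
        return result;
    }
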
diff --git a/include/patchlevel.h b/include/patchlevel.h
index 0b5d280..5bb246e 100644
--- a/include/patchlevel.h
+++ b/include/patchlevel.h
@@ -17,13 +17,13 @@
 /* Version parsed out into numeric values */
 /*--start constants--*/
 #define PY_MAJOR_VERSION        3
-#define PY_MINOR_VERSION        9
-#define PY_MICRO_VERSION        1
+#define PY_MINOR_VERSION        10
+#define PY_MICRO_VERSION        3
 #define PY_RELEASE_LEVEL        PY_RELEASE_LEVEL_FINAL
 #define PY_RELEASE_SERIAL       0
 
 /* Version as a string */
-#define PY_VERSION              "3.9.1"
+#define PY_VERSION              "3.10.3"
 /*--end constants--*/
 
 /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2.
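
Since these constants fold into PY_VERSION_HEX (0xMMmmppLS), extensions that must build against both the 3.9 and 3.10 headers usually guard on the hex value. The shim below is a common backport pattern (my_Py_NewRef is hypothetical):

    #include <Python.h>

    #if PY_VERSION_HEX >= 0x030A0000        /* 3.10.0 final or newer */
    /* Py_NewRef(), PyIter_Send(), PyGC_Enable(), ... are available. */
    #else
    /* Sketch of a backport shim for 3.9-era headers. */
    static inline PyObject *my_Py_NewRef(PyObject *o) { Py_INCREF(o); return o; }
    #  define Py_NewRef(o) my_Py_NewRef(o)
    #endif
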
diff --git a/include/pyarena.h b/include/pyarena.h
deleted file mode 100644
index db3ad01..0000000
--- a/include/pyarena.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* An arena-like memory interface for the compiler.
- */
-
-#ifndef Py_LIMITED_API
-#ifndef Py_PYARENA_H
-#define Py_PYARENA_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-  typedef struct _arena PyArena;
-
-  /* PyArena_New() and PyArena_Free() create a new arena and free it,
-     respectively.  Once an arena has been created, it can be used
-     to allocate memory via PyArena_Malloc().  Pointers to PyObject can
-     also be registered with the arena via PyArena_AddPyObject(), and the
-     arena will ensure that the PyObjects stay alive at least until
-     PyArena_Free() is called.  When an arena is freed, all the memory it
-     allocated is freed, the arena releases internal references to registered
-     PyObject*, and none of its pointers are valid.
-     XXX (tim) What does "none of its pointers are valid" mean?  Does it
-     XXX mean that pointers previously obtained via PyArena_Malloc() are
-     XXX no longer valid?  (That's clearly true, but not sure that's what
-     XXX the text is trying to say.)
-
-     PyArena_New() returns an arena pointer.  On error, it
-     returns a negative number and sets an exception.
-     XXX (tim):  Not true.  On error, PyArena_New() actually returns NULL,
-     XXX and looks like it may or may not set an exception (e.g., if the
-     XXX internal PyList_New(0) returns NULL, PyArena_New() passes that on
-     XXX and an exception is set; OTOH, if the internal
-     XXX block_new(DEFAULT_BLOCK_SIZE) returns NULL, that's passed on but
-     XXX an exception is not set in that case).
-  */
-  PyAPI_FUNC(PyArena *) PyArena_New(void);
-  PyAPI_FUNC(void) PyArena_Free(PyArena *);
-
-  /* Mostly like malloc(), return the address of a block of memory spanning
-   * `size` bytes, or return NULL (without setting an exception) if enough
-   * new memory can't be obtained.  Unlike malloc(0), PyArena_Malloc() with
-   * size=0 does not guarantee to return a unique pointer (the pointer
-   * returned may equal one or more other pointers obtained from
-   * PyArena_Malloc()).
-   * Note that pointers obtained via PyArena_Malloc() must never be passed to
-   * the system free() or realloc(), or to any of Python's similar memory-
-   * management functions.  PyArena_Malloc()-obtained pointers remain valid
-   * until PyArena_Free(ar) is called, at which point all pointers obtained
-   * from the arena `ar` become invalid simultaneously.
-   */
-  PyAPI_FUNC(void *) PyArena_Malloc(PyArena *, size_t size);
-
-  /* This routine isn't a proper arena allocation routine.  It takes
-   * a PyObject* and records it so that it can be DECREFed when the
-   * arena is freed.
-   */
-  PyAPI_FUNC(int) PyArena_AddPyObject(PyArena *, PyObject *);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* !Py_PYARENA_H */
-#endif /* Py_LIMITED_API */
diff --git a/include/pyconfig.h b/include/pyconfig.h
index d7d3cf0..103e647 100644
--- a/include/pyconfig.h
+++ b/include/pyconfig.h
@@ -165,11 +165,11 @@
 
 /* Define like size_t, omitting the "unsigned" */
 #ifdef MS_WIN64
-typedef __int64 ssize_t;
+typedef __int64 Py_ssize_t;
 #else
-typedef _W64 int ssize_t;
+typedef _W64 int Py_ssize_t;
 #endif
-#define HAVE_SSIZE_T 1
+#define HAVE_PY_SSIZE_T 1
 
 #if defined(MS_WIN32) && !defined(MS_WIN64)
 #if defined(_M_IX86)
@@ -268,11 +268,11 @@
                         file in their Makefile (other compilers are
                         generally taken care of by distutils.) */
 #                       if defined(_DEBUG)
-#                               pragma comment(lib,"python39_d.lib")
+#                               pragma comment(lib,"python310_d.lib")
 #                       elif defined(Py_LIMITED_API)
 #                               pragma comment(lib,"python3.lib")
 #                       else
-#                               pragma comment(lib,"python39.lib")
+#                               pragma comment(lib,"python310.lib")
 #                       endif /* _DEBUG */
 #               endif /* _MSC_VER */
 #       endif /* Py_BUILD_CORE */
@@ -289,6 +289,7 @@
 #       define SIZEOF_FPOS_T 8
 #       define SIZEOF_HKEY 8
 #       define SIZEOF_SIZE_T 8
+#       define ALIGNOF_SIZE_T 8
 /* configure.ac defines HAVE_LARGEFILE_SUPPORT iff
    sizeof(off_t) > sizeof(long), and sizeof(long long) >= sizeof(off_t).
    On Win64 the second condition is not true, but if fpos_t replaces off_t
@@ -303,6 +304,7 @@
 #       define SIZEOF_FPOS_T 8
 #       define SIZEOF_HKEY 4
 #       define SIZEOF_SIZE_T 4
+#       define ALIGNOF_SIZE_T 4
         /* MS VS2005 changes time_t to a 64-bit type on all platforms */
 #       if defined(_MSC_VER) && _MSC_VER >= 1400
 #       define SIZEOF_TIME_T 8
@@ -321,6 +323,7 @@
 #define SIZEOF_SHORT 2
 #define SIZEOF_INT 4
 #define SIZEOF_LONG 4
+#define ALIGNOF_LONG 4
 #define SIZEOF_LONG_LONG 8
 #define SIZEOF_DOUBLE 8
 #define SIZEOF_FLOAT 4
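
One subtle consequence of the pyconfig.h hunks: on Windows the headers no longer leak a global ssize_t typedef; they define Py_ssize_t directly and signal that with HAVE_PY_SSIZE_T (consumed by pyport.h further down). Extension code that silently relied on the leaked name should spell it Py_ssize_t, as in this sketch (checked_length is hypothetical):

    #include <Python.h>

    /* Sketch: on 3.9/Windows this may have compiled with a bare `ssize_t`
     * borrowed from pyconfig.h; under 3.10, use Py_ssize_t. */
    static Py_ssize_t
    checked_length(PyObject *obj)
    {
        Py_ssize_t n = PyObject_Length(obj);   /* -1 with an exception set */
        return n;
    }
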
diff --git a/include/pyerrors.h b/include/pyerrors.h
index 979a26b..f5d1c71 100644
--- a/include/pyerrors.h
+++ b/include/pyerrors.h
@@ -30,12 +30,6 @@
    macro is defined. */
 PyAPI_FUNC(void) _Py_NO_RETURN Py_FatalError(const char *message);
 
-#if defined(Py_DEBUG) || defined(Py_LIMITED_API)
-#define _PyErr_OCCURRED() PyErr_Occurred()
-#else
-#define _PyErr_OCCURRED() (PyThreadState_GET()->curexc_type)
-#endif
-
 /* Error testing and normalization */
 PyAPI_FUNC(int) PyErr_GivenExceptionMatches(PyObject *, PyObject *);
 PyAPI_FUNC(int) PyErr_ExceptionMatches(PyObject *);
@@ -152,6 +146,7 @@
 PyAPI_DATA(PyObject *) PyExc_ImportWarning;
 PyAPI_DATA(PyObject *) PyExc_UnicodeWarning;
 PyAPI_DATA(PyObject *) PyExc_BytesWarning;
+PyAPI_DATA(PyObject *) PyExc_EncodingWarning;
 PyAPI_DATA(PyObject *) PyExc_ResourceWarning;
 
 
@@ -230,6 +225,9 @@
 /* In signalmodule.c */
 PyAPI_FUNC(int) PyErr_CheckSignals(void);
 PyAPI_FUNC(void) PyErr_SetInterrupt(void);
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000
+PyAPI_FUNC(int) PyErr_SetInterruptEx(int signum);
+#endif
 
 /* Support for adding program text to SyntaxErrors */
 PyAPI_FUNC(void) PyErr_SyntaxLocation(
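
PyErr_SetInterruptEx() generalizes PyErr_SetInterrupt() to an arbitrary signal number; it returns 0 on success and -1 for an invalid signum, without setting an exception. A sketch (request_shutdown is hypothetical):

    #include <Python.h>
    #include <signal.h>

    /* Sketch: ask the main thread to run Python's SIGTERM handler, e.g.
     * from a non-Python thread.  New in 3.10. */
    static int
    request_shutdown(void)
    {
        /* 0 on success; -1 (no exception set) if signum is out of range */
        return PyErr_SetInterruptEx(SIGTERM);
    }
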
diff --git a/include/pyhash.h b/include/pyhash.h
index 4437b87..a314ea9 100644
--- a/include/pyhash.h
+++ b/include/pyhash.h
@@ -7,7 +7,7 @@
 
 /* Helpers for hash functions */
 #ifndef Py_LIMITED_API
-PyAPI_FUNC(Py_hash_t) _Py_HashDouble(double);
+PyAPI_FUNC(Py_hash_t) _Py_HashDouble(PyObject *, double);
 PyAPI_FUNC(Py_hash_t) _Py_HashPointer(const void*);
 // Similar to _Py_HashPointer(), but don't replace -1 with -2
 PyAPI_FUNC(Py_hash_t) _Py_HashPointerRaw(const void*);
@@ -29,7 +29,6 @@
 
 #define _PyHASH_MODULUS (((size_t)1 << _PyHASH_BITS) - 1)
 #define _PyHASH_INF 314159
-#define _PyHASH_NAN 0
 #define _PyHASH_IMAG _PyHASH_MULTIPLIER
 
 
@@ -77,7 +76,6 @@
     } expat;
 } _Py_HashSecret_t;
 PyAPI_DATA(_Py_HashSecret_t) _Py_HashSecret;
-#endif
 
 #ifdef Py_DEBUG
 PyAPI_DATA(int) _Py_HashSecret_Initialized;
@@ -85,7 +83,6 @@
 
 
 /* hash function definition */
-#ifndef Py_LIMITED_API
 typedef struct {
     Py_hash_t (*const hash)(const void *, Py_ssize_t);
     const char *name;
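
The extra PyObject * parameter and the removal of _PyHASH_NAN go together (bpo-43475): NaNs no longer all hash to 0 but derive their hash from the object's identity, avoiding quadratic collision behavior in dicts of NaNs. For a type wrapping a double, tp_hash now looks roughly like this sketch (WrapObject is hypothetical; _Py_HashDouble is private, non-limited API):

    #include <Python.h>

    typedef struct {
        PyObject_HEAD
        double value;
    } WrapObject;

    static Py_hash_t
    Wrap_hash(WrapObject *self)
    {
        /* a NaN now hashes from the object pointer, not a fixed constant */
        return _Py_HashDouble((PyObject *)self, self->value);
    }
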
diff --git a/include/pylifecycle.h b/include/pylifecycle.h
index c5368b3..2df7fe6 100644
--- a/include/pylifecycle.h
+++ b/include/pylifecycle.h
@@ -31,7 +31,6 @@
 
 /* Bootstrap __main__ (defined in Modules/main.c) */
 PyAPI_FUNC(int) Py_Main(int argc, wchar_t **argv);
-
 PyAPI_FUNC(int) Py_BytesMain(int argc, char **argv);
 
 /* In pathconfig.c */
diff --git a/include/pymath.h b/include/pymath.h
index 63ca972..f869724 100644
--- a/include/pymath.h
+++ b/include/pymath.h
@@ -227,12 +227,4 @@
  * behavior. */
 #define _Py_InIntegralTypeRange(type, v) (_Py_IntegralTypeMin(type) <= v && v <= _Py_IntegralTypeMax(type))
 
-/* Return the smallest integer k such that n < 2**k, or 0 if n == 0.
- * Equivalent to floor(log2(x))+1.  Also equivalent to: bitwidth_of_type -
- * count_leading_zero_bits(x)
- */
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(unsigned int) _Py_bit_length(unsigned long d);
-#endif
-
 #endif /* Py_PYMATH_H */
diff --git a/include/pymem.h b/include/pymem.h
index 607feb9..66cdb0d 100644
--- a/include/pymem.h
+++ b/include/pymem.h
@@ -25,8 +25,8 @@
    heap used by the Python DLL; it could be a disaster if you free()'ed that
    directly in your own extension.  Using PyMem_Free instead ensures Python
    can return the memory to the proper heap.  As another example, in
-   PYMALLOC_DEBUG mode, Python wraps all calls to all PyMem_ and PyObject_
-   memory functions in special debugging wrappers that add additional
+   a debug build (Py_DEBUG macro), Python wraps all calls to all PyMem_ and
+   PyObject_ memory functions in special debugging wrappers that add additional
    debugging info to dynamic memory blocks.  The system routines have no idea
    what to do with that stuff, and the Python wrappers have no idea what to do
    with raw blocks obtained directly by the system routines then.
@@ -50,21 +50,10 @@
 */
 
 PyAPI_FUNC(void *) PyMem_Malloc(size_t size);
+PyAPI_FUNC(void *) PyMem_Calloc(size_t nelem, size_t elsize);
 PyAPI_FUNC(void *) PyMem_Realloc(void *ptr, size_t new_size);
 PyAPI_FUNC(void) PyMem_Free(void *ptr);
 
-/* Macros. */
-
-/* PyMem_MALLOC(0) means malloc(1). Some systems would return NULL
-   for malloc(0), which would be treated as an error. Some platforms
-   would return a pointer with no memory behind it, which would break
-   pymalloc. To solve these problems, allocate an extra byte. */
-/* Returns NULL to indicate error if a negative size or size larger than
-   Py_ssize_t can represent is supplied.  Helps prevents security holes. */
-#define PyMem_MALLOC(n)         PyMem_Malloc(n)
-#define PyMem_REALLOC(p, n)     PyMem_Realloc(p, n)
-#define PyMem_FREE(p)           PyMem_Free(p)
-
 /*
  * Type-oriented memory interface
  * ==============================
@@ -78,9 +67,6 @@
 #define PyMem_New(type, n) \
   ( ((size_t)(n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL :      \
         ( (type *) PyMem_Malloc((n) * sizeof(type)) ) )
-#define PyMem_NEW(type, n) \
-  ( ((size_t)(n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL :      \
-        ( (type *) PyMem_MALLOC((n) * sizeof(type)) ) )
 
 /*
  * The value of (p) is always clobbered by this macro regardless of success.
@@ -91,15 +77,18 @@
 #define PyMem_Resize(p, type, n) \
   ( (p) = ((size_t)(n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL :        \
         (type *) PyMem_Realloc((p), (n) * sizeof(type)) )
-#define PyMem_RESIZE(p, type, n) \
-  ( (p) = ((size_t)(n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL :        \
-        (type *) PyMem_REALLOC((p), (n) * sizeof(type)) )
 
-/* PyMem{Del,DEL} are left over from ancient days, and shouldn't be used
- * anymore.  They're just confusing aliases for PyMem_{Free,FREE} now.
- */
-#define PyMem_Del               PyMem_Free
-#define PyMem_DEL               PyMem_FREE
+
+// Deprecated aliases only kept for backward compatibility.
+// PyMem_Del and PyMem_DEL are defined without parameters so that they can be
+// used as function pointers (e.g., dealloc = PyMem_Del).
+#define PyMem_MALLOC(n)           PyMem_Malloc(n)
+#define PyMem_NEW(type, n)        PyMem_New(type, n)
+#define PyMem_REALLOC(p, n)       PyMem_Realloc(p, n)
+#define PyMem_RESIZE(p, type, n)  PyMem_Resize(p, type, n)
+#define PyMem_FREE(p)             PyMem_Free(p)
+#define PyMem_Del                 PyMem_Free
+#define PyMem_DEL                 PyMem_Free
 
 
 #ifndef Py_LIMITED_API
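
With the uppercase macros demoted to plain aliases, new code should call the function forms; PyMem_Calloc(), whose declaration moves here, covers overflow-checked, zero-initialized allocations. A sketch (make_counters/free_counters are hypothetical):

    #include <Python.h>

    /* Sketch: the function forms replace the removed macro-era spellings. */
    static int *
    make_counters(size_t n)
    {
        /* overflow-checked and zero-initialized; NULL on failure */
        return (int *)PyMem_Calloc(n, sizeof(int));
    }

    static void
    free_counters(int *counters)
    {
        PyMem_Free(counters);   /* PyMem_Del / PyMem_DEL are mere aliases */
    }
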
diff --git a/include/pyport.h b/include/pyport.h
index 4bd4eb4..6ab0ae4 100644
--- a/include/pyport.h
+++ b/include/pyport.h
@@ -101,7 +101,9 @@
  * sizeof(size_t).  C99 doesn't define such a thing directly (size_t is an
  * unsigned integral type).  See PEP 353 for details.
  */
-#ifdef HAVE_SSIZE_T
+#ifdef HAVE_PY_SSIZE_T
+
+#elif HAVE_SSIZE_T
 typedef ssize_t         Py_ssize_t;
 #elif SIZEOF_VOID_P == SIZEOF_SIZE_T
 typedef Py_intptr_t     Py_ssize_t;
@@ -131,7 +133,9 @@
 /* Smallest negative value of type Py_ssize_t. */
 #define PY_SSIZE_T_MIN (-PY_SSIZE_T_MAX-1)
 
-/* PY_FORMAT_SIZE_T is a platform-specific modifier for use in a printf
+/* Macro kept for backward compatibility: use "z" in new code.
+ *
+ * PY_FORMAT_SIZE_T is a platform-specific modifier for use in a printf
  * format to convert an argument with the width of a size_t or Py_ssize_t.
 * C99 introduced "z" for this purpose, but old MSVCs did not support it.
 * MSVC has supported "z" since (at least) 2015, so we can just use "z"
@@ -179,8 +183,9 @@
 
 #if defined(_MSC_VER)
 #  if defined(PY_LOCAL_AGGRESSIVE)
-   /* enable more aggressive optimization for visual studio */
-#  pragma optimize("agtw", on)
+   /* enable more aggressive optimization for MSVC */
+   /* active in both release and debug builds - see bpo-43271 */
+#  pragma optimize("gt", on)
 #endif
    /* ignore warnings if the compiler decides not to inline a function */
 #  pragma warning(disable: 4710)
@@ -839,12 +844,16 @@
 #endif
 
 #if defined(__ANDROID__) || defined(__VXWORKS__)
-   /* Ignore the locale encoding: force UTF-8 */
+   // Use UTF-8 as the locale encoding, ignore the LC_CTYPE locale.
+   // See _Py_GetLocaleEncoding(), PyUnicode_DecodeLocale()
+   // and PyUnicode_EncodeLocale().
 #  define _Py_FORCE_UTF8_LOCALE
 #endif
 
 #if defined(_Py_FORCE_UTF8_LOCALE) || defined(__APPLE__)
-   /* Use UTF-8 as filesystem encoding */
+   // Use UTF-8 as the filesystem encoding.
+   // See PyUnicode_DecodeFSDefaultAndSize(), PyUnicode_EncodeFSDefault(),
+   // Py_DecodeLocale() and Py_EncodeLocale().
 #  define _Py_FORCE_UTF8_FS_ENCODING
 #endif
 
@@ -852,6 +861,7 @@
    PyAPI_FUNC(void) _Py_NO_RETURN PyThread_exit_thread(void);
 
    XLC support is intentionally omitted due to bpo-40244 */
+#ifndef _Py_NO_RETURN
 #if defined(__clang__) || \
     (defined(__GNUC__) && \
      ((__GNUC__ >= 3) || \
@@ -862,6 +872,7 @@
 #else
 #  define _Py_NO_RETURN
 #endif
+#endif
 
 
 // Preprocessor check for a builtin preprocessor function. Always return 0
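
Per the updated PY_FORMAT_SIZE_T comment, the C99 "z" modifier is the way to format Py_ssize_t in new code. For example (report_bad_length is hypothetical):

    #include <Python.h>

    /* Sketch: "%zd" replaces "%" PY_FORMAT_SIZE_T "d" in new code. */
    static void
    report_bad_length(Py_ssize_t n)
    {
        PyErr_Format(PyExc_ValueError, "invalid length: %zd", n);
    }
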
diff --git a/include/pythonrun.h b/include/pythonrun.h
index 5752907..b0a2fc3 100644
--- a/include/pythonrun.h
+++ b/include/pythonrun.h
@@ -7,194 +7,15 @@
 extern "C" {
 #endif
 
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(int) PyRun_SimpleStringFlags(const char *, PyCompilerFlags *);
-PyAPI_FUNC(int) PyRun_AnyFileExFlags(
-    FILE *fp,
-    const char *filename,       /* decoded from the filesystem encoding */
-    int closeit,
-    PyCompilerFlags *flags);
-PyAPI_FUNC(int) PyRun_SimpleFileExFlags(
-    FILE *fp,
-    const char *filename,       /* decoded from the filesystem encoding */
-    int closeit,
-    PyCompilerFlags *flags);
-PyAPI_FUNC(int) PyRun_InteractiveOneFlags(
-    FILE *fp,
-    const char *filename,       /* decoded from the filesystem encoding */
-    PyCompilerFlags *flags);
-PyAPI_FUNC(int) PyRun_InteractiveOneObject(
-    FILE *fp,
-    PyObject *filename,
-    PyCompilerFlags *flags);
-PyAPI_FUNC(int) PyRun_InteractiveLoopFlags(
-    FILE *fp,
-    const char *filename,       /* decoded from the filesystem encoding */
-    PyCompilerFlags *flags);
-
-PyAPI_FUNC(struct _mod *) PyParser_ASTFromString(
-    const char *s,
-    const char *filename,       /* decoded from the filesystem encoding */
-    int start,
-    PyCompilerFlags *flags,
-    PyArena *arena);
-PyAPI_FUNC(struct _mod *) PyParser_ASTFromStringObject(
-    const char *s,
-    PyObject *filename,
-    int start,
-    PyCompilerFlags *flags,
-    PyArena *arena);
-PyAPI_FUNC(struct _mod *) PyParser_ASTFromFile(
-    FILE *fp,
-    const char *filename,       /* decoded from the filesystem encoding */
-    const char* enc,
-    int start,
-    const char *ps1,
-    const char *ps2,
-    PyCompilerFlags *flags,
-    int *errcode,
-    PyArena *arena);
-PyAPI_FUNC(struct _mod *) PyParser_ASTFromFileObject(
-    FILE *fp,
-    PyObject *filename,
-    const char* enc,
-    int start,
-    const char *ps1,
-    const char *ps2,
-    PyCompilerFlags *flags,
-    int *errcode,
-    PyArena *arena);
-#endif
-
-#ifndef PyParser_SimpleParseString
-#define PyParser_SimpleParseString(S, B) \
-    PyParser_SimpleParseStringFlags(S, B, 0)
-#define PyParser_SimpleParseFile(FP, S, B) \
-    PyParser_SimpleParseFileFlags(FP, S, B, 0)
-#endif
-
-#ifndef Py_BUILD_CORE
-Py_DEPRECATED(3.9)
-#endif
-PyAPI_FUNC(struct _node *) PyParser_SimpleParseStringFlags(const char *, int, int);
-#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
-#ifndef Py_BUILD_CORE
-Py_DEPRECATED(3.9)
-#endif
-PyAPI_FUNC(struct _node *) PyParser_SimpleParseStringFlagsFilename(const char *,
-                                                                   const char *,
-                                                                   int, int);
-#endif
-#ifndef Py_BUILD_CORE
-Py_DEPRECATED(3.9)
-#endif
-PyAPI_FUNC(struct _node *) PyParser_SimpleParseFileFlags(FILE *, const char *, int, int);
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(PyObject *) PyRun_StringFlags(const char *, int, PyObject *,
-                                         PyObject *, PyCompilerFlags *);
-
-PyAPI_FUNC(PyObject *) PyRun_FileExFlags(
-    FILE *fp,
-    const char *filename,       /* decoded from the filesystem encoding */
-    int start,
-    PyObject *globals,
-    PyObject *locals,
-    int closeit,
-    PyCompilerFlags *flags);
-#endif
-
-#ifdef Py_LIMITED_API
 PyAPI_FUNC(PyObject *) Py_CompileString(const char *, const char *, int);
-#else
-#define Py_CompileString(str, p, s) Py_CompileStringExFlags(str, p, s, NULL, -1)
-#define Py_CompileStringFlags(str, p, s, f) Py_CompileStringExFlags(str, p, s, f, -1)
-PyAPI_FUNC(PyObject *) Py_CompileStringExFlags(
-    const char *str,
-    const char *filename,       /* decoded from the filesystem encoding */
-    int start,
-    PyCompilerFlags *flags,
-    int optimize);
-PyAPI_FUNC(PyObject *) Py_CompileStringObject(
-    const char *str,
-    PyObject *filename, int start,
-    PyCompilerFlags *flags,
-    int optimize);
-#endif
-PyAPI_FUNC(struct symtable *) Py_SymtableString(
-    const char *str,
-    const char *filename,       /* decoded from the filesystem encoding */
-    int start);
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(const char *) _Py_SourceAsString(
-    PyObject *cmd,
-    const char *funcname,
-    const char *what,
-    PyCompilerFlags *cf,
-    PyObject **cmd_copy);
-
-PyAPI_FUNC(struct symtable *) Py_SymtableStringObject(
-    const char *str,
-    PyObject *filename,
-    int start);
-
-PyAPI_FUNC(struct symtable *) _Py_SymtableStringObjectFlags(
-    const char *str,
-    PyObject *filename,
-    int start,
-    PyCompilerFlags *flags);
-#endif
 
 PyAPI_FUNC(void) PyErr_Print(void);
 PyAPI_FUNC(void) PyErr_PrintEx(int);
 PyAPI_FUNC(void) PyErr_Display(PyObject *, PyObject *, PyObject *);
 
-#ifndef Py_LIMITED_API
-/* A function flavor is also exported by libpython. It is required when
-    libpython is accessed directly rather than using header files which defines
-    macros below. On Windows, for example, PyAPI_FUNC() uses dllexport to
-    export functions in pythonXX.dll. */
-PyAPI_FUNC(PyObject *) PyRun_String(const char *str, int s, PyObject *g, PyObject *l);
-PyAPI_FUNC(int) PyRun_AnyFile(FILE *fp, const char *name);
-PyAPI_FUNC(int) PyRun_AnyFileEx(FILE *fp, const char *name, int closeit);
-PyAPI_FUNC(int) PyRun_AnyFileFlags(FILE *, const char *, PyCompilerFlags *);
-PyAPI_FUNC(int) PyRun_SimpleString(const char *s);
-PyAPI_FUNC(int) PyRun_SimpleFile(FILE *f, const char *p);
-PyAPI_FUNC(int) PyRun_SimpleFileEx(FILE *f, const char *p, int c);
-PyAPI_FUNC(int) PyRun_InteractiveOne(FILE *f, const char *p);
-PyAPI_FUNC(int) PyRun_InteractiveLoop(FILE *f, const char *p);
-PyAPI_FUNC(PyObject *) PyRun_File(FILE *fp, const char *p, int s, PyObject *g, PyObject *l);
-PyAPI_FUNC(PyObject *) PyRun_FileEx(FILE *fp, const char *p, int s, PyObject *g, PyObject *l, int c);
-PyAPI_FUNC(PyObject *) PyRun_FileFlags(FILE *fp, const char *p, int s, PyObject *g, PyObject *l, PyCompilerFlags *flags);
-
-/* Use macros for a bunch of old variants */
-#define PyRun_String(str, s, g, l) PyRun_StringFlags(str, s, g, l, NULL)
-#define PyRun_AnyFile(fp, name) PyRun_AnyFileExFlags(fp, name, 0, NULL)
-#define PyRun_AnyFileEx(fp, name, closeit) \
-    PyRun_AnyFileExFlags(fp, name, closeit, NULL)
-#define PyRun_AnyFileFlags(fp, name, flags) \
-    PyRun_AnyFileExFlags(fp, name, 0, flags)
-#define PyRun_SimpleString(s) PyRun_SimpleStringFlags(s, NULL)
-#define PyRun_SimpleFile(f, p) PyRun_SimpleFileExFlags(f, p, 0, NULL)
-#define PyRun_SimpleFileEx(f, p, c) PyRun_SimpleFileExFlags(f, p, c, NULL)
-#define PyRun_InteractiveOne(f, p) PyRun_InteractiveOneFlags(f, p, NULL)
-#define PyRun_InteractiveLoop(f, p) PyRun_InteractiveLoopFlags(f, p, NULL)
-#define PyRun_File(fp, p, s, g, l) \
-    PyRun_FileExFlags(fp, p, s, g, l, 0, NULL)
-#define PyRun_FileEx(fp, p, s, g, l, c) \
-    PyRun_FileExFlags(fp, p, s, g, l, c, NULL)
-#define PyRun_FileFlags(fp, p, s, g, l, flags) \
-    PyRun_FileExFlags(fp, p, s, g, l, 0, flags)
-#endif
 
 /* Stuff with no proper home (yet) */
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(char *) PyOS_Readline(FILE *, FILE *, const char *);
-#endif
 PyAPI_DATA(int) (*PyOS_InputHook)(void);
-PyAPI_DATA(char) *(*PyOS_ReadlineFunctionPointer)(FILE *, FILE *, const char *);
-#ifndef Py_LIMITED_API
-PyAPI_DATA(PyThreadState*) _PyOS_ReadlineTState;
-#endif
 
 /* Stack size, in "pointers" (so we get extra safety margins
    on 64-bit platforms).  On a 32-bit platform, this translates
@@ -211,6 +32,12 @@
 PyAPI_FUNC(int) PyOS_CheckStack(void);
 #endif
 
+#ifndef Py_LIMITED_API
+#  define Py_CPYTHON_PYTHONRUN_H
+#  include  "cpython/pythonrun.h"
+#  undef Py_CPYTHON_PYTHONRUN_H
+#endif
+
 #ifdef __cplusplus
 }
 #endif
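
Despite the large deletion, nothing is lost for default builds: the non-limited declarations and macros now arrive from cpython/pythonrun.h via the include block added at the bottom, so a classic embedder compiles unchanged. A sketch:

    #include <Python.h>

    /* Sketch: a minimal embedder.  PyRun_SimpleString() is now declared in
     * cpython/pythonrun.h but still comes in through Python.h. */
    int
    main(void)
    {
        Py_Initialize();
        PyRun_SimpleString("import sys; print('embedded', sys.version)");
        return Py_FinalizeEx() < 0 ? 1 : 0;
    }
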
diff --git a/include/setobject.h b/include/setobject.h
index 119619e..62516be 100644
--- a/include/setobject.h
+++ b/include/setobject.h
@@ -88,18 +88,21 @@
 PyAPI_FUNC(Py_ssize_t) PySet_Size(PyObject *anyset);
 
 #define PyFrozenSet_CheckExact(ob) Py_IS_TYPE(ob, &PyFrozenSet_Type)
+#define PyFrozenSet_Check(ob) \
+    (Py_IS_TYPE(ob, &PyFrozenSet_Type) || \
+      PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type))
+
 #define PyAnySet_CheckExact(ob) \
     (Py_IS_TYPE(ob, &PySet_Type) || Py_IS_TYPE(ob, &PyFrozenSet_Type))
 #define PyAnySet_Check(ob) \
     (Py_IS_TYPE(ob, &PySet_Type) || Py_IS_TYPE(ob, &PyFrozenSet_Type) || \
       PyType_IsSubtype(Py_TYPE(ob), &PySet_Type) || \
       PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type))
+
+#define PySet_CheckExact(op) Py_IS_TYPE(op, &PySet_Type)
 #define PySet_Check(ob) \
     (Py_IS_TYPE(ob, &PySet_Type) || \
     PyType_IsSubtype(Py_TYPE(ob), &PySet_Type))
-#define   PyFrozenSet_Check(ob) \
-    (Py_IS_TYPE(ob, &PyFrozenSet_Type) || \
-      PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type))
 
 #ifdef __cplusplus
 }
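
This hunk reorders the macros and adds a public PySet_CheckExact(). A small sketch of the resulting check matrix (classify is hypothetical):

    #include <Python.h>

    static const char *
    classify(PyObject *obj)
    {
        if (PySet_CheckExact(obj))  return "set (exact)";    /* new in 3.10 */
        if (PyFrozenSet_Check(obj)) return "frozenset or subclass";
        if (PySet_Check(obj))       return "set subclass";
        return "not a set";
    }
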
diff --git a/include/structmember.h b/include/structmember.h
index b54f708..93b7aff 100644
--- a/include/structmember.h
+++ b/include/structmember.h
@@ -62,6 +62,7 @@
 #define PY_WRITE_RESTRICTED 4
 #define RESTRICTED          (READ_RESTRICTED | PY_WRITE_RESTRICTED)
 
+#define PY_AUDIT_READ       READ_RESTRICTED
 
 /* Current API, use this */
 PyAPI_FUNC(PyObject *) PyMember_GetOne(const char *, struct PyMemberDef *);
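
PY_AUDIT_READ is the new, clearer spelling of READ_RESTRICTED: reading such a member raises an "object.__getattr__" audit event. A hedged sketch (CredObject and its members are hypothetical):

    #include <Python.h>
    #include <structmember.h>

    typedef struct {
        PyObject_HEAD
        PyObject *token;
    } CredObject;

    static PyMemberDef Cred_members[] = {
        {"token", T_OBJECT_EX, offsetof(CredObject, token),
         READONLY | PY_AUDIT_READ, "audited, read-only token"},
        {NULL},
    };
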
diff --git a/include/token.h b/include/token.h
index 9b8a3aa..eb1b9ea 100644
--- a/include/token.h
+++ b/include/token.h
@@ -69,8 +69,9 @@
 #define ASYNC           56
 #define TYPE_IGNORE     57
 #define TYPE_COMMENT    58
-#define ERRORTOKEN      59
-#define N_TOKENS        63
+#define SOFT_KEYWORD    59
+#define ERRORTOKEN      60
+#define N_TOKENS        64
 #define NT_OFFSET       256
 
 /* Special definitions for cooperation with parser */
diff --git a/include/typeslots.h b/include/typeslots.h
index 64f6fff..5800d01 100644
--- a/include/typeslots.h
+++ b/include/typeslots.h
@@ -88,3 +88,7 @@
 /* New in 3.5 */
 #define Py_tp_finalize 80
 #endif
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000
+/* New in 3.10 */
+#define Py_am_send 81
+#endif
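
Py_am_send lets PyType_FromSpec()-created types implement the new tp_as_async->am_send slot; the expected function shape matches the sendfunc typedef from the (non-limited) object headers and returns the PySendResult enum added earlier in this diff. A hedged sketch (the MyIter names are hypothetical):

    #include <Python.h>

    /* Sketch: wiring an am_send implementation through the new slot ID.
     * sendfunc is PySendResult (*)(PyObject *, PyObject *, PyObject **). */
    static PySendResult
    MyIter_send(PyObject *self, PyObject *arg, PyObject **result)
    {
        *result = Py_NewRef(arg);          /* echo back whatever was sent */
        return PYGEN_NEXT;
    }

    static PyType_Slot MyIter_slots[] = {
        {Py_am_send, MyIter_send},         /* slot 81, new in 3.10 */
        {0, NULL},
    };

    static PyType_Spec MyIter_spec = {
        .name = "mymod.MyIter",
        .basicsize = sizeof(PyObject),
        .flags = Py_TPFLAGS_DEFAULT,
        .slots = MyIter_slots,
    };
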
diff --git a/include/ucnhash.h b/include/ucnhash.h
deleted file mode 100644
index 45362e9..0000000
--- a/include/ucnhash.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Unicode name database interface */
-#ifndef Py_LIMITED_API
-#ifndef Py_UCNHASH_H
-#define Py_UCNHASH_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* revised ucnhash CAPI interface (exported through a "wrapper") */
-
-#define PyUnicodeData_CAPSULE_NAME "unicodedata.ucnhash_CAPI"
-
-typedef struct {
-
-    /* Size of this struct */
-    int size;
-
-    /* Get name for a given character code.  Returns non-zero if
-       success, zero if not.  Does not set Python exceptions.
-       If self is NULL, data come from the default version of the database.
-       If it is not NULL, it should be a unicodedata.ucd_X_Y_Z object */
-    int (*getname)(PyObject *self, Py_UCS4 code, char* buffer, int buflen,
-                   int with_alias_and_seq);
-
-    /* Get character code for a given name.  Same error handling
-       as for getname. */
-    int (*getcode)(PyObject *self, const char* name, int namelen, Py_UCS4* code,
-                   int with_named_seq);
-
-} _PyUnicode_Name_CAPI;
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* !Py_UCNHASH_H */
-#endif /* !Py_LIMITED_API */
diff --git a/include/unicodeobject.h b/include/unicodeobject.h
index 500ce24..b0ac086 100644
--- a/include/unicodeobject.h
+++ b/include/unicodeobject.h
@@ -261,11 +261,14 @@
     );
 
 PyAPI_FUNC(void) PyUnicode_InternInPlace(PyObject **);
-PyAPI_FUNC(void) PyUnicode_InternImmortal(PyObject **);
 PyAPI_FUNC(PyObject *) PyUnicode_InternFromString(
     const char *u              /* UTF-8 encoded string */
     );
 
+// PyUnicode_InternImmortal() is deprecated since Python 3.10
+// and will be removed in Python 3.12. Use PyUnicode_InternInPlace() instead.
+Py_DEPRECATED(3.10) PyAPI_FUNC(void) PyUnicode_InternImmortal(PyObject **);
+
 /* Use only if you know it's a string */
 #define PyUnicode_CHECK_INTERNED(op) \
     (((PyASCIIObject *)(op))->state.interned)
@@ -465,6 +468,23 @@
     PyObject *unicode           /* Unicode object */
     );
 
+/* Returns a pointer to the default encoding (UTF-8) of the
+   Unicode object unicode and the size of the encoded representation
+   in bytes stored in *size.
+
+   In case of an error, *size is left unset.
+
+   This function caches the UTF-8 encoded string in the unicodeobject
+   and subsequent calls will return the same string.  The memory is released
+   when the unicodeobject is deallocated.
+*/
+
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000
+PyAPI_FUNC(const char *) PyUnicode_AsUTF8AndSize(
+    PyObject *unicode,
+    Py_ssize_t *size);
+#endif
+
 /* --- UTF-32 Codecs ------------------------------------------------------ */
 
 /* Decodes length bytes from a UTF-32 encoded buffer string and returns
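
PyUnicode_AsUTF8AndSize() enters the limited API at 3.10 (the declaration previously lived only on the non-limited side). The returned buffer is borrowed from the str object, as the comment above notes. A sketch (print_utf8 is hypothetical):

    #include <Python.h>

    /* Sketch: borrow the cached UTF-8 form of a str.  The buffer belongs to
     * `obj` and stays valid until the str is deallocated. */
    static int
    print_utf8(PyObject *obj)
    {
        Py_ssize_t size;
        const char *utf8 = PyUnicode_AsUTF8AndSize(obj, &size);
        if (utf8 == NULL) {
            return -1;             /* not a str, or the encoding failed */
        }
        printf("%.*s (%zd bytes)\n", (int)size, utf8, size);
        return 0;
    }
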
diff --git a/libs/python310.lib b/libs/python310.lib
new file mode 100644
index 0000000..c72632c
--- /dev/null
+++ b/libs/python310.lib
Binary files differ
diff --git a/libs/python39.lib b/libs/python39.lib
deleted file mode 100644
index 208b57c..0000000
--- a/libs/python39.lib
+++ /dev/null
Binary files differ
diff --git a/python.exe b/python.exe
index c9eea11..973b5f8 100644
--- a/python.exe
+++ b/python.exe
Binary files differ
diff --git a/python310.dll b/python310.dll
new file mode 100644
index 0000000..5db4b7f
--- /dev/null
+++ b/python310.dll
Binary files differ
diff --git a/python39.dll b/python39.dll
deleted file mode 100644
index 883486b..0000000
--- a/python39.dll
+++ /dev/null
Binary files differ
diff --git a/pythonw.exe b/pythonw.exe
index adf8d98..b475b20 100644
--- a/pythonw.exe
+++ b/pythonw.exe
Binary files differ