Diffstat (limited to 'pkg_resources/_vendor')
-rw-r--r--  pkg_resources/_vendor/appdirs-1.4.3.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/appdirs.py | 86
-rw-r--r--  pkg_resources/_vendor/importlib_resources-5.4.0.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/importlib_resources/__init__.py | 36
-rw-r--r--  pkg_resources/_vendor/importlib_resources/_adapters.py | 170
-rw-r--r--  pkg_resources/_vendor/importlib_resources/_common.py | 104
-rw-r--r--  pkg_resources/_vendor/importlib_resources/_compat.py | 98
-rw-r--r--  pkg_resources/_vendor/importlib_resources/_itertools.py | 35
-rw-r--r--  pkg_resources/_vendor/importlib_resources/_legacy.py | 121
-rw-r--r--  pkg_resources/_vendor/importlib_resources/abc.py | 137
-rw-r--r--  pkg_resources/_vendor/importlib_resources/readers.py | 122
-rw-r--r--  pkg_resources/_vendor/importlib_resources/simple.py | 116
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/_compat.py | 19
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/data01/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/data01/subdirectory/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/data02/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/data02/one/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/data02/one/resource1.txt | 1
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/data02/two/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/data02/two/resource2.txt | 1
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/test_compatibilty_files.py | 102
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/test_contents.py | 43
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/test_files.py | 46
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/test_open.py | 81
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/test_path.py | 64
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/test_read.py | 76
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/test_reader.py | 128
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/test_resource.py | 252
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/update-zips.py | 53
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/util.py | 178
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/zipdata01/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/importlib_resources/tests/zipdata02/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/jaraco.context-4.1.1.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/jaraco.functools-3.5.0.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/jaraco.text-3.7.0.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/jaraco/__init__.py | 0
-rw-r--r--  pkg_resources/_vendor/jaraco/context.py | 213
-rw-r--r--  pkg_resources/_vendor/jaraco/functools.py | 525
-rw-r--r--  pkg_resources/_vendor/jaraco/text/Lorem ipsum.txt | 2
-rw-r--r--  pkg_resources/_vendor/jaraco/text/__init__.py | 599
-rw-r--r--  pkg_resources/_vendor/more_itertools-8.12.0.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/more_itertools/__init__.py | 4
-rw-r--r--  pkg_resources/_vendor/more_itertools/more.py | 4316
-rw-r--r--  pkg_resources/_vendor/more_itertools/recipes.py | 698
-rw-r--r--  pkg_resources/_vendor/packaging-21.3.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/packaging/__about__.py | 17
-rw-r--r--  pkg_resources/_vendor/packaging/__init__.py | 21
-rw-r--r--  pkg_resources/_vendor/packaging/_compat.py | 30
-rw-r--r--  pkg_resources/_vendor/packaging/_manylinux.py | 301
-rw-r--r--  pkg_resources/_vendor/packaging/_musllinux.py | 136
-rw-r--r--  pkg_resources/_vendor/packaging/_structures.py | 49
-rw-r--r--  pkg_resources/_vendor/packaging/markers.py | 197
-rw-r--r--  pkg_resources/_vendor/packaging/requirements.py | 95
-rw-r--r--  pkg_resources/_vendor/packaging/specifiers.py | 376
-rw-r--r--  pkg_resources/_vendor/packaging/tags.py | 487
-rw-r--r--  pkg_resources/_vendor/packaging/utils.py | 128
-rw-r--r--  pkg_resources/_vendor/packaging/version.py | 335
-rw-r--r--  pkg_resources/_vendor/pyparsing-2.2.1.dist-info/LICENSE.txt | 18
-rw-r--r--  pkg_resources/_vendor/pyparsing-2.2.1.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/pyparsing.py | 96
-rw-r--r--  pkg_resources/_vendor/six.py | 868
-rw-r--r--  pkg_resources/_vendor/vendored.txt | 12
-rw-r--r--  pkg_resources/_vendor/zipp-3.7.0.dist-info/top_level.txt | 1
-rw-r--r--  pkg_resources/_vendor/zipp.py | 329
65 files changed, 10525 insertions, 1405 deletions
diff --git a/pkg_resources/_vendor/appdirs-1.4.3.dist-info/top_level.txt b/pkg_resources/_vendor/appdirs-1.4.3.dist-info/top_level.txt
new file mode 100644
index 0000000..d64bc32
--- /dev/null
+++ b/pkg_resources/_vendor/appdirs-1.4.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+appdirs
diff --git a/pkg_resources/_vendor/appdirs.py b/pkg_resources/_vendor/appdirs.py
index f4dba09..ae67001 100644
--- a/pkg_resources/_vendor/appdirs.py
+++ b/pkg_resources/_vendor/appdirs.py
@@ -13,7 +13,7 @@ See <http://github.com/ActiveState/appdirs> for details and usage.
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-__version_info__ = (1, 4, 0)
+__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))
@@ -98,7 +98,7 @@ def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
- """Return full path to the user-shared data dir for this application.
+ r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
@@ -117,7 +117,7 @@ def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
- Typical user data directories are:
+ Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
@@ -184,13 +184,13 @@ def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
- Typical user data directories are:
+ Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
- That means, by deafult "~/.config/<AppName>".
+ That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
@@ -204,7 +204,7 @@ def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
- """Return full path to the user-shared data dir for this application.
+ r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
@@ -222,7 +222,7 @@ def site_config_dir(appname=None, appauthor=None, version=None, multipath=False)
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
- Typical user data directories are:
+ Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
@@ -311,6 +311,48 @@ def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
return path
+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific state dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user state directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
+ to extend the XDG spec and support $XDG_STATE_HOME.
+
+ That means, by default "~/.local/state/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
@@ -329,7 +371,7 @@ def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
- Typical user cache directories are:
+ Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
@@ -364,8 +406,8 @@ def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
- def __init__(self, appname, appauthor=None, version=None, roaming=False,
- multipath=False):
+ def __init__(self, appname=None, appauthor=None, version=None,
+ roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
@@ -398,6 +440,11 @@ class AppDirs(object):
version=self.version)
@property
+ def user_state_dir(self):
+ return user_state_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
@@ -410,7 +457,10 @@ def _get_win_folder_from_registry(csidl_name):
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
- import _winreg
+ if PY3:
+ import winreg as _winreg
+ else:
+ import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
@@ -500,7 +550,7 @@ def _get_win_folder_with_jna(csidl_name):
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
- if kernal.GetShortPathName(dir, buf, buf_size):
+ if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
@@ -527,9 +577,15 @@ if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
- props = ("user_data_dir", "site_data_dir",
- "user_config_dir", "site_config_dir",
- "user_cache_dir", "user_log_dir")
+ props = ("user_data_dir",
+ "user_config_dir",
+ "user_cache_dir",
+ "user_state_dir",
+ "user_log_dir",
+ "site_data_dir",
+ "site_config_dir")
+
+ print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
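
The appdirs hunks above add a user_state_dir() function and a matching AppDirs property. A minimal sketch of the new API, assuming the vendored import path (upstream this is plain `appdirs`); "MyApp" and "MyCompany" are placeholder names:

from pkg_resources._vendor import appdirs

# Function form: on Unix this resolves under $XDG_STATE_HOME (default
# ~/.local/state); on Windows and macOS it falls back to user_data_dir,
# exactly as the new code above does.
print(appdirs.user_state_dir("MyApp", "MyCompany", version="1.0"))

# Wrapper form via the new AppDirs.user_state_dir property.
dirs = appdirs.AppDirs("MyApp", "MyCompany", version="1.0")
print(dirs.user_state_dir)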
diff --git a/pkg_resources/_vendor/importlib_resources-5.4.0.dist-info/top_level.txt b/pkg_resources/_vendor/importlib_resources-5.4.0.dist-info/top_level.txt
new file mode 100644
index 0000000..58ad1bd
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources-5.4.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+importlib_resources
diff --git a/pkg_resources/_vendor/importlib_resources/__init__.py b/pkg_resources/_vendor/importlib_resources/__init__.py
new file mode 100644
index 0000000..34e3a99
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/__init__.py
@@ -0,0 +1,36 @@
+"""Read resources contained within a package."""
+
+from ._common import (
+ as_file,
+ files,
+ Package,
+)
+
+from ._legacy import (
+ contents,
+ open_binary,
+ read_binary,
+ open_text,
+ read_text,
+ is_resource,
+ path,
+ Resource,
+)
+
+from .abc import ResourceReader
+
+
+__all__ = [
+ 'Package',
+ 'Resource',
+ 'ResourceReader',
+ 'as_file',
+ 'contents',
+ 'files',
+ 'is_resource',
+ 'open_binary',
+ 'open_text',
+ 'path',
+ 'read_binary',
+ 'read_text',
+]
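
The __init__ above fixes the package's public surface: files()/as_file() from _common, the deprecated helpers from _legacy, and the ResourceReader ABC. A minimal sketch of the preferred entry point, using the upstream import name as the vendored test suite below does; "mypkg" and "data.txt" are hypothetical names:

import importlib_resources as resources

# files() returns a Traversable rooted at the package; joinpath() and
# read_text() mirror pathlib.Path.
text = resources.files("mypkg").joinpath("data.txt").read_text(encoding="utf-8")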
diff --git a/pkg_resources/_vendor/importlib_resources/_adapters.py b/pkg_resources/_vendor/importlib_resources/_adapters.py
new file mode 100644
index 0000000..ea363d8
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/_adapters.py
@@ -0,0 +1,170 @@
+from contextlib import suppress
+from io import TextIOWrapper
+
+from . import abc
+
+
+class SpecLoaderAdapter:
+ """
+ Adapt a package spec to adapt the underlying loader.
+ """
+
+ def __init__(self, spec, adapter=lambda spec: spec.loader):
+ self.spec = spec
+ self.loader = adapter(spec)
+
+ def __getattr__(self, name):
+ return getattr(self.spec, name)
+
+
+class TraversableResourcesLoader:
+ """
+ Adapt a loader to provide TraversableResources.
+ """
+
+ def __init__(self, spec):
+ self.spec = spec
+
+ def get_resource_reader(self, name):
+ return CompatibilityFiles(self.spec)._native()
+
+
+def _io_wrapper(file, mode='r', *args, **kwargs):
+ if mode == 'r':
+ return TextIOWrapper(file, *args, **kwargs)
+ elif mode == 'rb':
+ return file
+ raise ValueError(
+ "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
+ )
+
+
+class CompatibilityFiles:
+ """
+ Adapter for an existing or non-existent resource reader
+ to provide a compatibility .files().
+ """
+
+ class SpecPath(abc.Traversable):
+ """
+ Path tied to a module spec.
+ Can be read and exposes the resource reader children.
+ """
+
+ def __init__(self, spec, reader):
+ self._spec = spec
+ self._reader = reader
+
+ def iterdir(self):
+ if not self._reader:
+ return iter(())
+ return iter(
+ CompatibilityFiles.ChildPath(self._reader, path)
+ for path in self._reader.contents()
+ )
+
+ def is_file(self):
+ return False
+
+ is_dir = is_file
+
+ def joinpath(self, other):
+ if not self._reader:
+ return CompatibilityFiles.OrphanPath(other)
+ return CompatibilityFiles.ChildPath(self._reader, other)
+
+ @property
+ def name(self):
+ return self._spec.name
+
+ def open(self, mode='r', *args, **kwargs):
+ return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
+
+ class ChildPath(abc.Traversable):
+ """
+ Path tied to a resource reader child.
+ Can be read but doesn't expose any meaningful children.
+ """
+
+ def __init__(self, reader, name):
+ self._reader = reader
+ self._name = name
+
+ def iterdir(self):
+ return iter(())
+
+ def is_file(self):
+ return self._reader.is_resource(self.name)
+
+ def is_dir(self):
+ return not self.is_file()
+
+ def joinpath(self, other):
+ return CompatibilityFiles.OrphanPath(self.name, other)
+
+ @property
+ def name(self):
+ return self._name
+
+ def open(self, mode='r', *args, **kwargs):
+ return _io_wrapper(
+ self._reader.open_resource(self.name), mode, *args, **kwargs
+ )
+
+ class OrphanPath(abc.Traversable):
+ """
+ Orphan path, not tied to a module spec or resource reader.
+ Can't be read and doesn't expose any meaningful children.
+ """
+
+ def __init__(self, *path_parts):
+ if len(path_parts) < 1:
+ raise ValueError('Need at least one path part to construct a path')
+ self._path = path_parts
+
+ def iterdir(self):
+ return iter(())
+
+ def is_file(self):
+ return False
+
+ is_dir = is_file
+
+ def joinpath(self, other):
+ return CompatibilityFiles.OrphanPath(*self._path, other)
+
+ @property
+ def name(self):
+ return self._path[-1]
+
+ def open(self, mode='r', *args, **kwargs):
+ raise FileNotFoundError("Can't open orphan path")
+
+ def __init__(self, spec):
+ self.spec = spec
+
+ @property
+ def _reader(self):
+ with suppress(AttributeError):
+ return self.spec.loader.get_resource_reader(self.spec.name)
+
+ def _native(self):
+ """
+ Return the native reader if it supports files().
+ """
+ reader = self._reader
+ return reader if hasattr(reader, 'files') else self
+
+ def __getattr__(self, attr):
+ return getattr(self._reader, attr)
+
+ def files(self):
+ return CompatibilityFiles.SpecPath(self.spec, self._reader)
+
+
+def wrap_spec(package):
+ """
+ Construct a package spec with traversable compatibility
+ on the spec/loader/reader.
+ """
+ return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
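
wrap_spec() above is the generic form of the adaptation: it re-exposes a package's spec so that spec.loader.get_resource_reader() always yields something usable with files(), falling back to the CompatibilityFiles shim when the native reader lacks it. A small sketch, assuming any importable package (stdlib email here) as the subject:

import email
from pkg_resources._vendor.importlib_resources import _adapters

spec = _adapters.wrap_spec(email)
reader = spec.loader.get_resource_reader(spec.name)
# Either the native files()-capable reader or a CompatibilityFiles shim.
print(type(reader).__name__)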
diff --git a/pkg_resources/_vendor/importlib_resources/_common.py b/pkg_resources/_vendor/importlib_resources/_common.py
new file mode 100644
index 0000000..a12e2c7
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/_common.py
@@ -0,0 +1,104 @@
+import os
+import pathlib
+import tempfile
+import functools
+import contextlib
+import types
+import importlib
+
+from typing import Union, Optional
+from .abc import ResourceReader, Traversable
+
+from ._compat import wrap_spec
+
+Package = Union[types.ModuleType, str]
+
+
+def files(package):
+ # type: (Package) -> Traversable
+ """
+ Get a Traversable resource from a package
+ """
+ return from_package(get_package(package))
+
+
+def get_resource_reader(package):
+ # type: (types.ModuleType) -> Optional[ResourceReader]
+ """
+ Return the package's loader if it's a ResourceReader.
+ """
+    # We can't use an issubclass() check here because apparently abc's
+    # __subclasscheck__() hook wants to create a weak reference to the object, but
+ # hook wants to create a weak reference to the object, but
+ # zipimport.zipimporter does not support weak references, resulting in a
+ # TypeError. That seems terrible.
+ spec = package.__spec__
+ reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
+ if reader is None:
+ return None
+ return reader(spec.name) # type: ignore
+
+
+def resolve(cand):
+ # type: (Package) -> types.ModuleType
+ return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
+
+
+def get_package(package):
+ # type: (Package) -> types.ModuleType
+ """Take a package name or module object and return the module.
+
+ Raise an exception if the resolved module is not a package.
+ """
+ resolved = resolve(package)
+ if wrap_spec(resolved).submodule_search_locations is None:
+ raise TypeError(f'{package!r} is not a package')
+ return resolved
+
+
+def from_package(package):
+ """
+ Return a Traversable object for the given package.
+    """
+ spec = wrap_spec(package)
+ reader = spec.loader.get_resource_reader(spec.name)
+ return reader.files()
+
+
+@contextlib.contextmanager
+def _tempfile(reader, suffix=''):
+ # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
+ # blocks due to the need to close the temporary file to work on Windows
+ # properly.
+ fd, raw_path = tempfile.mkstemp(suffix=suffix)
+ try:
+ try:
+ os.write(fd, reader())
+ finally:
+ os.close(fd)
+ del reader
+ yield pathlib.Path(raw_path)
+ finally:
+ try:
+ os.remove(raw_path)
+ except FileNotFoundError:
+ pass
+
+
+@functools.singledispatch
+def as_file(path):
+ """
+ Given a Traversable object, return that object as a
+ path on the local file system in a context manager.
+ """
+ return _tempfile(path.read_bytes, suffix=path.name)
+
+
+@as_file.register(pathlib.Path)
+@contextlib.contextmanager
+def _(path):
+ """
+ Degenerate behavior for pathlib.Path objects.
+ """
+ yield path
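
as_file() above is the bridge to APIs that insist on a real filesystem path: for pathlib.Path-backed resources it yields the path unchanged, otherwise it writes the bytes to a temporary file that is removed on exit. A usage sketch with hypothetical names:

import importlib_resources as resources

ref = resources.files("mypkg") / "data.bin"   # hypothetical resource
with resources.as_file(ref) as path:
    # 'path' is a concrete pathlib.Path for the duration of the block,
    # even if 'mypkg' is imported from a zip archive.
    payload = path.read_bytes()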
diff --git a/pkg_resources/_vendor/importlib_resources/_compat.py b/pkg_resources/_vendor/importlib_resources/_compat.py
new file mode 100644
index 0000000..cb9fc82
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/_compat.py
@@ -0,0 +1,98 @@
+# flake8: noqa
+
+import abc
+import sys
+import pathlib
+from contextlib import suppress
+
+if sys.version_info >= (3, 10):
+ from zipfile import Path as ZipPath # type: ignore
+else:
+ from ..zipp import Path as ZipPath # type: ignore
+
+
+try:
+ from typing import runtime_checkable # type: ignore
+except ImportError:
+
+ def runtime_checkable(cls): # type: ignore
+ return cls
+
+
+try:
+ from typing import Protocol # type: ignore
+except ImportError:
+ Protocol = abc.ABC # type: ignore
+
+
+class TraversableResourcesLoader:
+ """
+ Adapt loaders to provide TraversableResources and other
+ compatibility.
+
+ Used primarily for Python 3.9 and earlier where the native
+ loaders do not yet implement TraversableResources.
+ """
+
+ def __init__(self, spec):
+ self.spec = spec
+
+ @property
+ def path(self):
+ return self.spec.origin
+
+ def get_resource_reader(self, name):
+ from . import readers, _adapters
+
+ def _zip_reader(spec):
+ with suppress(AttributeError):
+ return readers.ZipReader(spec.loader, spec.name)
+
+ def _namespace_reader(spec):
+ with suppress(AttributeError, ValueError):
+ return readers.NamespaceReader(spec.submodule_search_locations)
+
+ def _available_reader(spec):
+ with suppress(AttributeError):
+ return spec.loader.get_resource_reader(spec.name)
+
+ def _native_reader(spec):
+ reader = _available_reader(spec)
+ return reader if hasattr(reader, 'files') else None
+
+ def _file_reader(spec):
+ try:
+ path = pathlib.Path(self.path)
+ except TypeError:
+ return None
+ if path.exists():
+ return readers.FileReader(self)
+
+ return (
+ # native reader if it supplies 'files'
+ _native_reader(self.spec)
+ or
+ # local ZipReader if a zip module
+ _zip_reader(self.spec)
+ or
+ # local NamespaceReader if a namespace module
+ _namespace_reader(self.spec)
+ or
+ # local FileReader
+ _file_reader(self.spec)
+ # fallback - adapt the spec ResourceReader to TraversableReader
+ or _adapters.CompatibilityFiles(self.spec)
+ )
+
+
+def wrap_spec(package):
+ """
+ Construct a package spec with traversable compatibility
+ on the spec/loader/reader.
+
+ Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
+ from above for older Python compatibility (<3.10).
+ """
+ from . import _adapters
+
+ return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
diff --git a/pkg_resources/_vendor/importlib_resources/_itertools.py b/pkg_resources/_vendor/importlib_resources/_itertools.py
new file mode 100644
index 0000000..cce0558
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/_itertools.py
@@ -0,0 +1,35 @@
+from itertools import filterfalse
+
+from typing import (
+ Callable,
+ Iterable,
+ Iterator,
+ Optional,
+ Set,
+ TypeVar,
+ Union,
+)
+
+# Type and type variable definitions
+_T = TypeVar('_T')
+_U = TypeVar('_U')
+
+
+def unique_everseen(
+ iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
+) -> Iterator[_T]:
+ "List unique elements, preserving order. Remember all elements ever seen."
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+ # unique_everseen('ABBCcAD', str.lower) --> A B C D
+ seen: Set[Union[_T, _U]] = set()
+ seen_add = seen.add
+ if key is None:
+ for element in filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
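
unique_everseen() above is the classic itertools recipe; it powers MultiplexedPath.iterdir() later in this diff. The docstring examples, restated as a runnable check:

from pkg_resources._vendor.importlib_resources._itertools import unique_everseen

assert list(unique_everseen('AAAABBBCCDAABBB')) == list('ABCD')
assert list(unique_everseen('ABBCcAD', key=str.lower)) == list('ABCD')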
diff --git a/pkg_resources/_vendor/importlib_resources/_legacy.py b/pkg_resources/_vendor/importlib_resources/_legacy.py
new file mode 100644
index 0000000..1d5d3f1
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/_legacy.py
@@ -0,0 +1,121 @@
+import functools
+import os
+import pathlib
+import types
+import warnings
+
+from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
+
+from . import _common
+
+Package = Union[types.ModuleType, str]
+Resource = str
+
+
+def deprecated(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ warnings.warn(
+ f"{func.__name__} is deprecated. Use files() instead. "
+ "Refer to https://importlib-resources.readthedocs.io"
+ "/en/latest/using.html#migrating-from-legacy for migration advice.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return func(*args, **kwargs)
+
+ return wrapper
+
+
+def normalize_path(path):
+ # type: (Any) -> str
+ """Normalize a path by ensuring it is a string.
+
+ If the resulting string contains path separators, an exception is raised.
+ """
+ str_path = str(path)
+ parent, file_name = os.path.split(str_path)
+ if parent:
+ raise ValueError(f'{path!r} must be only a file name')
+ return file_name
+
+
+@deprecated
+def open_binary(package: Package, resource: Resource) -> BinaryIO:
+ """Return a file-like object opened for binary reading of the resource."""
+ return (_common.files(package) / normalize_path(resource)).open('rb')
+
+
+@deprecated
+def read_binary(package: Package, resource: Resource) -> bytes:
+ """Return the binary contents of the resource."""
+ return (_common.files(package) / normalize_path(resource)).read_bytes()
+
+
+@deprecated
+def open_text(
+ package: Package,
+ resource: Resource,
+ encoding: str = 'utf-8',
+ errors: str = 'strict',
+) -> TextIO:
+ """Return a file-like object opened for text reading of the resource."""
+ return (_common.files(package) / normalize_path(resource)).open(
+ 'r', encoding=encoding, errors=errors
+ )
+
+
+@deprecated
+def read_text(
+ package: Package,
+ resource: Resource,
+ encoding: str = 'utf-8',
+ errors: str = 'strict',
+) -> str:
+ """Return the decoded string of the resource.
+
+ The decoding-related arguments have the same semantics as those of
+ bytes.decode().
+ """
+ with open_text(package, resource, encoding, errors) as fp:
+ return fp.read()
+
+
+@deprecated
+def contents(package: Package) -> Iterable[str]:
+ """Return an iterable of entries in `package`.
+
+ Note that not all entries are resources. Specifically, directories are
+ not considered resources. Use `is_resource()` on each entry returned here
+ to check if it is a resource or not.
+ """
+ return [path.name for path in _common.files(package).iterdir()]
+
+
+@deprecated
+def is_resource(package: Package, name: str) -> bool:
+ """True if `name` is a resource inside `package`.
+
+ Directories are *not* resources.
+ """
+ resource = normalize_path(name)
+ return any(
+ traversable.name == resource and traversable.is_file()
+ for traversable in _common.files(package).iterdir()
+ )
+
+
+@deprecated
+def path(
+ package: Package,
+ resource: Resource,
+) -> ContextManager[pathlib.Path]:
+ """A context manager providing a file path object to the resource.
+
+ If the resource does not already exist on its own on the file system,
+ a temporary file will be created. If the file was created, the file
+ will be deleted upon exiting the context manager (no exception is
+ raised if the file was deleted prior to the context manager
+ exiting).
+ """
+ return _common.as_file(_common.files(package) / normalize_path(resource))
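
Every helper in _legacy.py is wrapped by the deprecated() decorator above, so calls still work but emit a DeprecationWarning steering users to files(). A sketch of the migration, with hypothetical names:

import warnings
import importlib_resources as resources

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    resources.read_text("mypkg", "data.txt")   # legacy spelling
assert any(w.category is DeprecationWarning for w in caught)

# Preferred spelling per the warning text:
text = resources.files("mypkg").joinpath("data.txt").read_text(encoding="utf-8")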
diff --git a/pkg_resources/_vendor/importlib_resources/abc.py b/pkg_resources/_vendor/importlib_resources/abc.py
new file mode 100644
index 0000000..d39dc1a
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/abc.py
@@ -0,0 +1,137 @@
+import abc
+from typing import BinaryIO, Iterable, Text
+
+from ._compat import runtime_checkable, Protocol
+
+
+class ResourceReader(metaclass=abc.ABCMeta):
+ """Abstract base class for loaders to provide resource reading support."""
+
+ @abc.abstractmethod
+ def open_resource(self, resource: Text) -> BinaryIO:
+ """Return an opened, file-like object for binary reading.
+
+ The 'resource' argument is expected to represent only a file name.
+ If the resource cannot be found, FileNotFoundError is raised.
+ """
+ # This deliberately raises FileNotFoundError instead of
+ # NotImplementedError so that if this method is accidentally called,
+ # it'll still do the right thing.
+ raise FileNotFoundError
+
+ @abc.abstractmethod
+ def resource_path(self, resource: Text) -> Text:
+ """Return the file system path to the specified resource.
+
+ The 'resource' argument is expected to represent only a file name.
+ If the resource does not exist on the file system, raise
+ FileNotFoundError.
+ """
+ # This deliberately raises FileNotFoundError instead of
+ # NotImplementedError so that if this method is accidentally called,
+ # it'll still do the right thing.
+ raise FileNotFoundError
+
+ @abc.abstractmethod
+ def is_resource(self, path: Text) -> bool:
+ """Return True if the named 'path' is a resource.
+
+ Files are resources, directories are not.
+ """
+ raise FileNotFoundError
+
+ @abc.abstractmethod
+ def contents(self) -> Iterable[str]:
+ """Return an iterable of entries in `package`."""
+ raise FileNotFoundError
+
+
+@runtime_checkable
+class Traversable(Protocol):
+ """
+ An object with a subset of pathlib.Path methods suitable for
+ traversing directories and opening files.
+ """
+
+ @abc.abstractmethod
+ def iterdir(self):
+ """
+ Yield Traversable objects in self
+ """
+
+ def read_bytes(self):
+ """
+ Read contents of self as bytes
+ """
+ with self.open('rb') as strm:
+ return strm.read()
+
+ def read_text(self, encoding=None):
+ """
+ Read contents of self as text
+ """
+ with self.open(encoding=encoding) as strm:
+ return strm.read()
+
+ @abc.abstractmethod
+ def is_dir(self) -> bool:
+ """
+ Return True if self is a directory
+ """
+
+ @abc.abstractmethod
+ def is_file(self) -> bool:
+ """
+ Return True if self is a file
+ """
+
+ @abc.abstractmethod
+ def joinpath(self, child):
+ """
+ Return Traversable child in self
+ """
+
+ def __truediv__(self, child):
+ """
+ Return Traversable child in self
+ """
+ return self.joinpath(child)
+
+ @abc.abstractmethod
+ def open(self, mode='r', *args, **kwargs):
+ """
+ mode may be 'r' or 'rb' to open as text or binary. Return a handle
+ suitable for reading (same as pathlib.Path.open).
+
+ When opening as text, accepts encoding parameters such as those
+ accepted by io.TextIOWrapper.
+ """
+
+ @abc.abstractproperty
+ def name(self) -> str:
+ """
+ The base name of this object without any parent references.
+ """
+
+
+class TraversableResources(ResourceReader):
+ """
+ The required interface for providing traversable
+ resources.
+ """
+
+ @abc.abstractmethod
+ def files(self):
+ """Return a Traversable object for the loaded package."""
+
+ def open_resource(self, resource):
+ return self.files().joinpath(resource).open('rb')
+
+ def resource_path(self, resource):
+ raise FileNotFoundError(resource)
+
+ def is_resource(self, path):
+ return self.files().joinpath(path).is_file()
+
+ def contents(self):
+ return (item.name for item in self.files().iterdir())
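
TraversableResources above shows the intended extension point: a provider only needs to implement files(), and open_resource/is_resource/contents fall out of the Traversable it returns. A minimal, hypothetical directory-backed reader as a sketch:

import pathlib
from pkg_resources._vendor.importlib_resources.abc import TraversableResources

class DirReader(TraversableResources):   # hypothetical example class
    def __init__(self, root):
        self.root = pathlib.Path(root)

    def files(self):
        return self.root                 # pathlib.Path satisfies Traversable

reader = DirReader(".")
print(sorted(reader.contents()))         # entry names via files().iterdir()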
diff --git a/pkg_resources/_vendor/importlib_resources/readers.py b/pkg_resources/_vendor/importlib_resources/readers.py
new file mode 100644
index 0000000..f1190ca
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/readers.py
@@ -0,0 +1,122 @@
+import collections
+import pathlib
+import operator
+
+from . import abc
+
+from ._itertools import unique_everseen
+from ._compat import ZipPath
+
+
+def remove_duplicates(items):
+ return iter(collections.OrderedDict.fromkeys(items))
+
+
+class FileReader(abc.TraversableResources):
+ def __init__(self, loader):
+ self.path = pathlib.Path(loader.path).parent
+
+ def resource_path(self, resource):
+ """
+ Return the file system path to prevent
+ `resources.path()` from creating a temporary
+ copy.
+ """
+ return str(self.path.joinpath(resource))
+
+ def files(self):
+ return self.path
+
+
+class ZipReader(abc.TraversableResources):
+ def __init__(self, loader, module):
+ _, _, name = module.rpartition('.')
+ self.prefix = loader.prefix.replace('\\', '/') + name + '/'
+ self.archive = loader.archive
+
+ def open_resource(self, resource):
+ try:
+ return super().open_resource(resource)
+ except KeyError as exc:
+ raise FileNotFoundError(exc.args[0])
+
+ def is_resource(self, path):
+ # workaround for `zipfile.Path.is_file` returning true
+ # for non-existent paths.
+ target = self.files().joinpath(path)
+ return target.is_file() and target.exists()
+
+ def files(self):
+ return ZipPath(self.archive, self.prefix)
+
+
+class MultiplexedPath(abc.Traversable):
+ """
+ Given a series of Traversable objects, implement a merged
+ version of the interface across all objects. Useful for
+ namespace packages which may be multihomed at a single
+ name.
+ """
+
+ def __init__(self, *paths):
+ self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
+ if not self._paths:
+ message = 'MultiplexedPath must contain at least one path'
+ raise FileNotFoundError(message)
+ if not all(path.is_dir() for path in self._paths):
+ raise NotADirectoryError('MultiplexedPath only supports directories')
+
+ def iterdir(self):
+ files = (file for path in self._paths for file in path.iterdir())
+ return unique_everseen(files, key=operator.attrgetter('name'))
+
+ def read_bytes(self):
+ raise FileNotFoundError(f'{self} is not a file')
+
+ def read_text(self, *args, **kwargs):
+ raise FileNotFoundError(f'{self} is not a file')
+
+ def is_dir(self):
+ return True
+
+ def is_file(self):
+ return False
+
+ def joinpath(self, child):
+ # first try to find child in current paths
+ for file in self.iterdir():
+ if file.name == child:
+ return file
+ # if it does not exist, construct it with the first path
+ return self._paths[0] / child
+
+ __truediv__ = joinpath
+
+ def open(self, *args, **kwargs):
+ raise FileNotFoundError(f'{self} is not a file')
+
+ @property
+ def name(self):
+ return self._paths[0].name
+
+ def __repr__(self):
+ paths = ', '.join(f"'{path}'" for path in self._paths)
+ return f'MultiplexedPath({paths})'
+
+
+class NamespaceReader(abc.TraversableResources):
+ def __init__(self, namespace_path):
+ if 'NamespacePath' not in str(namespace_path):
+ raise ValueError('Invalid path')
+ self.path = MultiplexedPath(*list(namespace_path))
+
+ def resource_path(self, resource):
+ """
+ Return the file system path to prevent
+ `resources.path()` from creating a temporary
+ copy.
+ """
+ return str(self.path.joinpath(resource))
+
+ def files(self):
+ return self.path
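
MultiplexedPath above merges several directories into one read-only Traversable, which is how NamespaceReader presents a multihomed namespace package. A sketch, assuming two existing directories named "dir_a" and "dir_b":

from pkg_resources._vendor.importlib_resources.readers import MultiplexedPath

merged = MultiplexedPath("dir_a", "dir_b")          # both must be directories
print([entry.name for entry in merged.iterdir()])   # union, first-seen wins
# joinpath() prefers an existing child; otherwise it resolves against dir_a.
print(merged / "somefile.txt")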
diff --git a/pkg_resources/_vendor/importlib_resources/simple.py b/pkg_resources/_vendor/importlib_resources/simple.py
new file mode 100644
index 0000000..da073cb
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/simple.py
@@ -0,0 +1,116 @@
+"""
+Interface adapters for low-level readers.
+"""
+
+import abc
+import io
+import itertools
+from typing import BinaryIO, List
+
+from .abc import Traversable, TraversableResources
+
+
+class SimpleReader(abc.ABC):
+ """
+ The minimum, low-level interface required from a resource
+ provider.
+ """
+
+ @abc.abstractproperty
+ def package(self):
+ # type: () -> str
+ """
+ The name of the package for which this reader loads resources.
+ """
+
+ @abc.abstractmethod
+ def children(self):
+ # type: () -> List['SimpleReader']
+ """
+ Obtain an iterable of SimpleReader for available
+ child containers (e.g. directories).
+ """
+
+ @abc.abstractmethod
+ def resources(self):
+ # type: () -> List[str]
+ """
+ Obtain available named resources for this virtual package.
+ """
+
+ @abc.abstractmethod
+ def open_binary(self, resource):
+ # type: (str) -> BinaryIO
+ """
+ Obtain a File-like for a named resource.
+ """
+
+ @property
+ def name(self):
+ return self.package.split('.')[-1]
+
+
+class ResourceHandle(Traversable):
+ """
+ Handle to a named resource in a ResourceReader.
+ """
+
+ def __init__(self, parent, name):
+ # type: (ResourceContainer, str) -> None
+ self.parent = parent
+ self.name = name # type: ignore
+
+ def is_file(self):
+ return True
+
+ def is_dir(self):
+ return False
+
+ def open(self, mode='r', *args, **kwargs):
+ stream = self.parent.reader.open_binary(self.name)
+ if 'b' not in mode:
+            stream = io.TextIOWrapper(stream, *args, **kwargs)
+ return stream
+
+ def joinpath(self, name):
+ raise RuntimeError("Cannot traverse into a resource")
+
+
+class ResourceContainer(Traversable):
+ """
+ Traversable container for a package's resources via its reader.
+ """
+
+ def __init__(self, reader):
+ # type: (SimpleReader) -> None
+ self.reader = reader
+
+ def is_dir(self):
+ return True
+
+ def is_file(self):
+ return False
+
+ def iterdir(self):
+        files = (ResourceHandle(self, name) for name in self.reader.resources())
+ dirs = map(ResourceContainer, self.reader.children())
+ return itertools.chain(files, dirs)
+
+ def open(self, *args, **kwargs):
+ raise IsADirectoryError()
+
+ def joinpath(self, name):
+ return next(
+ traversable for traversable in self.iterdir() if traversable.name == name
+ )
+
+
+class TraversableReader(TraversableResources, SimpleReader):
+ """
+ A TraversableResources based on SimpleReader. Resource providers
+ may derive from this class to provide the TraversableResources
+ interface by supplying the SimpleReader interface.
+ """
+
+ def files(self):
+ return ResourceContainer(self)
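
TraversableReader above lets a provider implement only the four SimpleReader primitives and get the full TraversableResources surface for free. A hypothetical in-memory reader as a sketch:

import io
from pkg_resources._vendor.importlib_resources.simple import TraversableReader

class MemoryReader(TraversableReader):   # hypothetical example class
    def __init__(self, package, data):
        self._package = package
        self._data = data                # mapping: resource name -> bytes

    @property
    def package(self):
        return self._package

    def children(self):
        return []                        # flat: no child containers

    def resources(self):
        return list(self._data)

    def open_binary(self, resource):
        return io.BytesIO(self._data[resource])

reader = MemoryReader("mypkg", {"data.txt": b"hello"})
print(reader.files().joinpath("data.txt").read_bytes())   # b'hello'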
diff --git a/pkg_resources/_vendor/importlib_resources/tests/__init__.py b/pkg_resources/_vendor/importlib_resources/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/__init__.py
diff --git a/pkg_resources/_vendor/importlib_resources/tests/_compat.py b/pkg_resources/_vendor/importlib_resources/tests/_compat.py
new file mode 100644
index 0000000..4c99cff
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/_compat.py
@@ -0,0 +1,19 @@
+import os
+
+
+try:
+ from test.support import import_helper # type: ignore
+except ImportError:
+ # Python 3.9 and earlier
+ class import_helper: # type: ignore
+ from test.support import modules_setup, modules_cleanup
+
+
+try:
+ # Python 3.10
+ from test.support.os_helper import unlink
+except ImportError:
+ from test.support import unlink as _unlink
+
+ def unlink(target):
+ return _unlink(os.fspath(target))
diff --git a/pkg_resources/_vendor/importlib_resources/tests/data01/__init__.py b/pkg_resources/_vendor/importlib_resources/tests/data01/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/data01/__init__.py
diff --git a/pkg_resources/_vendor/importlib_resources/tests/data01/subdirectory/__init__.py b/pkg_resources/_vendor/importlib_resources/tests/data01/subdirectory/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/data01/subdirectory/__init__.py
diff --git a/pkg_resources/_vendor/importlib_resources/tests/data02/__init__.py b/pkg_resources/_vendor/importlib_resources/tests/data02/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/data02/__init__.py
diff --git a/pkg_resources/_vendor/importlib_resources/tests/data02/one/__init__.py b/pkg_resources/_vendor/importlib_resources/tests/data02/one/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/data02/one/__init__.py
diff --git a/pkg_resources/_vendor/importlib_resources/tests/data02/one/resource1.txt b/pkg_resources/_vendor/importlib_resources/tests/data02/one/resource1.txt
new file mode 100644
index 0000000..61a813e
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/data02/one/resource1.txt
@@ -0,0 +1 @@
+one resource
diff --git a/pkg_resources/_vendor/importlib_resources/tests/data02/two/__init__.py b/pkg_resources/_vendor/importlib_resources/tests/data02/two/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/data02/two/__init__.py
diff --git a/pkg_resources/_vendor/importlib_resources/tests/data02/two/resource2.txt b/pkg_resources/_vendor/importlib_resources/tests/data02/two/resource2.txt
new file mode 100644
index 0000000..a80ce46
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/data02/two/resource2.txt
@@ -0,0 +1 @@
+two resource
diff --git a/pkg_resources/_vendor/importlib_resources/tests/test_compatibilty_files.py b/pkg_resources/_vendor/importlib_resources/tests/test_compatibilty_files.py
new file mode 100644
index 0000000..d92c7c5
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/test_compatibilty_files.py
@@ -0,0 +1,102 @@
+import io
+import unittest
+
+import importlib_resources as resources
+
+from importlib_resources._adapters import (
+ CompatibilityFiles,
+ wrap_spec,
+)
+
+from . import util
+
+
+class CompatibilityFilesTests(unittest.TestCase):
+ @property
+ def package(self):
+ bytes_data = io.BytesIO(b'Hello, world!')
+ return util.create_package(
+ file=bytes_data,
+ path='some_path',
+ contents=('a', 'b', 'c'),
+ )
+
+ @property
+ def files(self):
+ return resources.files(self.package)
+
+ def test_spec_path_iter(self):
+ self.assertEqual(
+ sorted(path.name for path in self.files.iterdir()),
+ ['a', 'b', 'c'],
+ )
+
+ def test_child_path_iter(self):
+ self.assertEqual(list((self.files / 'a').iterdir()), [])
+
+ def test_orphan_path_iter(self):
+ self.assertEqual(list((self.files / 'a' / 'a').iterdir()), [])
+ self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), [])
+
+ def test_spec_path_is(self):
+ self.assertFalse(self.files.is_file())
+ self.assertFalse(self.files.is_dir())
+
+ def test_child_path_is(self):
+ self.assertTrue((self.files / 'a').is_file())
+ self.assertFalse((self.files / 'a').is_dir())
+
+ def test_orphan_path_is(self):
+ self.assertFalse((self.files / 'a' / 'a').is_file())
+ self.assertFalse((self.files / 'a' / 'a').is_dir())
+ self.assertFalse((self.files / 'a' / 'a' / 'a').is_file())
+ self.assertFalse((self.files / 'a' / 'a' / 'a').is_dir())
+
+ def test_spec_path_name(self):
+ self.assertEqual(self.files.name, 'testingpackage')
+
+ def test_child_path_name(self):
+ self.assertEqual((self.files / 'a').name, 'a')
+
+ def test_orphan_path_name(self):
+ self.assertEqual((self.files / 'a' / 'b').name, 'b')
+ self.assertEqual((self.files / 'a' / 'b' / 'c').name, 'c')
+
+ def test_spec_path_open(self):
+ self.assertEqual(self.files.read_bytes(), b'Hello, world!')
+ self.assertEqual(self.files.read_text(), 'Hello, world!')
+
+ def test_child_path_open(self):
+ self.assertEqual((self.files / 'a').read_bytes(), b'Hello, world!')
+ self.assertEqual((self.files / 'a').read_text(), 'Hello, world!')
+
+ def test_orphan_path_open(self):
+ with self.assertRaises(FileNotFoundError):
+ (self.files / 'a' / 'b').read_bytes()
+ with self.assertRaises(FileNotFoundError):
+ (self.files / 'a' / 'b' / 'c').read_bytes()
+
+ def test_open_invalid_mode(self):
+ with self.assertRaises(ValueError):
+ self.files.open('0')
+
+ def test_orphan_path_invalid(self):
+ with self.assertRaises(ValueError):
+ CompatibilityFiles.OrphanPath()
+
+ def test_wrap_spec(self):
+ spec = wrap_spec(self.package)
+ self.assertIsInstance(spec.loader.get_resource_reader(None), CompatibilityFiles)
+
+
+class CompatibilityFilesNoReaderTests(unittest.TestCase):
+ @property
+ def package(self):
+ return util.create_package_from_loader(None)
+
+ @property
+ def files(self):
+ return resources.files(self.package)
+
+ def test_spec_path_joinpath(self):
+ self.assertIsInstance(self.files / 'a', CompatibilityFiles.OrphanPath)
diff --git a/pkg_resources/_vendor/importlib_resources/tests/test_contents.py b/pkg_resources/_vendor/importlib_resources/tests/test_contents.py
new file mode 100644
index 0000000..525568e
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/test_contents.py
@@ -0,0 +1,43 @@
+import unittest
+import importlib_resources as resources
+
+from . import data01
+from . import util
+
+
+class ContentsTests:
+ expected = {
+ '__init__.py',
+ 'binary.file',
+ 'subdirectory',
+ 'utf-16.file',
+ 'utf-8.file',
+ }
+
+ def test_contents(self):
+ contents = {path.name for path in resources.files(self.data).iterdir()}
+ assert self.expected <= contents
+
+
+class ContentsDiskTests(ContentsTests, unittest.TestCase):
+ def setUp(self):
+ self.data = data01
+
+
+class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase):
+ pass
+
+
+class ContentsNamespaceTests(ContentsTests, unittest.TestCase):
+ expected = {
+ # no __init__ because of namespace design
+ # no subdirectory as incidental difference in fixture
+ 'binary.file',
+ 'utf-16.file',
+ 'utf-8.file',
+ }
+
+ def setUp(self):
+ from . import namespacedata01
+
+ self.data = namespacedata01
diff --git a/pkg_resources/_vendor/importlib_resources/tests/test_files.py b/pkg_resources/_vendor/importlib_resources/tests/test_files.py
new file mode 100644
index 0000000..2676b49
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/test_files.py
@@ -0,0 +1,46 @@
+import typing
+import unittest
+
+import importlib_resources as resources
+from importlib_resources.abc import Traversable
+from . import data01
+from . import util
+
+
+class FilesTests:
+ def test_read_bytes(self):
+ files = resources.files(self.data)
+ actual = files.joinpath('utf-8.file').read_bytes()
+ assert actual == b'Hello, UTF-8 world!\n'
+
+ def test_read_text(self):
+ files = resources.files(self.data)
+ actual = files.joinpath('utf-8.file').read_text(encoding='utf-8')
+ assert actual == 'Hello, UTF-8 world!\n'
+
+ @unittest.skipUnless(
+ hasattr(typing, 'runtime_checkable'),
+ "Only suitable when typing supports runtime_checkable",
+ )
+ def test_traversable(self):
+ assert isinstance(resources.files(self.data), Traversable)
+
+
+class OpenDiskTests(FilesTests, unittest.TestCase):
+ def setUp(self):
+ self.data = data01
+
+
+class OpenZipTests(FilesTests, util.ZipSetup, unittest.TestCase):
+ pass
+
+
+class OpenNamespaceTests(FilesTests, unittest.TestCase):
+ def setUp(self):
+ from . import namespacedata01
+
+ self.data = namespacedata01
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pkg_resources/_vendor/importlib_resources/tests/test_open.py b/pkg_resources/_vendor/importlib_resources/tests/test_open.py
new file mode 100644
index 0000000..87b42c3
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/test_open.py
@@ -0,0 +1,81 @@
+import unittest
+
+import importlib_resources as resources
+from . import data01
+from . import util
+
+
+class CommonBinaryTests(util.CommonTests, unittest.TestCase):
+ def execute(self, package, path):
+ target = resources.files(package).joinpath(path)
+ with target.open('rb'):
+ pass
+
+
+class CommonTextTests(util.CommonTests, unittest.TestCase):
+ def execute(self, package, path):
+ target = resources.files(package).joinpath(path)
+ with target.open():
+ pass
+
+
+class OpenTests:
+ def test_open_binary(self):
+ target = resources.files(self.data) / 'binary.file'
+ with target.open('rb') as fp:
+ result = fp.read()
+ self.assertEqual(result, b'\x00\x01\x02\x03')
+
+ def test_open_text_default_encoding(self):
+ target = resources.files(self.data) / 'utf-8.file'
+ with target.open() as fp:
+ result = fp.read()
+ self.assertEqual(result, 'Hello, UTF-8 world!\n')
+
+ def test_open_text_given_encoding(self):
+ target = resources.files(self.data) / 'utf-16.file'
+ with target.open(encoding='utf-16', errors='strict') as fp:
+ result = fp.read()
+ self.assertEqual(result, 'Hello, UTF-16 world!\n')
+
+ def test_open_text_with_errors(self):
+ # Raises UnicodeError without the 'errors' argument.
+ target = resources.files(self.data) / 'utf-16.file'
+ with target.open(encoding='utf-8', errors='strict') as fp:
+ self.assertRaises(UnicodeError, fp.read)
+ with target.open(encoding='utf-8', errors='ignore') as fp:
+ result = fp.read()
+ self.assertEqual(
+ result,
+ 'H\x00e\x00l\x00l\x00o\x00,\x00 '
+ '\x00U\x00T\x00F\x00-\x001\x006\x00 '
+ '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
+ )
+
+ def test_open_binary_FileNotFoundError(self):
+ target = resources.files(self.data) / 'does-not-exist'
+ self.assertRaises(FileNotFoundError, target.open, 'rb')
+
+ def test_open_text_FileNotFoundError(self):
+ target = resources.files(self.data) / 'does-not-exist'
+ self.assertRaises(FileNotFoundError, target.open)
+
+
+class OpenDiskTests(OpenTests, unittest.TestCase):
+ def setUp(self):
+ self.data = data01
+
+
+class OpenDiskNamespaceTests(OpenTests, unittest.TestCase):
+ def setUp(self):
+ from . import namespacedata01
+
+ self.data = namespacedata01
+
+
+class OpenZipTests(OpenTests, util.ZipSetup, unittest.TestCase):
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pkg_resources/_vendor/importlib_resources/tests/test_path.py b/pkg_resources/_vendor/importlib_resources/tests/test_path.py
new file mode 100644
index 0000000..4f4d394
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/test_path.py
@@ -0,0 +1,64 @@
+import io
+import unittest
+
+import importlib_resources as resources
+from . import data01
+from . import util
+
+
+class CommonTests(util.CommonTests, unittest.TestCase):
+ def execute(self, package, path):
+ with resources.as_file(resources.files(package).joinpath(path)):
+ pass
+
+
+class PathTests:
+ def test_reading(self):
+ # Path should be readable.
+ # Test also implicitly verifies the returned object is a pathlib.Path
+ # instance.
+ target = resources.files(self.data) / 'utf-8.file'
+ with resources.as_file(target) as path:
+ self.assertTrue(path.name.endswith("utf-8.file"), repr(path))
+ # pathlib.Path.read_text() was introduced in Python 3.5.
+ with path.open('r', encoding='utf-8') as file:
+ text = file.read()
+ self.assertEqual('Hello, UTF-8 world!\n', text)
+
+
+class PathDiskTests(PathTests, unittest.TestCase):
+ data = data01
+
+ def test_natural_path(self):
+ """
+ Guarantee the internal implementation detail that
+ file-system-backed resources do not get the tempdir
+ treatment.
+ """
+ target = resources.files(self.data) / 'utf-8.file'
+ with resources.as_file(target) as path:
+ assert 'data' in str(path)
+
+
+class PathMemoryTests(PathTests, unittest.TestCase):
+ def setUp(self):
+ file = io.BytesIO(b'Hello, UTF-8 world!\n')
+ self.addCleanup(file.close)
+ self.data = util.create_package(
+ file=file, path=FileNotFoundError("package exists only in memory")
+ )
+ self.data.__spec__.origin = None
+ self.data.__spec__.has_location = False
+
+
+class PathZipTests(PathTests, util.ZipSetup, unittest.TestCase):
+ def test_remove_in_context_manager(self):
+ # It is not an error if the file that was temporarily stashed on the
+ # file system is removed inside the `with` stanza.
+ target = resources.files(self.data) / 'utf-8.file'
+ with resources.as_file(target) as path:
+ path.unlink()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pkg_resources/_vendor/importlib_resources/tests/test_read.py b/pkg_resources/_vendor/importlib_resources/tests/test_read.py
new file mode 100644
index 0000000..41dd6db
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/test_read.py
@@ -0,0 +1,76 @@
+import unittest
+import importlib_resources as resources
+
+from . import data01
+from . import util
+from importlib import import_module
+
+
+class CommonBinaryTests(util.CommonTests, unittest.TestCase):
+ def execute(self, package, path):
+ resources.files(package).joinpath(path).read_bytes()
+
+
+class CommonTextTests(util.CommonTests, unittest.TestCase):
+ def execute(self, package, path):
+ resources.files(package).joinpath(path).read_text()
+
+
+class ReadTests:
+ def test_read_bytes(self):
+ result = resources.files(self.data).joinpath('binary.file').read_bytes()
+ self.assertEqual(result, b'\0\1\2\3')
+
+ def test_read_text_default_encoding(self):
+ result = resources.files(self.data).joinpath('utf-8.file').read_text()
+ self.assertEqual(result, 'Hello, UTF-8 world!\n')
+
+ def test_read_text_given_encoding(self):
+ result = (
+ resources.files(self.data)
+ .joinpath('utf-16.file')
+ .read_text(encoding='utf-16')
+ )
+ self.assertEqual(result, 'Hello, UTF-16 world!\n')
+
+ def test_read_text_with_errors(self):
+ # Raises UnicodeError without the 'errors' argument.
+ target = resources.files(self.data) / 'utf-16.file'
+ self.assertRaises(UnicodeError, target.read_text, encoding='utf-8')
+ result = target.read_text(encoding='utf-8', errors='ignore')
+ self.assertEqual(
+ result,
+ 'H\x00e\x00l\x00l\x00o\x00,\x00 '
+ '\x00U\x00T\x00F\x00-\x001\x006\x00 '
+ '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
+ )
+
+
+class ReadDiskTests(ReadTests, unittest.TestCase):
+ data = data01
+
+
+class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase):
+ def test_read_submodule_resource(self):
+ submodule = import_module('ziptestdata.subdirectory')
+ result = resources.files(submodule).joinpath('binary.file').read_bytes()
+ self.assertEqual(result, b'\0\1\2\3')
+
+ def test_read_submodule_resource_by_name(self):
+ result = (
+ resources.files('ziptestdata.subdirectory')
+ .joinpath('binary.file')
+ .read_bytes()
+ )
+ self.assertEqual(result, b'\0\1\2\3')
+
+
+class ReadNamespaceTests(ReadTests, unittest.TestCase):
+ def setUp(self):
+ from . import namespacedata01
+
+ self.data = namespacedata01
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pkg_resources/_vendor/importlib_resources/tests/test_reader.py b/pkg_resources/_vendor/importlib_resources/tests/test_reader.py
new file mode 100644
index 0000000..16841a5
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/test_reader.py
@@ -0,0 +1,128 @@
+import os.path
+import sys
+import pathlib
+import unittest
+
+from importlib import import_module
+from importlib_resources.readers import MultiplexedPath, NamespaceReader
+
+
+class MultiplexedPathTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ path = pathlib.Path(__file__).parent / 'namespacedata01'
+ cls.folder = str(path)
+
+ def test_init_no_paths(self):
+ with self.assertRaises(FileNotFoundError):
+ MultiplexedPath()
+
+ def test_init_file(self):
+ with self.assertRaises(NotADirectoryError):
+ MultiplexedPath(os.path.join(self.folder, 'binary.file'))
+
+ def test_iterdir(self):
+ contents = {path.name for path in MultiplexedPath(self.folder).iterdir()}
+ try:
+ contents.remove('__pycache__')
+ except (KeyError, ValueError):
+ pass
+ self.assertEqual(contents, {'binary.file', 'utf-16.file', 'utf-8.file'})
+
+ def test_iterdir_duplicate(self):
+ data01 = os.path.abspath(os.path.join(__file__, '..', 'data01'))
+ contents = {
+ path.name for path in MultiplexedPath(self.folder, data01).iterdir()
+ }
+ for remove in ('__pycache__', '__init__.pyc'):
+ try:
+ contents.remove(remove)
+ except (KeyError, ValueError):
+ pass
+ self.assertEqual(
+ contents,
+ {'__init__.py', 'binary.file', 'subdirectory', 'utf-16.file', 'utf-8.file'},
+ )
+
+ def test_is_dir(self):
+ self.assertEqual(MultiplexedPath(self.folder).is_dir(), True)
+
+ def test_is_file(self):
+ self.assertEqual(MultiplexedPath(self.folder).is_file(), False)
+
+ def test_open_file(self):
+ path = MultiplexedPath(self.folder)
+ with self.assertRaises(FileNotFoundError):
+ path.read_bytes()
+ with self.assertRaises(FileNotFoundError):
+ path.read_text()
+ with self.assertRaises(FileNotFoundError):
+ path.open()
+
+ def test_join_path(self):
+ prefix = os.path.abspath(os.path.join(__file__, '..'))
+ data01 = os.path.join(prefix, 'data01')
+ path = MultiplexedPath(self.folder, data01)
+ self.assertEqual(
+ str(path.joinpath('binary.file'))[len(prefix) + 1 :],
+ os.path.join('namespacedata01', 'binary.file'),
+ )
+ self.assertEqual(
+ str(path.joinpath('subdirectory'))[len(prefix) + 1 :],
+ os.path.join('data01', 'subdirectory'),
+ )
+ self.assertEqual(
+ str(path.joinpath('imaginary'))[len(prefix) + 1 :],
+ os.path.join('namespacedata01', 'imaginary'),
+ )
+
+ def test_repr(self):
+ self.assertEqual(
+ repr(MultiplexedPath(self.folder)),
+ f"MultiplexedPath('{self.folder}')",
+ )
+
+ def test_name(self):
+ self.assertEqual(
+ MultiplexedPath(self.folder).name,
+ os.path.basename(self.folder),
+ )
+
+
+class NamespaceReaderTest(unittest.TestCase):
+ site_dir = str(pathlib.Path(__file__).parent)
+
+ @classmethod
+ def setUpClass(cls):
+ sys.path.append(cls.site_dir)
+
+ @classmethod
+ def tearDownClass(cls):
+ sys.path.remove(cls.site_dir)
+
+ def test_init_error(self):
+ with self.assertRaises(ValueError):
+ NamespaceReader(['path1', 'path2'])
+
+ def test_resource_path(self):
+ namespacedata01 = import_module('namespacedata01')
+ reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
+
+ root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
+ self.assertEqual(
+ reader.resource_path('binary.file'), os.path.join(root, 'binary.file')
+ )
+ self.assertEqual(
+ reader.resource_path('imaginary'), os.path.join(root, 'imaginary')
+ )
+
+ def test_files(self):
+ namespacedata01 = import_module('namespacedata01')
+ reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
+ root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
+ self.assertIsInstance(reader.files(), MultiplexedPath)
+ self.assertEqual(repr(reader.files()), f"MultiplexedPath('{root}')")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pkg_resources/_vendor/importlib_resources/tests/test_resource.py b/pkg_resources/_vendor/importlib_resources/tests/test_resource.py
new file mode 100644
index 0000000..5affd8b
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/test_resource.py
@@ -0,0 +1,252 @@
+import sys
+import unittest
+import importlib_resources as resources
+import uuid
+import pathlib
+
+from . import data01
+from . import zipdata01, zipdata02
+from . import util
+from importlib import import_module
+from ._compat import import_helper, unlink
+
+
+class ResourceTests:
+ # Subclasses are expected to set the `data` attribute.
+
+ def test_is_file_exists(self):
+ target = resources.files(self.data) / 'binary.file'
+ self.assertTrue(target.is_file())
+
+ def test_is_file_missing(self):
+ target = resources.files(self.data) / 'not-a-file'
+ self.assertFalse(target.is_file())
+
+ def test_is_dir(self):
+ target = resources.files(self.data) / 'subdirectory'
+ self.assertFalse(target.is_file())
+ self.assertTrue(target.is_dir())
+
+
+class ResourceDiskTests(ResourceTests, unittest.TestCase):
+ def setUp(self):
+ self.data = data01
+
+
+class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase):
+ pass
+
+
+def names(traversable):
+ return {item.name for item in traversable.iterdir()}
+
+
+class ResourceLoaderTests(unittest.TestCase):
+ def test_resource_contents(self):
+ package = util.create_package(
+ file=data01, path=data01.__file__, contents=['A', 'B', 'C']
+ )
+ self.assertEqual(names(resources.files(package)), {'A', 'B', 'C'})
+
+ def test_is_file(self):
+ package = util.create_package(
+ file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
+ )
+ self.assertTrue(resources.files(package).joinpath('B').is_file())
+
+ def test_is_dir(self):
+ package = util.create_package(
+ file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
+ )
+ self.assertTrue(resources.files(package).joinpath('D').is_dir())
+
+ def test_resource_missing(self):
+ package = util.create_package(
+ file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
+ )
+ self.assertFalse(resources.files(package).joinpath('Z').is_file())
+
+
+class ResourceCornerCaseTests(unittest.TestCase):
+ def test_package_has_no_reader_fallback(self):
+        # Test oddball packages which:
+ # 1. Do not have a ResourceReader as a loader
+ # 2. Are not on the file system
+ # 3. Are not in a zip file
+ module = util.create_package(
+ file=data01, path=data01.__file__, contents=['A', 'B', 'C']
+ )
+ # Give the module a dummy loader.
+ module.__loader__ = object()
+ # Give the module a dummy origin.
+ module.__file__ = '/path/which/shall/not/be/named'
+ module.__spec__.loader = module.__loader__
+ module.__spec__.origin = module.__file__
+ self.assertFalse(resources.files(module).joinpath('A').is_file())
+
+
+class ResourceFromZipsTest01(util.ZipSetupBase, unittest.TestCase):
+ ZIP_MODULE = zipdata01 # type: ignore
+
+ def test_is_submodule_resource(self):
+ submodule = import_module('ziptestdata.subdirectory')
+ self.assertTrue(resources.files(submodule).joinpath('binary.file').is_file())
+
+ def test_read_submodule_resource_by_name(self):
+ self.assertTrue(
+ resources.files('ziptestdata.subdirectory')
+ .joinpath('binary.file')
+ .is_file()
+ )
+
+ def test_submodule_contents(self):
+ submodule = import_module('ziptestdata.subdirectory')
+ self.assertEqual(
+ names(resources.files(submodule)), {'__init__.py', 'binary.file'}
+ )
+
+ def test_submodule_contents_by_name(self):
+ self.assertEqual(
+ names(resources.files('ziptestdata.subdirectory')),
+ {'__init__.py', 'binary.file'},
+ )
+
+
+class ResourceFromZipsTest02(util.ZipSetupBase, unittest.TestCase):
+ ZIP_MODULE = zipdata02 # type: ignore
+
+ def test_unrelated_contents(self):
+ """
+        Test that a zip with two unrelated subpackages returns
+        distinct resources. Ref python/importlib_resources#44.
+ """
+ self.assertEqual(
+ names(resources.files('ziptestdata.one')),
+ {'__init__.py', 'resource1.txt'},
+ )
+ self.assertEqual(
+ names(resources.files('ziptestdata.two')),
+ {'__init__.py', 'resource2.txt'},
+ )
+
+
+class DeletingZipsTest(unittest.TestCase):
+ """Having accessed resources in a zip file should not keep an open
+ reference to the zip.
+ """
+
+ ZIP_MODULE = zipdata01
+
+ def setUp(self):
+ modules = import_helper.modules_setup()
+ self.addCleanup(import_helper.modules_cleanup, *modules)
+
+ data_path = pathlib.Path(self.ZIP_MODULE.__file__)
+ data_dir = data_path.parent
+ self.source_zip_path = data_dir / 'ziptestdata.zip'
+ self.zip_path = pathlib.Path(f'{uuid.uuid4()}.zip').absolute()
+ self.zip_path.write_bytes(self.source_zip_path.read_bytes())
+ sys.path.append(str(self.zip_path))
+ self.data = import_module('ziptestdata')
+
+ def tearDown(self):
+ try:
+ sys.path.remove(str(self.zip_path))
+ except ValueError:
+ pass
+
+ try:
+ del sys.path_importer_cache[str(self.zip_path)]
+ del sys.modules[self.data.__name__]
+ except KeyError:
+ pass
+
+ try:
+ unlink(self.zip_path)
+ except OSError:
+ # If the test fails, this will probably fail too
+ pass
+
+ def test_iterdir_does_not_keep_open(self):
+ c = [item.name for item in resources.files('ziptestdata').iterdir()]
+ self.zip_path.unlink()
+ del c
+
+ def test_is_file_does_not_keep_open(self):
+ c = resources.files('ziptestdata').joinpath('binary.file').is_file()
+ self.zip_path.unlink()
+ del c
+
+ def test_is_file_failure_does_not_keep_open(self):
+ c = resources.files('ziptestdata').joinpath('not-present').is_file()
+ self.zip_path.unlink()
+ del c
+
+ @unittest.skip("Desired but not supported.")
+ def test_as_file_does_not_keep_open(self): # pragma: no cover
+ c = resources.as_file(resources.files('ziptestdata') / 'binary.file')
+ self.zip_path.unlink()
+ del c
+
+ def test_entered_path_does_not_keep_open(self):
+ # This is what certifi does on import to make its bundle
+ # available for the process duration.
+ c = resources.as_file(
+ resources.files('ziptestdata') / 'binary.file'
+ ).__enter__()
+ self.zip_path.unlink()
+ del c
+
+ def test_read_binary_does_not_keep_open(self):
+ c = resources.files('ziptestdata').joinpath('binary.file').read_bytes()
+ self.zip_path.unlink()
+ del c
+
+ def test_read_text_does_not_keep_open(self):
+ c = resources.files('ziptestdata').joinpath('utf-8.file').read_text()
+ self.zip_path.unlink()
+ del c
+
+
+class ResourceFromNamespaceTest01(unittest.TestCase):
+ site_dir = str(pathlib.Path(__file__).parent)
+
+ @classmethod
+ def setUpClass(cls):
+ sys.path.append(cls.site_dir)
+
+ @classmethod
+ def tearDownClass(cls):
+ sys.path.remove(cls.site_dir)
+
+ def test_is_submodule_resource(self):
+ self.assertTrue(
+ resources.files(import_module('namespacedata01'))
+ .joinpath('binary.file')
+ .is_file()
+ )
+
+ def test_read_submodule_resource_by_name(self):
+ self.assertTrue(
+ resources.files('namespacedata01').joinpath('binary.file').is_file()
+ )
+
+ def test_submodule_contents(self):
+ contents = names(resources.files(import_module('namespacedata01')))
+ try:
+ contents.remove('__pycache__')
+ except KeyError:
+ pass
+ self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})
+
+ def test_submodule_contents_by_name(self):
+ contents = names(resources.files('namespacedata01'))
+ try:
+ contents.remove('__pycache__')
+ except KeyError:
+ pass
+ self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pkg_resources/_vendor/importlib_resources/tests/update-zips.py b/pkg_resources/_vendor/importlib_resources/tests/update-zips.py
new file mode 100644
index 0000000..9ef0224
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/update-zips.py
@@ -0,0 +1,53 @@
+"""
+Generate the zip test data files.
+
+Run to build the tests/zipdataNN/ziptestdata.zip files from
+files in tests/dataNN.
+
+Replaces the file with the working copy, but does not commit anything
+to the source repo.
+"""
+
+import contextlib
+import os
+import pathlib
+import zipfile
+
+
+def main():
+ """
+ >>> from unittest import mock
+ >>> monkeypatch = getfixture('monkeypatch')
+ >>> monkeypatch.setattr(zipfile, 'ZipFile', mock.MagicMock())
+ >>> print(); main() # print workaround for bpo-32509
+ <BLANKLINE>
+ ...data01... -> ziptestdata/...
+ ...
+ ...data02... -> ziptestdata/...
+ ...
+ """
+ suffixes = '01', '02'
+ tuple(map(generate, suffixes))
+
+
+def generate(suffix):
+ root = pathlib.Path(__file__).parent.relative_to(os.getcwd())
+ zfpath = root / f'zipdata{suffix}/ziptestdata.zip'
+ with zipfile.ZipFile(zfpath, 'w') as zf:
+ for src, rel in walk(root / f'data{suffix}'):
+ dst = 'ziptestdata' / pathlib.PurePosixPath(rel.as_posix())
+ print(src, '->', dst)
+ zf.write(src, dst)
+
+
+def walk(datapath):
+ for dirpath, dirnames, filenames in os.walk(datapath):
+ with contextlib.suppress(KeyError):
+ dirnames.remove('__pycache__')
+ for filename in filenames:
+ res = pathlib.Path(dirpath) / filename
+ rel = res.relative_to(datapath)
+ yield res, rel
+
+
+__name__ == '__main__' and main()
diff --git a/pkg_resources/_vendor/importlib_resources/tests/util.py b/pkg_resources/_vendor/importlib_resources/tests/util.py
new file mode 100644
index 0000000..c6d83e4
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/util.py
@@ -0,0 +1,178 @@
+import abc
+import importlib
+import io
+import sys
+import types
+from pathlib import Path, PurePath
+
+from . import data01
+from . import zipdata01
+from ..abc import ResourceReader
+from ._compat import import_helper
+
+
+from importlib.machinery import ModuleSpec
+
+
+class Reader(ResourceReader):
+ def __init__(self, **kwargs):
+ vars(self).update(kwargs)
+
+ def get_resource_reader(self, package):
+ return self
+
+ def open_resource(self, path):
+ self._path = path
+ if isinstance(self.file, Exception):
+ raise self.file
+ return self.file
+
+ def resource_path(self, path_):
+ self._path = path_
+ if isinstance(self.path, Exception):
+ raise self.path
+ return self.path
+
+ def is_resource(self, path_):
+ self._path = path_
+ if isinstance(self.path, Exception):
+ raise self.path
+
+ def part(entry):
+ return entry.split('/')
+
+ return any(
+ len(parts) == 1 and parts[0] == path_ for parts in map(part, self._contents)
+ )
+
+ def contents(self):
+ if isinstance(self.path, Exception):
+ raise self.path
+ yield from self._contents
+
+
+def create_package_from_loader(loader, is_package=True):
+ name = 'testingpackage'
+ module = types.ModuleType(name)
+ spec = ModuleSpec(name, loader, origin='does-not-exist', is_package=is_package)
+ module.__spec__ = spec
+ module.__loader__ = loader
+ return module
+
+
+def create_package(file=None, path=None, is_package=True, contents=()):
+ return create_package_from_loader(
+ Reader(file=file, path=path, _contents=contents),
+ is_package,
+ )
+
+
+class CommonTests(metaclass=abc.ABCMeta):
+ """
+ Tests shared by test_open, test_path, and test_read.
+ """
+
+ @abc.abstractmethod
+ def execute(self, package, path):
+ """
+ Call the pertinent legacy API function (e.g. open_text, path)
+ on package and path.
+ """
+
+ def test_package_name(self):
+ # Passing in the package name should succeed.
+ self.execute(data01.__name__, 'utf-8.file')
+
+ def test_package_object(self):
+ # Passing in the package itself should succeed.
+ self.execute(data01, 'utf-8.file')
+
+ def test_string_path(self):
+ # Passing in a string for the path should succeed.
+ path = 'utf-8.file'
+ self.execute(data01, path)
+
+ def test_pathlib_path(self):
+ # Passing in a pathlib.PurePath object for the path should succeed.
+ path = PurePath('utf-8.file')
+ self.execute(data01, path)
+
+ def test_importing_module_as_side_effect(self):
+ # The anchor package can already be imported.
+ del sys.modules[data01.__name__]
+ self.execute(data01.__name__, 'utf-8.file')
+
+ def test_non_package_by_name(self):
+ # The anchor package cannot be a module.
+ with self.assertRaises(TypeError):
+ self.execute(__name__, 'utf-8.file')
+
+ def test_non_package_by_package(self):
+ # The anchor package cannot be a module.
+ with self.assertRaises(TypeError):
+ module = sys.modules['importlib_resources.tests.util']
+ self.execute(module, 'utf-8.file')
+
+ def test_missing_path(self):
+        # Attempting to open or read or request the path for a
+        # non-existent resource should succeed if open_resource
+ # can return a viable data stream.
+ bytes_data = io.BytesIO(b'Hello, world!')
+ package = create_package(file=bytes_data, path=FileNotFoundError())
+ self.execute(package, 'utf-8.file')
+ self.assertEqual(package.__loader__._path, 'utf-8.file')
+
+ def test_extant_path(self):
+ # Attempting to open or read or request the path when the
+ # path does exist should still succeed. Does not assert
+ # anything about the result.
+ bytes_data = io.BytesIO(b'Hello, world!')
+ # any path that exists
+ path = __file__
+ package = create_package(file=bytes_data, path=path)
+ self.execute(package, 'utf-8.file')
+ self.assertEqual(package.__loader__._path, 'utf-8.file')
+
+ def test_useless_loader(self):
+ package = create_package(file=FileNotFoundError(), path=FileNotFoundError())
+ with self.assertRaises(FileNotFoundError):
+ self.execute(package, 'utf-8.file')
+
+
+class ZipSetupBase:
+ ZIP_MODULE = None
+
+ @classmethod
+ def setUpClass(cls):
+ data_path = Path(cls.ZIP_MODULE.__file__)
+ data_dir = data_path.parent
+ cls._zip_path = str(data_dir / 'ziptestdata.zip')
+ sys.path.append(cls._zip_path)
+ cls.data = importlib.import_module('ziptestdata')
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ sys.path.remove(cls._zip_path)
+ except ValueError:
+ pass
+
+ try:
+ del sys.path_importer_cache[cls._zip_path]
+ del sys.modules[cls.data.__name__]
+ except KeyError:
+ pass
+
+ try:
+ del cls.data
+ del cls._zip_path
+ except AttributeError:
+ pass
+
+ def setUp(self):
+ modules = import_helper.modules_setup()
+ self.addCleanup(import_helper.modules_cleanup, *modules)
+
+
+class ZipSetup(ZipSetupBase):
+ ZIP_MODULE = zipdata01 # type: ignore
diff --git a/pkg_resources/_vendor/importlib_resources/tests/zipdata01/__init__.py b/pkg_resources/_vendor/importlib_resources/tests/zipdata01/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/zipdata01/__init__.py
diff --git a/pkg_resources/_vendor/importlib_resources/tests/zipdata02/__init__.py b/pkg_resources/_vendor/importlib_resources/tests/zipdata02/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/importlib_resources/tests/zipdata02/__init__.py
diff --git a/pkg_resources/_vendor/jaraco.context-4.1.1.dist-info/top_level.txt b/pkg_resources/_vendor/jaraco.context-4.1.1.dist-info/top_level.txt
new file mode 100644
index 0000000..f6205a5
--- /dev/null
+++ b/pkg_resources/_vendor/jaraco.context-4.1.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+jaraco
diff --git a/pkg_resources/_vendor/jaraco.functools-3.5.0.dist-info/top_level.txt b/pkg_resources/_vendor/jaraco.functools-3.5.0.dist-info/top_level.txt
new file mode 100644
index 0000000..f6205a5
--- /dev/null
+++ b/pkg_resources/_vendor/jaraco.functools-3.5.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+jaraco
diff --git a/pkg_resources/_vendor/jaraco.text-3.7.0.dist-info/top_level.txt b/pkg_resources/_vendor/jaraco.text-3.7.0.dist-info/top_level.txt
new file mode 100644
index 0000000..f6205a5
--- /dev/null
+++ b/pkg_resources/_vendor/jaraco.text-3.7.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+jaraco
diff --git a/pkg_resources/_vendor/jaraco/__init__.py b/pkg_resources/_vendor/jaraco/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/jaraco/__init__.py
diff --git a/pkg_resources/_vendor/jaraco/context.py b/pkg_resources/_vendor/jaraco/context.py
new file mode 100644
index 0000000..87a4e3d
--- /dev/null
+++ b/pkg_resources/_vendor/jaraco/context.py
@@ -0,0 +1,213 @@
+import os
+import subprocess
+import contextlib
+import functools
+import tempfile
+import shutil
+import operator
+
+
+@contextlib.contextmanager
+def pushd(dir):
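+    """
+    Change the working directory to *dir* for the duration of the
+    context, restoring the original working directory on exit.
+
+    A minimal sketch (round-trips through the current directory, so it
+    is deterministic):
+
+    >>> import os
+    >>> here = os.getcwd()
+    >>> with pushd(here) as dir:
+    ...     dir == here == os.getcwd()
+    True
+    >>> os.getcwd() == here
+    True
+    """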
+ orig = os.getcwd()
+ os.chdir(dir)
+ try:
+ yield dir
+ finally:
+ os.chdir(orig)
+
+
+@contextlib.contextmanager
+def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
+ """
+ Get a tarball, extract it, change to that directory, yield, then
+ clean up.
+ `runner` is the function to invoke commands.
+ `pushd` is a context manager for changing the directory.
+ """
+ if target_dir is None:
+ target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
+ if runner is None:
+ runner = functools.partial(subprocess.check_call, shell=True)
+ # In the tar command, use --strip-components=1 to strip the first path and
+ # then
+ # use -C to cause the files to be extracted to {target_dir}. This ensures
+ # that we always know where the files were extracted.
+ runner('mkdir {target_dir}'.format(**vars()))
+ try:
+ getter = 'wget {url} -O -'
+ extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
+ cmd = ' | '.join((getter, extract))
+ runner(cmd.format(compression=infer_compression(url), **vars()))
+ with pushd(target_dir):
+ yield target_dir
+ finally:
+ runner('rm -Rf {target_dir}'.format(**vars()))
+
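+# A typical invocation might look like the following (the URL is
+# illustrative; the default runner shells out to wget, tar, mkdir, and rm):
+#
+#   with tarball_context('https://example.com/project-1.0.tar.gz') as dir:
+#       ...  # cwd is the freshly extracted directory; removed on exit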
+
+def infer_compression(url):
+ """
+ Given a URL or filename, infer the compression code for tar.
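+
+    For example (the filenames are illustrative):
+
+    >>> infer_compression('project.tar.gz')
+    'z'
+    >>> infer_compression('project.tar.xz')
+    'J'
+    >>> infer_compression('project.tar')
+    'z'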
+ """
+ # cheat and just assume it's the last two characters
+ compression_indicator = url[-2:]
+ mapping = dict(gz='z', bz='j', xz='J')
+ # Assume 'z' (gzip) if no match
+ return mapping.get(compression_indicator, 'z')
+
+
+@contextlib.contextmanager
+def temp_dir(remover=shutil.rmtree):
+ """
+ Create a temporary directory context. Pass a custom remover
+ to override the removal behavior.
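+
+    A short sketch (the directory name is whatever ``mkdtemp`` returns):
+
+    >>> import os
+    >>> with temp_dir() as d:
+    ...     os.path.isdir(d)
+    True
+    >>> os.path.isdir(d)
+    False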
+ """
+ temp_dir = tempfile.mkdtemp()
+ try:
+ yield temp_dir
+ finally:
+ remover(temp_dir)
+
+
+@contextlib.contextmanager
+def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
+ """
+ Check out the repo indicated by url.
+
+ If dest_ctx is supplied, it should be a context manager
+ to yield the target directory for the check out.
+ """
+ exe = 'git' if 'git' in url else 'hg'
+ with dest_ctx() as repo_dir:
+ cmd = [exe, 'clone', url, repo_dir]
+ if branch:
+ cmd.extend(['--branch', branch])
+ devnull = open(os.path.devnull, 'w')
+ stdout = devnull if quiet else None
+ subprocess.check_call(cmd, stdout=stdout)
+ yield repo_dir
+
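+# For instance (illustrative; requires git on PATH and network access):
+#
+#   with repo_context('https://github.com/jaraco/jaraco.context') as dir:
+#       ...  # dir holds a fresh clone, removed when the block exits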
+
+@contextlib.contextmanager
+def null():
+ yield
+
+
+class ExceptionTrap:
+ """
+ A context manager that will catch certain exceptions and provide an
+ indication they occurred.
+
+ >>> with ExceptionTrap() as trap:
+ ... raise Exception()
+ >>> bool(trap)
+ True
+
+ >>> with ExceptionTrap() as trap:
+ ... pass
+ >>> bool(trap)
+ False
+
+ >>> with ExceptionTrap(ValueError) as trap:
+ ... raise ValueError("1 + 1 is not 3")
+ >>> bool(trap)
+ True
+
+ >>> with ExceptionTrap(ValueError) as trap:
+ ... raise Exception()
+ Traceback (most recent call last):
+ ...
+ Exception
+
+ >>> bool(trap)
+ False
+ """
+
+ exc_info = None, None, None
+
+ def __init__(self, exceptions=(Exception,)):
+ self.exceptions = exceptions
+
+ def __enter__(self):
+ return self
+
+ @property
+ def type(self):
+ return self.exc_info[0]
+
+ @property
+ def value(self):
+ return self.exc_info[1]
+
+ @property
+ def tb(self):
+ return self.exc_info[2]
+
+ def __exit__(self, *exc_info):
+ type = exc_info[0]
+ matches = type and issubclass(type, self.exceptions)
+ if matches:
+ self.exc_info = exc_info
+ return matches
+
+ def __bool__(self):
+ return bool(self.type)
+
+ def raises(self, func, *, _test=bool):
+ """
+ Wrap func and replace the result with the truth
+ value of the trap (True if an exception occurred).
+
+        First, give the decorator an alias to support Python 3.8
+        syntax, which does not allow arbitrary expressions as decorators.
+
+ >>> raises = ExceptionTrap(ValueError).raises
+
+ Now decorate a function that always fails.
+
+ >>> @raises
+ ... def fail():
+ ... raise ValueError('failed')
+ >>> fail()
+ True
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ with ExceptionTrap(self.exceptions) as trap:
+ func(*args, **kwargs)
+ return _test(trap)
+
+ return wrapper
+
+ def passes(self, func):
+ """
+ Wrap func and replace the result with the truth
+ value of the trap (True if no exception).
+
+        First, give the decorator an alias to support Python 3.8
+        syntax, which does not allow arbitrary expressions as decorators.
+
+ >>> passes = ExceptionTrap(ValueError).passes
+
+ Now decorate a function that always fails.
+
+ >>> @passes
+ ... def fail():
+ ... raise ValueError('failed')
+
+ >>> fail()
+ False
+ """
+ return self.raises(func, _test=operator.not_)
+
+
+class suppress(contextlib.suppress, contextlib.ContextDecorator):
+ """
+ A version of contextlib.suppress with decorator support.
+
+ >>> @suppress(KeyError)
+ ... def key_error():
+ ... {}['']
+ >>> key_error()
+ """
diff --git a/pkg_resources/_vendor/jaraco/functools.py b/pkg_resources/_vendor/jaraco/functools.py
new file mode 100644
index 0000000..a3fea3a
--- /dev/null
+++ b/pkg_resources/_vendor/jaraco/functools.py
@@ -0,0 +1,525 @@
+import functools
+import time
+import inspect
+import collections
+import types
+import itertools
+
+# Alias the vendored package so later references to ``more_itertools`` resolve.
+import pkg_resources.extern.more_itertools as more_itertools
+
+from typing import Callable, TypeVar
+
+
+CallableT = TypeVar("CallableT", bound=Callable[..., object])
+
+
+def compose(*funcs):
+ """
+ Compose any number of unary functions into a single unary function.
+
+ >>> import textwrap
+ >>> expected = str.strip(textwrap.dedent(compose.__doc__))
+ >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
+ >>> strip_and_dedent(compose.__doc__) == expected
+ True
+
+ Compose also allows the innermost function to take arbitrary arguments.
+
+ >>> round_three = lambda x: round(x, ndigits=3)
+ >>> f = compose(round_three, int.__truediv__)
+ >>> [f(3*x, x+1) for x in range(1,10)]
+ [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
+ """
+
+ def compose_two(f1, f2):
+ return lambda *args, **kwargs: f1(f2(*args, **kwargs))
+
+ return functools.reduce(compose_two, funcs)
+
+
+def method_caller(method_name, *args, **kwargs):
+ """
+ Return a function that will call a named method on the
+ target object with optional positional and keyword
+ arguments.
+
+ >>> lower = method_caller('lower')
+ >>> lower('MyString')
+ 'mystring'
+ """
+
+ def call_method(target):
+ func = getattr(target, method_name)
+ return func(*args, **kwargs)
+
+ return call_method
+
+
+def once(func):
+ """
+ Decorate func so it's only ever called the first time.
+
+    This decorator can ensure that an expensive or non-idempotent function
+    runs at most once, making subsequent calls cheap and idempotent.
+
+ >>> add_three = once(lambda a: a+3)
+ >>> add_three(3)
+ 6
+ >>> add_three(9)
+ 6
+ >>> add_three('12')
+ 6
+
+ To reset the stored value, simply clear the property ``saved_result``.
+
+ >>> del add_three.saved_result
+ >>> add_three(9)
+ 12
+ >>> add_three(8)
+ 12
+
+ Or invoke 'reset()' on it.
+
+ >>> add_three.reset()
+ >>> add_three(-3)
+ 0
+ >>> add_three(0)
+ 0
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ if not hasattr(wrapper, 'saved_result'):
+ wrapper.saved_result = func(*args, **kwargs)
+ return wrapper.saved_result
+
+ wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
+ return wrapper
+
+
+def method_cache(
+ method: CallableT,
+ cache_wrapper: Callable[
+ [CallableT], CallableT
+ ] = functools.lru_cache(), # type: ignore[assignment]
+) -> CallableT:
+ """
+ Wrap lru_cache to support storing the cache data in the object instances.
+
+ Abstracts the common paradigm where the method explicitly saves an
+ underscore-prefixed protected property on first call and returns that
+ subsequently.
+
+ >>> class MyClass:
+ ... calls = 0
+ ...
+ ... @method_cache
+ ... def method(self, value):
+ ... self.calls += 1
+ ... return value
+
+ >>> a = MyClass()
+ >>> a.method(3)
+ 3
+ >>> for x in range(75):
+ ... res = a.method(x)
+ >>> a.calls
+ 75
+
+ Note that the apparent behavior will be exactly like that of lru_cache
+ except that the cache is stored on each instance, so values in one
+ instance will not flush values from another, and when an instance is
+ deleted, so are the cached values for that instance.
+
+ >>> b = MyClass()
+ >>> for x in range(35):
+ ... res = b.method(x)
+ >>> b.calls
+ 35
+ >>> a.method(0)
+ 0
+ >>> a.calls
+ 75
+
+ Note that if method had been decorated with ``functools.lru_cache()``,
+ a.calls would have been 76 (due to the cached value of 0 having been
+ flushed by the 'b' instance).
+
+ Clear the cache with ``.cache_clear()``
+
+ >>> a.method.cache_clear()
+
+ Same for a method that hasn't yet been called.
+
+ >>> c = MyClass()
+ >>> c.method.cache_clear()
+
+ Another cache wrapper may be supplied:
+
+ >>> cache = functools.lru_cache(maxsize=2)
+ >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
+ >>> a = MyClass()
+ >>> a.method2()
+ 3
+
+ Caution - do not subsequently wrap the method with another decorator, such
+ as ``@property``, which changes the semantics of the function.
+
+ See also
+ http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
+ for another implementation and additional justification.
+ """
+
+ def wrapper(self: object, *args: object, **kwargs: object) -> object:
+ # it's the first call, replace the method with a cached, bound method
+ bound_method: CallableT = types.MethodType( # type: ignore[assignment]
+ method, self
+ )
+ cached_method = cache_wrapper(bound_method)
+ setattr(self, method.__name__, cached_method)
+ return cached_method(*args, **kwargs)
+
+ # Support cache clear even before cache has been created.
+ wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
+
+ return ( # type: ignore[return-value]
+ _special_method_cache(method, cache_wrapper) or wrapper
+ )
+
+
+def _special_method_cache(method, cache_wrapper):
+ """
+ Because Python treats special methods differently, it's not
+ possible to use instance attributes to implement the cached
+ methods.
+
+ Instead, install the wrapper method under a different name
+ and return a simple proxy to that wrapper.
+
+ https://github.com/jaraco/jaraco.functools/issues/5
+ """
+ name = method.__name__
+ special_names = '__getattr__', '__getitem__'
+ if name not in special_names:
+ return
+
+ wrapper_name = '__cached' + name
+
+ def proxy(self, *args, **kwargs):
+ if wrapper_name not in vars(self):
+ bound = types.MethodType(method, self)
+ cache = cache_wrapper(bound)
+ setattr(self, wrapper_name, cache)
+ else:
+ cache = getattr(self, wrapper_name)
+ return cache(*args, **kwargs)
+
+ return proxy
+
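+# For instance, ``method_cache`` applied to a dunder routes through this
+# helper (a sketch; ``expensive_lookup`` is a hypothetical stand-in):
+#
+#   class Lookup:
+#       @method_cache
+#       def __getitem__(self, key):
+#           return expensive_lookup(key)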
+
+def apply(transform):
+ """
+ Decorate a function with a transform function that is
+ invoked on results returned from the decorated function.
+
+ >>> @apply(reversed)
+ ... def get_numbers(start):
+ ... "doc for get_numbers"
+ ... return range(start, start+3)
+ >>> list(get_numbers(4))
+ [6, 5, 4]
+ >>> get_numbers.__doc__
+ 'doc for get_numbers'
+ """
+
+ def wrap(func):
+ return functools.wraps(func)(compose(transform, func))
+
+ return wrap
+
+
+def result_invoke(action):
+ r"""
+ Decorate a function with an action function that is
+ invoked on the results returned from the decorated
+ function (for its side-effect), then return the original
+ result.
+
+ >>> @result_invoke(print)
+ ... def add_two(a, b):
+ ... return a + b
+ >>> x = add_two(2, 3)
+ 5
+ >>> x
+ 5
+ """
+
+ def wrap(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ result = func(*args, **kwargs)
+ action(result)
+ return result
+
+ return wrapper
+
+ return wrap
+
+
+def call_aside(f, *args, **kwargs):
+ """
+ Call a function for its side effect after initialization.
+
+ >>> @call_aside
+ ... def func(): print("called")
+ called
+ >>> func()
+ called
+
+    Use functools.partial to pass parameters to the initial call.
+
+ >>> @functools.partial(call_aside, name='bingo')
+ ... def func(name): print("called with", name)
+ called with bingo
+ """
+ f(*args, **kwargs)
+ return f
+
+
+class Throttler:
+ """
+ Rate-limit a function (or other callable)
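+
+    A sketch: at most two calls per second are allowed here, so the second
+    call sleeps for roughly half a second before printing.
+
+    >>> throttled = Throttler(print, max_rate=2)
+    >>> throttled('one')
+    one
+    >>> throttled('two')
+    two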
+ """
+
+ def __init__(self, func, max_rate=float('Inf')):
+ if isinstance(func, Throttler):
+ func = func.func
+ self.func = func
+ self.max_rate = max_rate
+ self.reset()
+
+ def reset(self):
+ self.last_called = 0
+
+ def __call__(self, *args, **kwargs):
+ self._wait()
+ return self.func(*args, **kwargs)
+
+ def _wait(self):
+ "ensure at least 1/max_rate seconds from last call"
+ elapsed = time.time() - self.last_called
+ must_wait = 1 / self.max_rate - elapsed
+ time.sleep(max(0, must_wait))
+ self.last_called = time.time()
+
+ def __get__(self, obj, type=None):
+ return first_invoke(self._wait, functools.partial(self.func, obj))
+
+
+def first_invoke(func1, func2):
+ """
+ Return a function that when invoked will invoke func1 without
+ any parameters (for its side-effect) and then invoke func2
+ with whatever parameters were passed, returning its result.
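+
+    A small illustration:
+
+    >>> wrapped = first_invoke(lambda: print('ready'), lambda x: x * 2)
+    >>> wrapped(5)
+    ready
+    10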
+ """
+
+ def wrapper(*args, **kwargs):
+ func1()
+ return func2(*args, **kwargs)
+
+ return wrapper
+
+
+def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
+ """
+ Given a callable func, trap the indicated exceptions
+ for up to 'retries' times, invoking cleanup on the
+ exception. On the final attempt, allow any exceptions
+ to propagate.
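+
+    A sketch with a callable that fails twice before succeeding:
+
+    >>> attempts = []
+    >>> def flaky():
+    ...     attempts.append(1)
+    ...     if len(attempts) < 3:
+    ...         raise ValueError('not yet')
+    ...     return 'ok'
+    >>> retry_call(flaky, retries=5, trap=ValueError)
+    'ok'
+    >>> len(attempts)
+    3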
+ """
+ attempts = itertools.count() if retries == float('inf') else range(retries)
+ for attempt in attempts:
+ try:
+ return func()
+ except trap:
+ cleanup()
+
+ return func()
+
+
+def retry(*r_args, **r_kwargs):
+ """
+ Decorator wrapper for retry_call. Accepts arguments to retry_call
+ except func and then returns a decorator for the decorated function.
+
+ Ex:
+
+ >>> @retry(retries=3)
+ ... def my_func(a, b):
+ ... "this is my funk"
+ ... print(a, b)
+ >>> my_func.__doc__
+ 'this is my funk'
+ """
+
+ def decorate(func):
+ @functools.wraps(func)
+ def wrapper(*f_args, **f_kwargs):
+ bound = functools.partial(func, *f_args, **f_kwargs)
+ return retry_call(bound, *r_args, **r_kwargs)
+
+ return wrapper
+
+ return decorate
+
+
+def print_yielded(func):
+ """
+ Convert a generator into a function that prints all yielded elements
+
+ >>> @print_yielded
+ ... def x():
+ ... yield 3; yield None
+ >>> x()
+ 3
+ None
+ """
+ print_all = functools.partial(map, print)
+ print_results = compose(more_itertools.consume, print_all, func)
+ return functools.wraps(func)(print_results)
+
+
+def pass_none(func):
+ """
+ Wrap func so it's not called if its first param is None
+
+ >>> print_text = pass_none(print)
+ >>> print_text('text')
+ text
+ >>> print_text(None)
+ """
+
+ @functools.wraps(func)
+ def wrapper(param, *args, **kwargs):
+ if param is not None:
+ return func(param, *args, **kwargs)
+
+ return wrapper
+
+
+def assign_params(func, namespace):
+ """
+ Assign parameters from namespace where func solicits.
+
+ >>> def func(x, y=3):
+ ... print(x, y)
+ >>> assigned = assign_params(func, dict(x=2, z=4))
+ >>> assigned()
+ 2 3
+
+ The usual errors are raised if a function doesn't receive
+ its required parameters:
+
+ >>> assigned = assign_params(func, dict(y=3, z=4))
+ >>> assigned()
+ Traceback (most recent call last):
+ TypeError: func() ...argument...
+
+ It even works on methods:
+
+ >>> class Handler:
+ ... def meth(self, arg):
+ ... print(arg)
+ >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
+ crystal
+ """
+ sig = inspect.signature(func)
+ params = sig.parameters.keys()
+ call_ns = {k: namespace[k] for k in params if k in namespace}
+ return functools.partial(func, **call_ns)
+
+
+def save_method_args(method):
+ """
+ Wrap a method such that when it is called, the args and kwargs are
+ saved on the method.
+
+ >>> class MyClass:
+ ... @save_method_args
+ ... def method(self, a, b):
+ ... print(a, b)
+ >>> my_ob = MyClass()
+ >>> my_ob.method(1, 2)
+ 1 2
+ >>> my_ob._saved_method.args
+ (1, 2)
+ >>> my_ob._saved_method.kwargs
+ {}
+ >>> my_ob.method(a=3, b='foo')
+ 3 foo
+ >>> my_ob._saved_method.args
+ ()
+ >>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
+ True
+
+ The arguments are stored on the instance, allowing for
+    different instances to save different args.
+
+ >>> your_ob = MyClass()
+ >>> your_ob.method({str('x'): 3}, b=[4])
+ {'x': 3} [4]
+ >>> your_ob._saved_method.args
+ ({'x': 3},)
+ >>> my_ob._saved_method.args
+ ()
+ """
+ args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
+
+ @functools.wraps(method)
+ def wrapper(self, *args, **kwargs):
+ attr_name = '_saved_' + method.__name__
+ attr = args_and_kwargs(args, kwargs)
+ setattr(self, attr_name, attr)
+ return method(self, *args, **kwargs)
+
+ return wrapper
+
+
+def except_(*exceptions, replace=None, use=None):
+ """
+ Replace the indicated exceptions, if raised, with the indicated
+ literal replacement or evaluated expression (if present).
+
+ >>> safe_int = except_(ValueError)(int)
+ >>> safe_int('five')
+ >>> safe_int('5')
+ 5
+
+ Specify a literal replacement with ``replace``.
+
+ >>> safe_int_r = except_(ValueError, replace=0)(int)
+ >>> safe_int_r('five')
+ 0
+
+ Provide an expression to ``use`` to pass through particular parameters.
+
+ >>> safe_int_pt = except_(ValueError, use='args[0]')(int)
+ >>> safe_int_pt('five')
+ 'five'
+
+ """
+
+ def decorate(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except exceptions:
+ try:
+ return eval(use)
+ except TypeError:
+ return replace
+
+ return wrapper
+
+ return decorate
diff --git a/pkg_resources/_vendor/jaraco/text/Lorem ipsum.txt b/pkg_resources/_vendor/jaraco/text/Lorem ipsum.txt
new file mode 100644
index 0000000..986f944
--- /dev/null
+++ b/pkg_resources/_vendor/jaraco/text/Lorem ipsum.txt
@@ -0,0 +1,2 @@
+Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus magna felis sollicitudin mauris. Integer in mauris eu nibh euismod gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
diff --git a/pkg_resources/_vendor/jaraco/text/__init__.py b/pkg_resources/_vendor/jaraco/text/__init__.py
new file mode 100644
index 0000000..c466378
--- /dev/null
+++ b/pkg_resources/_vendor/jaraco/text/__init__.py
@@ -0,0 +1,599 @@
+import re
+import itertools
+import textwrap
+import functools
+
+try:
+ from importlib.resources import files # type: ignore
+except ImportError: # pragma: nocover
+ from pkg_resources.extern.importlib_resources import files # type: ignore
+
+from pkg_resources.extern.jaraco.functools import compose, method_cache
+from pkg_resources.extern.jaraco.context import ExceptionTrap
+
+
+def substitution(old, new):
+ """
+    Return a function that will perform a substitution on a string.
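+
+    For example:
+
+    >>> substitution('foo', 'bar')('a foo here')
+    'a bar here'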
+ """
+ return lambda s: s.replace(old, new)
+
+
+def multi_substitution(*substitutions):
+ """
+ Take a sequence of pairs specifying substitutions, and create
+ a function that performs those substitutions.
+
+ >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
+ 'baz'
+ """
+ substitutions = itertools.starmap(substitution, substitutions)
+ # compose function applies last function first, so reverse the
+ # substitutions to get the expected order.
+ substitutions = reversed(tuple(substitutions))
+ return compose(*substitutions)
+
+
+class FoldedCase(str):
+ """
+ A case insensitive string class; behaves just like str
+ except compares equal when the only variation is case.
+
+ >>> s = FoldedCase('hello world')
+
+ >>> s == 'Hello World'
+ True
+
+ >>> 'Hello World' == s
+ True
+
+ >>> s != 'Hello World'
+ False
+
+ >>> s.index('O')
+ 4
+
+ >>> s.split('O')
+ ['hell', ' w', 'rld']
+
+ >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
+ ['alpha', 'Beta', 'GAMMA']
+
+ Sequence membership is straightforward.
+
+ >>> "Hello World" in [s]
+ True
+ >>> s in ["Hello World"]
+ True
+
+ You may test for set inclusion, but candidate and elements
+ must both be folded.
+
+ >>> FoldedCase("Hello World") in {s}
+ True
+ >>> s in {FoldedCase("Hello World")}
+ True
+
+ String inclusion works as long as the FoldedCase object
+ is on the right.
+
+ >>> "hello" in FoldedCase("Hello World")
+ True
+
+ But not if the FoldedCase object is on the left:
+
+ >>> FoldedCase('hello') in 'Hello World'
+ False
+
+ In that case, use ``in_``:
+
+ >>> FoldedCase('hello').in_('Hello World')
+ True
+
+ >>> FoldedCase('hello') > FoldedCase('Hello')
+ False
+ """
+
+ def __lt__(self, other):
+ return self.lower() < other.lower()
+
+ def __gt__(self, other):
+ return self.lower() > other.lower()
+
+ def __eq__(self, other):
+ return self.lower() == other.lower()
+
+ def __ne__(self, other):
+ return self.lower() != other.lower()
+
+ def __hash__(self):
+ return hash(self.lower())
+
+ def __contains__(self, other):
+ return super().lower().__contains__(other.lower())
+
+ def in_(self, other):
+ "Does self appear in other?"
+ return self in FoldedCase(other)
+
+ # cache lower since it's likely to be called frequently.
+ @method_cache
+ def lower(self):
+ return super().lower()
+
+ def index(self, sub):
+ return self.lower().index(sub.lower())
+
+ def split(self, splitter=' ', maxsplit=0):
+ pattern = re.compile(re.escape(splitter), re.I)
+ return pattern.split(self, maxsplit)
+
+
+# Python 3.8 compatibility
+_unicode_trap = ExceptionTrap(UnicodeDecodeError)
+
+
+@_unicode_trap.passes
+def is_decodable(value):
+ r"""
+ Return True if the supplied value is decodable (using the default
+ encoding).
+
+ >>> is_decodable(b'\xff')
+ False
+ >>> is_decodable(b'\x32')
+ True
+ """
+ value.decode()
+
+
+def is_binary(value):
+ r"""
+ Return True if the value appears to be binary (that is, it's a byte
+ string and isn't decodable).
+
+ >>> is_binary(b'\xff')
+ True
+ >>> is_binary('\xff')
+ False
+ """
+ return isinstance(value, bytes) and not is_decodable(value)
+
+
+def trim(s):
+ r"""
+ Trim something like a docstring to remove the whitespace that
+ is common due to indentation and formatting.
+
+ >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
+ 'foo = bar\n\tbar = baz'
+ """
+ return textwrap.dedent(s).strip()
+
+
+def wrap(s):
+ """
+ Wrap lines of text, retaining existing newlines as
+ paragraph markers.
+
+ >>> print(wrap(lorem_ipsum))
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
+ eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
+ minim veniam, quis nostrud exercitation ullamco laboris nisi ut
+ aliquip ex ea commodo consequat. Duis aute irure dolor in
+ reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
+ pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
+ culpa qui officia deserunt mollit anim id est laborum.
+ <BLANKLINE>
+ Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
+ varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
+ magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
+ gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
+ risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
+ eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
+ fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
+ a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
+ neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
+ sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
+ nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
+ quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
+ molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
+ """
+ paragraphs = s.splitlines()
+ wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
+ return '\n\n'.join(wrapped)
+
+
+def unwrap(s):
+ r"""
+ Given a multi-line string, return an unwrapped version.
+
+ >>> wrapped = wrap(lorem_ipsum)
+ >>> wrapped.count('\n')
+ 20
+ >>> unwrapped = unwrap(wrapped)
+ >>> unwrapped.count('\n')
+ 1
+ >>> print(unwrapped)
+ Lorem ipsum dolor sit amet, consectetur adipiscing ...
+ Curabitur pretium tincidunt lacus. Nulla gravida orci ...
+
+ """
+ paragraphs = re.split(r'\n\n+', s)
+ cleaned = (para.replace('\n', ' ') for para in paragraphs)
+ return '\n'.join(cleaned)
+
+
+class Splitter(object):
+ """object that will split a string with the given arguments for each call
+
+ >>> s = Splitter(',')
+ >>> s('hello, world, this is your, master calling')
+ ['hello', ' world', ' this is your', ' master calling']
+ """
+
+ def __init__(self, *args):
+ self.args = args
+
+ def __call__(self, s):
+ return s.split(*self.args)
+
+
+def indent(string, prefix=' ' * 4):
+ """
+ >>> indent('foo')
+ ' foo'
+ """
+ return prefix + string
+
+
+class WordSet(tuple):
+ """
+ Given an identifier, return the words that identifier represents,
+ whether in camel case, underscore-separated, etc.
+
+ >>> WordSet.parse("camelCase")
+ ('camel', 'Case')
+
+ >>> WordSet.parse("under_sep")
+ ('under', 'sep')
+
+ Acronyms should be retained
+
+ >>> WordSet.parse("firstSNL")
+ ('first', 'SNL')
+
+ >>> WordSet.parse("you_and_I")
+ ('you', 'and', 'I')
+
+ >>> WordSet.parse("A simple test")
+ ('A', 'simple', 'test')
+
+ Multiple caps should not interfere with the first cap of another word.
+
+ >>> WordSet.parse("myABCClass")
+ ('my', 'ABC', 'Class')
+
+ The result is a WordSet, so you can get the form you need.
+
+ >>> WordSet.parse("myABCClass").underscore_separated()
+ 'my_ABC_Class'
+
+ >>> WordSet.parse('a-command').camel_case()
+ 'ACommand'
+
+ >>> WordSet.parse('someIdentifier').lowered().space_separated()
+ 'some identifier'
+
+ Slices of the result should return another WordSet.
+
+ >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
+ 'out_of_context'
+
+ >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
+ 'word set'
+
+ >>> example = WordSet.parse('figured it out')
+ >>> example.headless_camel_case()
+ 'figuredItOut'
+ >>> example.dash_separated()
+ 'figured-it-out'
+
+ """
+
+ _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
+
+ def capitalized(self):
+ return WordSet(word.capitalize() for word in self)
+
+ def lowered(self):
+ return WordSet(word.lower() for word in self)
+
+ def camel_case(self):
+ return ''.join(self.capitalized())
+
+ def headless_camel_case(self):
+ words = iter(self)
+ first = next(words).lower()
+ new_words = itertools.chain((first,), WordSet(words).camel_case())
+ return ''.join(new_words)
+
+ def underscore_separated(self):
+ return '_'.join(self)
+
+ def dash_separated(self):
+ return '-'.join(self)
+
+ def space_separated(self):
+ return ' '.join(self)
+
+ def trim_right(self, item):
+ """
+ Remove the item from the end of the set.
+
+ >>> WordSet.parse('foo bar').trim_right('foo')
+ ('foo', 'bar')
+ >>> WordSet.parse('foo bar').trim_right('bar')
+ ('foo',)
+ >>> WordSet.parse('').trim_right('bar')
+ ()
+ """
+ return self[:-1] if self and self[-1] == item else self
+
+ def trim_left(self, item):
+ """
+ Remove the item from the beginning of the set.
+
+ >>> WordSet.parse('foo bar').trim_left('foo')
+ ('bar',)
+ >>> WordSet.parse('foo bar').trim_left('bar')
+ ('foo', 'bar')
+ >>> WordSet.parse('').trim_left('bar')
+ ()
+ """
+ return self[1:] if self and self[0] == item else self
+
+ def trim(self, item):
+ """
+ >>> WordSet.parse('foo bar').trim('foo')
+ ('bar',)
+ """
+ return self.trim_left(item).trim_right(item)
+
+ def __getitem__(self, item):
+ result = super(WordSet, self).__getitem__(item)
+ if isinstance(item, slice):
+ result = WordSet(result)
+ return result
+
+ @classmethod
+ def parse(cls, identifier):
+ matches = cls._pattern.finditer(identifier)
+ return WordSet(match.group(0) for match in matches)
+
+ @classmethod
+ def from_class_name(cls, subject):
+ return cls.parse(subject.__class__.__name__)
+
+
+# for backward compatibility
+words = WordSet.parse
+
+
+def simple_html_strip(s):
+ r"""
+ Remove HTML from the string `s`.
+
+ >>> str(simple_html_strip(''))
+ ''
+
+ >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
+ A stormy day in paradise
+
+ >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
+ Somebody tell the truth.
+
+ >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
+ What about
+ multiple lines?
+ """
+ html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
+ texts = (match.group(3) or '' for match in html_stripper.finditer(s))
+ return ''.join(texts)
+
+
+class SeparatedValues(str):
+ """
+ A string separated by a separator. Overrides __iter__ for getting
+ the values.
+
+ >>> list(SeparatedValues('a,b,c'))
+ ['a', 'b', 'c']
+
+ Whitespace is stripped and empty values are discarded.
+
+ >>> list(SeparatedValues(' a, b , c, '))
+ ['a', 'b', 'c']
+ """
+
+ separator = ','
+
+ def __iter__(self):
+ parts = self.split(self.separator)
+ return filter(None, (part.strip() for part in parts))
+
+
+class Stripper:
+ r"""
+ Given a series of lines, find the common prefix and strip it from them.
+
+ >>> lines = [
+ ... 'abcdefg\n',
+ ... 'abc\n',
+ ... 'abcde\n',
+ ... ]
+ >>> res = Stripper.strip_prefix(lines)
+ >>> res.prefix
+ 'abc'
+ >>> list(res.lines)
+ ['defg\n', '\n', 'de\n']
+
+ If no prefix is common, nothing should be stripped.
+
+ >>> lines = [
+ ... 'abcd\n',
+ ... '1234\n',
+ ... ]
+ >>> res = Stripper.strip_prefix(lines)
+    >>> res.prefix
+    ''
+ >>> list(res.lines)
+ ['abcd\n', '1234\n']
+ """
+
+ def __init__(self, prefix, lines):
+ self.prefix = prefix
+ self.lines = map(self, lines)
+
+ @classmethod
+ def strip_prefix(cls, lines):
+ prefix_lines, lines = itertools.tee(lines)
+ prefix = functools.reduce(cls.common_prefix, prefix_lines)
+ return cls(prefix, lines)
+
+ def __call__(self, line):
+ if not self.prefix:
+ return line
+ null, prefix, rest = line.partition(self.prefix)
+ return rest
+
+ @staticmethod
+ def common_prefix(s1, s2):
+ """
+ Return the common prefix of two lines.
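+
+        For example:
+
+        >>> Stripper.common_prefix('abcd', 'abce')
+        'abc'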
+ """
+ index = min(len(s1), len(s2))
+ while s1[:index] != s2[:index]:
+ index -= 1
+ return s1[:index]
+
+
+def remove_prefix(text, prefix):
+ """
+ Remove the prefix from the text if it exists.
+
+ >>> remove_prefix('underwhelming performance', 'underwhelming ')
+ 'performance'
+
+ >>> remove_prefix('something special', 'sample')
+ 'something special'
+ """
+ null, prefix, rest = text.rpartition(prefix)
+ return rest
+
+
+def remove_suffix(text, suffix):
+ """
+ Remove the suffix from the text if it exists.
+
+ >>> remove_suffix('name.git', '.git')
+ 'name'
+
+ >>> remove_suffix('something special', 'sample')
+ 'something special'
+ """
+ rest, suffix, null = text.partition(suffix)
+ return rest
+
+
+def normalize_newlines(text):
+ r"""
+ Replace alternate newlines with the canonical newline.
+
+ >>> normalize_newlines('Lorem Ipsum\u2029')
+ 'Lorem Ipsum\n'
+ >>> normalize_newlines('Lorem Ipsum\r\n')
+ 'Lorem Ipsum\n'
+ >>> normalize_newlines('Lorem Ipsum\x85')
+ 'Lorem Ipsum\n'
+ """
+ newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
+ pattern = '|'.join(newlines)
+ return re.sub(pattern, '\n', text)
+
+
+def _nonblank(str):
+ return str and not str.startswith('#')
+
+
+@functools.singledispatch
+def yield_lines(iterable):
+ r"""
+ Yield valid lines of a string or iterable.
+
+ >>> list(yield_lines(''))
+ []
+ >>> list(yield_lines(['foo', 'bar']))
+ ['foo', 'bar']
+ >>> list(yield_lines('foo\nbar'))
+ ['foo', 'bar']
+ >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
+ ['foo', 'baz #comment']
+ >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
+ ['foo', 'bar', 'baz', 'bing']
+ """
+ return itertools.chain.from_iterable(map(yield_lines, iterable))
+
+
+@yield_lines.register(str)
+def _(text):
+ return filter(_nonblank, map(str.strip, text.splitlines()))
+
+
+def drop_comment(line):
+ """
+ Drop comments.
+
+ >>> drop_comment('foo # bar')
+ 'foo'
+
+ A hash without a space may be in a URL.
+
+ >>> drop_comment('http://example.com/foo#bar')
+ 'http://example.com/foo#bar'
+ """
+ return line.partition(' #')[0]
+
+
+def join_continuation(lines):
+ r"""
+ Join lines continued by a trailing backslash.
+
+ >>> list(join_continuation(['foo \\', 'bar', 'baz']))
+ ['foobar', 'baz']
+ >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
+ ['foobarbaz']
+
+ Not sure why, but...
+    The character preceding the backslash is also elided.
+
+ >>> list(join_continuation(['goo\\', 'dly']))
+ ['godly']
+
+ A terrible idea, but...
+ If no line is available to continue, suppress the lines.
+
+ >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
+ ['foo']
+ """
+ lines = iter(lines)
+ for item in lines:
+ while item.endswith('\\'):
+ try:
+ item = item[:-2].strip() + next(lines)
+ except StopIteration:
+ return
+ yield item
diff --git a/pkg_resources/_vendor/more_itertools-8.12.0.dist-info/top_level.txt b/pkg_resources/_vendor/more_itertools-8.12.0.dist-info/top_level.txt
new file mode 100644
index 0000000..a5035be
--- /dev/null
+++ b/pkg_resources/_vendor/more_itertools-8.12.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+more_itertools
diff --git a/pkg_resources/_vendor/more_itertools/__init__.py b/pkg_resources/_vendor/more_itertools/__init__.py
new file mode 100644
index 0000000..ea38bef
--- /dev/null
+++ b/pkg_resources/_vendor/more_itertools/__init__.py
@@ -0,0 +1,4 @@
+from .more import * # noqa
+from .recipes import * # noqa
+
+__version__ = '8.12.0'
diff --git a/pkg_resources/_vendor/more_itertools/more.py b/pkg_resources/_vendor/more_itertools/more.py
new file mode 100644
index 0000000..6b6a5ca
--- /dev/null
+++ b/pkg_resources/_vendor/more_itertools/more.py
@@ -0,0 +1,4316 @@
+import warnings
+
+from collections import Counter, defaultdict, deque, abc
+from collections.abc import Sequence
+from functools import partial, reduce, wraps
+from heapq import merge, heapify, heapreplace, heappop
+from itertools import (
+ chain,
+ compress,
+ count,
+ cycle,
+ dropwhile,
+ groupby,
+ islice,
+ repeat,
+ starmap,
+ takewhile,
+ tee,
+ zip_longest,
+)
+from math import exp, factorial, floor, log
+from queue import Empty, Queue
+from random import random, randrange, uniform
+from operator import itemgetter, mul, sub, gt, lt, ge, le
+from sys import hexversion, maxsize
+from time import monotonic
+
+from .recipes import (
+ consume,
+ flatten,
+ pairwise,
+ powerset,
+ take,
+ unique_everseen,
+)
+
+__all__ = [
+ 'AbortThread',
+ 'SequenceView',
+ 'UnequalIterablesError',
+ 'adjacent',
+ 'all_unique',
+ 'always_iterable',
+ 'always_reversible',
+ 'bucket',
+ 'callback_iter',
+ 'chunked',
+ 'chunked_even',
+ 'circular_shifts',
+ 'collapse',
+ 'collate',
+ 'combination_index',
+ 'consecutive_groups',
+ 'consumer',
+ 'count_cycle',
+ 'countable',
+ 'difference',
+ 'distinct_combinations',
+ 'distinct_permutations',
+ 'distribute',
+ 'divide',
+ 'duplicates_everseen',
+ 'duplicates_justseen',
+ 'exactly_n',
+ 'filter_except',
+ 'first',
+ 'groupby_transform',
+ 'ichunked',
+ 'ilen',
+ 'interleave',
+ 'interleave_evenly',
+ 'interleave_longest',
+ 'intersperse',
+ 'is_sorted',
+ 'islice_extended',
+ 'iterate',
+ 'last',
+ 'locate',
+ 'lstrip',
+ 'make_decorator',
+ 'map_except',
+ 'map_if',
+ 'map_reduce',
+ 'mark_ends',
+ 'minmax',
+ 'nth_or_last',
+ 'nth_permutation',
+ 'nth_product',
+ 'numeric_range',
+ 'one',
+ 'only',
+ 'padded',
+ 'partitions',
+ 'peekable',
+ 'permutation_index',
+ 'product_index',
+ 'raise_',
+ 'repeat_each',
+ 'repeat_last',
+ 'replace',
+ 'rlocate',
+ 'rstrip',
+ 'run_length',
+ 'sample',
+ 'seekable',
+ 'set_partitions',
+ 'side_effect',
+ 'sliced',
+ 'sort_together',
+ 'split_after',
+ 'split_at',
+ 'split_before',
+ 'split_into',
+ 'split_when',
+ 'spy',
+ 'stagger',
+ 'strip',
+ 'strictly_n',
+ 'substrings',
+ 'substrings_indexes',
+ 'time_limited',
+ 'unique_in_window',
+ 'unique_to_each',
+ 'unzip',
+ 'value_chain',
+ 'windowed',
+ 'windowed_complete',
+ 'with_iter',
+ 'zip_broadcast',
+ 'zip_equal',
+ 'zip_offset',
+]
+
+
+_marker = object()
+
+
+def chunked(iterable, n, strict=False):
+ """Break *iterable* into lists of length *n*:
+
+ >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
+ [[1, 2, 3], [4, 5, 6]]
+
+    By default, the last yielded list will have fewer than *n* elements
+ if the length of *iterable* is not divisible by *n*:
+
+ >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
+ [[1, 2, 3], [4, 5, 6], [7, 8]]
+
+ To use a fill-in value instead, see the :func:`grouper` recipe.
+
+ If the length of *iterable* is not divisible by *n* and *strict* is
+ ``True``, then ``ValueError`` will be raised before the last
+ list is yielded.
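+
+    For example:
+
+    >>> list(chunked([1, 2, 3, 4], 3, strict=True))
+    Traceback (most recent call last):
+    ...
+    ValueError: iterable is not divisible by n.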
+
+ """
+ iterator = iter(partial(take, n, iter(iterable)), [])
+ if strict:
+ if n is None:
+ raise ValueError('n must not be None when using strict mode.')
+
+ def ret():
+ for chunk in iterator:
+ if len(chunk) != n:
+ raise ValueError('iterable is not divisible by n.')
+ yield chunk
+
+ return iter(ret())
+ else:
+ return iterator
+
+
+def first(iterable, default=_marker):
+ """Return the first item of *iterable*, or *default* if *iterable* is
+ empty.
+
+ >>> first([0, 1, 2, 3])
+ 0
+ >>> first([], 'some default')
+ 'some default'
+
+ If *default* is not provided and there are no items in the iterable,
+ raise ``ValueError``.
+
+ :func:`first` is useful when you have a generator of expensive-to-retrieve
+ values and want any arbitrary one. It is marginally shorter than
+ ``next(iter(iterable), default)``.
+
+ """
+ try:
+ return next(iter(iterable))
+ except StopIteration as e:
+ if default is _marker:
+ raise ValueError(
+ 'first() was called on an empty iterable, and no '
+ 'default value was provided.'
+ ) from e
+ return default
+
+
+def last(iterable, default=_marker):
+ """Return the last item of *iterable*, or *default* if *iterable* is
+ empty.
+
+ >>> last([0, 1, 2, 3])
+ 3
+ >>> last([], 'some default')
+ 'some default'
+
+ If *default* is not provided and there are no items in the iterable,
+ raise ``ValueError``.
+ """
+ try:
+ if isinstance(iterable, Sequence):
+ return iterable[-1]
+ # Work around https://bugs.python.org/issue38525
+ elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
+ return next(reversed(iterable))
+ else:
+ return deque(iterable, maxlen=1)[-1]
+ except (IndexError, TypeError, StopIteration):
+ if default is _marker:
+ raise ValueError(
+ 'last() was called on an empty iterable, and no default was '
+ 'provided.'
+ )
+ return default
+
+
+def nth_or_last(iterable, n, default=_marker):
+ """Return the nth or the last item of *iterable*,
+ or *default* if *iterable* is empty.
+
+ >>> nth_or_last([0, 1, 2, 3], 2)
+ 2
+ >>> nth_or_last([0, 1], 2)
+ 1
+ >>> nth_or_last([], 0, 'some default')
+ 'some default'
+
+ If *default* is not provided and there are no items in the iterable,
+ raise ``ValueError``.
+ """
+ return last(islice(iterable, n + 1), default=default)
+
+
+class peekable:
+ """Wrap an iterator to allow lookahead and prepending elements.
+
+ Call :meth:`peek` on the result to get the value that will be returned
+ by :func:`next`. This won't advance the iterator:
+
+ >>> p = peekable(['a', 'b'])
+ >>> p.peek()
+ 'a'
+ >>> next(p)
+ 'a'
+
+ Pass :meth:`peek` a default value to return that instead of raising
+ ``StopIteration`` when the iterator is exhausted.
+
+ >>> p = peekable([])
+ >>> p.peek('hi')
+ 'hi'
+
+ peekables also offer a :meth:`prepend` method, which "inserts" items
+ at the head of the iterable:
+
+ >>> p = peekable([1, 2, 3])
+ >>> p.prepend(10, 11, 12)
+ >>> next(p)
+ 10
+ >>> p.peek()
+ 11
+ >>> list(p)
+ [11, 12, 1, 2, 3]
+
+ peekables can be indexed. Index 0 is the item that will be returned by
+ :func:`next`, index 1 is the item after that, and so on.
+ The values up to the given index will be cached.
+
+ >>> p = peekable(['a', 'b', 'c', 'd'])
+ >>> p[0]
+ 'a'
+ >>> p[1]
+ 'b'
+ >>> next(p)
+ 'a'
+
+ Negative indexes are supported, but be aware that they will cache the
+ remaining items in the source iterator, which may require significant
+ storage.
+
+ To check whether a peekable is exhausted, check its truth value:
+
+ >>> p = peekable(['a', 'b'])
+ >>> if p: # peekable has items
+ ... list(p)
+ ['a', 'b']
+ >>> if not p: # peekable is exhausted
+ ... list(p)
+ []
+
+ """
+
+ def __init__(self, iterable):
+ self._it = iter(iterable)
+ self._cache = deque()
+
+ def __iter__(self):
+ return self
+
+ def __bool__(self):
+ try:
+ self.peek()
+ except StopIteration:
+ return False
+ return True
+
+ def peek(self, default=_marker):
+ """Return the item that will be next returned from ``next()``.
+
+ Return ``default`` if there are no items left. If ``default`` is not
+ provided, raise ``StopIteration``.
+
+ """
+ if not self._cache:
+ try:
+ self._cache.append(next(self._it))
+ except StopIteration:
+ if default is _marker:
+ raise
+ return default
+ return self._cache[0]
+
+ def prepend(self, *items):
+ """Stack up items to be the next ones returned from ``next()`` or
+ ``self.peek()``. The items will be returned in
+ first in, first out order::
+
+ >>> p = peekable([1, 2, 3])
+ >>> p.prepend(10, 11, 12)
+ >>> next(p)
+ 10
+ >>> list(p)
+ [11, 12, 1, 2, 3]
+
+ It is possible, by prepending items, to "resurrect" a peekable that
+ previously raised ``StopIteration``.
+
+ >>> p = peekable([])
+ >>> next(p)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+ >>> p.prepend(1)
+ >>> next(p)
+ 1
+ >>> next(p)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+
+ """
+ self._cache.extendleft(reversed(items))
+
+ def __next__(self):
+ if self._cache:
+ return self._cache.popleft()
+
+ return next(self._it)
+
+ def _get_slice(self, index):
+ # Normalize the slice's arguments
+ step = 1 if (index.step is None) else index.step
+ if step > 0:
+ start = 0 if (index.start is None) else index.start
+ stop = maxsize if (index.stop is None) else index.stop
+ elif step < 0:
+ start = -1 if (index.start is None) else index.start
+ stop = (-maxsize - 1) if (index.stop is None) else index.stop
+ else:
+ raise ValueError('slice step cannot be zero')
+
+ # If either the start or stop index is negative, we'll need to cache
+ # the rest of the iterable in order to slice from the right side.
+ if (start < 0) or (stop < 0):
+ self._cache.extend(self._it)
+ # Otherwise we'll need to find the rightmost index and cache to that
+ # point.
+ else:
+ n = min(max(start, stop) + 1, maxsize)
+ cache_len = len(self._cache)
+ if n >= cache_len:
+ self._cache.extend(islice(self._it, n - cache_len))
+
+ return list(self._cache)[index]
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ return self._get_slice(index)
+
+ cache_len = len(self._cache)
+ if index < 0:
+ self._cache.extend(self._it)
+ elif index >= cache_len:
+ self._cache.extend(islice(self._it, index + 1 - cache_len))
+
+ return self._cache[index]
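+
+
+ # A small indexing sketch (based on __getitem__ above): lookups cache
+ # items without consuming them from next(), and a negative index caches
+ # the entire remaining source iterator:
+ # >>> p = peekable(iter(range(5)))
+ # >>> p[2]
+ # 2
+ # >>> next(p)
+ # 0
+ # >>> p[-1]
+ # 4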
+
+
+def collate(*iterables, **kwargs):
+ """Return a sorted merge of the items from each of several already-sorted
+ *iterables*.
+
+ >>> list(collate('ACDZ', 'AZ', 'JKL'))
+ ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
+
+ Works lazily, keeping only the next value from each iterable in memory. Use
+ :func:`collate` to, for example, perform an n-way mergesort of items that
+ don't fit in memory.
+
+ If a *key* function is specified, the iterables will be sorted according
+ to its result:
+
+ >>> key = lambda s: int(s) # Sort by numeric value, not by string
+ >>> list(collate(['1', '10'], ['2', '11'], key=key))
+ ['1', '2', '10', '11']
+
+ If the *iterables* are sorted in descending order, set *reverse* to
+ ``True``:
+
+ >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
+ [5, 4, 3, 2, 1, 0]
+
+ If the elements of the passed-in iterables are out of order, you might get
+ unexpected results.
+
+ On Python 3.5+, this function is an alias for :func:`heapq.merge`.
+
+ """
+ warnings.warn(
+ "collate is no longer part of more_itertools, use heapq.merge",
+ DeprecationWarning,
+ )
+ return merge(*iterables, **kwargs)
+
+
+def consumer(func):
+ """Decorator that automatically advances a PEP-342-style "reverse iterator"
+ to its first yield point so you don't have to call ``next()`` on it
+ manually.
+
+ >>> @consumer
+ ... def tally():
+ ... i = 0
+ ... while True:
+ ... print('Thing number %s is %s.' % (i, (yield)))
+ ... i += 1
+ ...
+ >>> t = tally()
+ >>> t.send('red')
+ Thing number 0 is red.
+ >>> t.send('fish')
+ Thing number 1 is fish.
+
+ Without the decorator, you would have to call ``next(t)`` before
+ ``t.send()`` could be used.
+
+ """
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ gen = func(*args, **kwargs)
+ next(gen)
+ return gen
+
+ return wrapper
+
+
+def ilen(iterable):
+ """Return the number of items in *iterable*.
+
+ >>> ilen(x for x in range(1000000) if x % 3 == 0)
+ 333334
+
+ This consumes the iterable, so handle with care.
+
+ """
+ # This approach was selected because benchmarks showed it's likely the
+ # fastest of the known implementations at the time of writing.
+ # See GitHub tracker: #236, #230.
+ counter = count()
+ deque(zip(iterable, counter), maxlen=0)
+ return next(counter)
+
+
+def iterate(func, start):
+ """Return ``start``, ``func(start)``, ``func(func(start))``, ...
+
+ >>> from itertools import islice
+ >>> list(islice(iterate(lambda x: 2*x, 1), 10))
+ [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+ """
+ while True:
+ yield start
+ start = func(start)
+
+
+def with_iter(context_manager):
+ """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
+
+ For example, this will close the file when the iterator is exhausted::
+
+ upper_lines = (line.upper() for line in with_iter(open('foo')))
+
+ Any context manager which returns an iterable is a candidate for
+ ``with_iter``.
+
+ """
+ with context_manager as iterable:
+ yield from iterable
+
+
+def one(iterable, too_short=None, too_long=None):
+ """Return the first item from *iterable*, which is expected to contain only
+ that item. Raise an exception if *iterable* is empty or has more than one
+ item.
+
+ :func:`one` is useful for ensuring that an iterable contains only one item.
+ For example, it can be used to retrieve the result of a database query
+ that is expected to return a single row.
+
+ If *iterable* is empty, ``ValueError`` will be raised. You may specify a
+ different exception with the *too_short* keyword:
+
+ >>> it = []
+ >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: too few items in iterable (expected 1)
+ >>> too_short = IndexError('too few items')
+ >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ IndexError: too few items
+
+ Similarly, if *iterable* contains more than one item, ``ValueError`` will
+ be raised. You may specify a different exception with the *too_long*
+ keyword:
+
+ >>> it = ['too', 'many']
+ >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: Expected exactly one item in iterable, but got 'too',
+ 'many', and perhaps more.
+ >>> too_long = RuntimeError
+ >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ RuntimeError
+
+ Note that :func:`one` attempts to advance *iterable* twice to ensure there
+ is only one item. See :func:`spy` or :func:`peekable` to check iterable
+ contents less destructively.
+
+ """
+ it = iter(iterable)
+
+ try:
+ first_value = next(it)
+ except StopIteration as e:
+ raise (
+ too_short or ValueError('too few items in iterable (expected 1)')
+ ) from e
+
+ try:
+ second_value = next(it)
+ except StopIteration:
+ pass
+ else:
+ msg = (
+ 'Expected exactly one item in iterable, but got {!r}, {!r}, '
+ 'and perhaps more.'.format(first_value, second_value)
+ )
+ raise too_long or ValueError(msg)
+
+ return first_value
+
+
+def raise_(exception, *args):
+ raise exception(*args)
+
+
+def strictly_n(iterable, n, too_short=None, too_long=None):
+ """Validate that *iterable* has exactly *n* items and return them if
+ it does. If it has fewer than *n* items, call function *too_short*
+ with those items. If it has more than *n* items, call function
+ *too_long* with the first ``n + 1`` items.
+
+ >>> iterable = ['a', 'b', 'c', 'd']
+ >>> n = 4
+ >>> list(strictly_n(iterable, n))
+ ['a', 'b', 'c', 'd']
+
+ By default, *too_short* and *too_long* are functions that raise
+ ``ValueError``.
+
+ >>> list(strictly_n('ab', 3)) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: Too few items in iterable (got 2)
+
+ >>> list(strictly_n('abc', 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: Too many items in iterable (got at least 3)
+
+ You can instead supply functions that do something else.
+ *too_short* will be called with the number of items in *iterable*.
+ *too_long* will be called with `n + 1`.
+
+ >>> def too_short(item_count):
+ ... raise RuntimeError
+ >>> it = strictly_n('abcd', 6, too_short=too_short)
+ >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ RuntimeError
+
+ >>> def too_long(item_count):
+ ... print('The boss is going to hear about this')
+ >>> it = strictly_n('abcdef', 4, too_long=too_long)
+ >>> list(it)
+ The boss is going to hear about this
+ ['a', 'b', 'c', 'd']
+
+ """
+ if too_short is None:
+ too_short = lambda item_count: raise_(
+ ValueError,
+ 'Too few items in iterable (got {})'.format(item_count),
+ )
+
+ if too_long is None:
+ too_long = lambda item_count: raise_(
+ ValueError,
+ 'Too many items in iterable (got at least {})'.format(item_count),
+ )
+
+ it = iter(iterable)
+ for i in range(n):
+ try:
+ item = next(it)
+ except StopIteration:
+ too_short(i)
+ return
+ else:
+ yield item
+
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ else:
+ too_long(n + 1)
+
+
+def distinct_permutations(iterable, r=None):
+ """Yield successive distinct permutations of the elements in *iterable*.
+
+ >>> sorted(distinct_permutations([1, 0, 1]))
+ [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
+
+ Equivalent to ``set(permutations(iterable))``, except duplicates are not
+ generated and thrown away. For larger input sequences this is much more
+ efficient.
+
+ Duplicate permutations arise when there are duplicated elements in the
+ input iterable. The number of items returned is
+ `n! / (x_1! * x_2! * ... * x_k!)`, where `n` is the total number of
+ items input, `k` is the number of distinct items, and each `x_i` is
+ the count of one of the `k` distinct items in the input sequence.
+
+ If *r* is given, only the *r*-length permutations are yielded.
+
+ >>> sorted(distinct_permutations([1, 0, 1], r=2))
+ [(0, 1), (1, 0), (1, 1)]
+ >>> sorted(distinct_permutations(range(3), r=2))
+ [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
+
+ """
+ # Algorithm: https://w.wiki/Qai
+ def _full(A):
+ while True:
+ # Yield the permutation we have
+ yield tuple(A)
+
+ # Find the largest index i such that A[i] < A[i + 1]
+ for i in range(size - 2, -1, -1):
+ if A[i] < A[i + 1]:
+ break
+ # If no such index exists, this permutation is the last one
+ else:
+ return
+
+ # Find the largest index j greater than i such that A[i] < A[j]
+ for j in range(size - 1, i, -1):
+ if A[i] < A[j]:
+ break
+
+ # Swap the value of A[i] with that of A[j], then reverse the
+ # sequence from A[i + 1] to form the new permutation
+ A[i], A[j] = A[j], A[i]
+ A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
+
+ # Algorithm: modified from the above
+ def _partial(A, r):
+ # Split A into the first r items and the last r items
+ head, tail = A[:r], A[r:]
+ right_head_indexes = range(r - 1, -1, -1)
+ left_tail_indexes = range(len(tail))
+
+ while True:
+ # Yield the permutation we have
+ yield tuple(head)
+
+ # Starting from the right, find the first index of the head with
+ # value smaller than the maximum value of the tail - call it i.
+ pivot = tail[-1]
+ for i in right_head_indexes:
+ if head[i] < pivot:
+ break
+ pivot = head[i]
+ else:
+ return
+
+ # Starting from the left, find the first value of the tail
+ # with a value greater than head[i] and swap.
+ for j in left_tail_indexes:
+ if tail[j] > head[i]:
+ head[i], tail[j] = tail[j], head[i]
+ break
+ # If we didn't find one, start from the right and find the first
+ # index of the head with a value greater than head[i] and swap.
+ else:
+ for j in right_head_indexes:
+ if head[j] > head[i]:
+ head[i], head[j] = head[j], head[i]
+ break
+
+ # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
+ tail += head[: i - r : -1] # head[i + 1:][::-1]
+ i += 1
+ head[i:], tail[:] = tail[: r - i], tail[r - i :]
+
+ items = sorted(iterable)
+
+ size = len(items)
+ if r is None:
+ r = size
+
+ if 0 < r <= size:
+ return _full(items) if (r == size) else _partial(items, r)
+
+ return iter(() if r else ((),))
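+
+
+ # Worked count for the formula in the docstring: for [1, 0, 1], n = 3,
+ # the element 1 appears twice and 0 once, so there are
+ # 3! / (2! * 1!) = 3 distinct permutations -- matching the doctest.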
+
+
+def intersperse(e, iterable, n=1):
+ """Intersperse filler element *e* among the items in *iterable*, leaving
+ *n* items between each filler element.
+
+ >>> list(intersperse('!', [1, 2, 3, 4, 5]))
+ [1, '!', 2, '!', 3, '!', 4, '!', 5]
+
+ >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
+ [1, 2, None, 3, 4, None, 5]
+
+ """
+ if n == 0:
+ raise ValueError('n must be > 0')
+ elif n == 1:
+ # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
+ # islice(..., 1, None) -> x_0, e, x_1, e, x_2...
+ return islice(interleave(repeat(e), iterable), 1, None)
+ else:
+ # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
+ # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
+ # flatten(...) -> x_0, x_1, e, x_2, x_3...
+ filler = repeat([e])
+ chunks = chunked(iterable, n)
+ return flatten(islice(interleave(filler, chunks), 1, None))
+
+
+def unique_to_each(*iterables):
+ """Return the elements from each of the input iterables that aren't in the
+ other input iterables.
+
+ For example, suppose you have a set of packages, each with a set of
+ dependencies::
+
+ {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
+
+ If you remove one package, which dependencies can also be removed?
+
+ If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
+ associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
+ ``pkg_2``, and ``D`` is only needed for ``pkg_3``::
+
+ >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
+ [['A'], ['C'], ['D']]
+
+ If there are duplicates in one input iterable that aren't in the others
+ they will be duplicated in the output. Input order is preserved::
+
+ >>> unique_to_each("mississippi", "missouri")
+ [['p', 'p'], ['o', 'u', 'r']]
+
+ It is assumed that the elements of each iterable are hashable.
+
+ """
+ pool = [list(it) for it in iterables]
+ counts = Counter(chain.from_iterable(map(set, pool)))
+ uniques = {element for element in counts if counts[element] == 1}
+ return [list(filter(uniques.__contains__, it)) for it in pool]
+
+
+def windowed(seq, n, fillvalue=None, step=1):
+ """Return a sliding window of width *n* over the given iterable.
+
+ >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
+ >>> list(all_windows)
+ [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
+
+ When the window is larger than the iterable, *fillvalue* is used in place
+ of missing values:
+
+ >>> list(windowed([1, 2, 3], 4))
+ [(1, 2, 3, None)]
+
+ Each window will advance in increments of *step*:
+
+ >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
+ [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
+
+ To slide into the iterable's items, use :func:`chain` to add filler items
+ to the left:
+
+ >>> iterable = [1, 2, 3, 4]
+ >>> n = 3
+ >>> padding = [None] * (n - 1)
+ >>> list(windowed(chain(padding, iterable), 3))
+ [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
+ """
+ if n < 0:
+ raise ValueError('n must be >= 0')
+ if n == 0:
+ yield tuple()
+ return
+ if step < 1:
+ raise ValueError('step must be >= 1')
+
+ window = deque(maxlen=n)
+ i = n
+ for _ in map(window.append, seq):
+ i -= 1
+ if not i:
+ i = step
+ yield tuple(window)
+
+ size = len(window)
+ if size < n:
+ yield tuple(chain(window, repeat(fillvalue, n - size)))
+ elif 0 < i < min(step, n):
+ window += (fillvalue,) * i
+ yield tuple(window)
+
+
+def substrings(iterable):
+ """Yield all of the substrings of *iterable*.
+
+ >>> [''.join(s) for s in substrings('more')]
+ ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
+
+ Note that non-string iterables can also be subdivided.
+
+ >>> list(substrings([0, 1, 2]))
+ [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
+
+ """
+ # The length-1 substrings
+ seq = []
+ for item in iter(iterable):
+ seq.append(item)
+ yield (item,)
+ seq = tuple(seq)
+ item_count = len(seq)
+
+ # And the rest
+ for n in range(2, item_count + 1):
+ for i in range(item_count - n + 1):
+ yield seq[i : i + n]
+
+
+def substrings_indexes(seq, reverse=False):
+ """Yield all substrings and their positions in *seq*
+
+ The items yielded will be a tuple of the form ``(substr, i, j)``, where
+ ``substr == seq[i:j]``.
+
+ This function only works for iterables that support slicing, such as
+ ``str`` objects.
+
+ >>> for item in substrings_indexes('more'):
+ ... print(item)
+ ('m', 0, 1)
+ ('o', 1, 2)
+ ('r', 2, 3)
+ ('e', 3, 4)
+ ('mo', 0, 2)
+ ('or', 1, 3)
+ ('re', 2, 4)
+ ('mor', 0, 3)
+ ('ore', 1, 4)
+ ('more', 0, 4)
+
+ Set *reverse* to ``True`` to yield the same items in the opposite order.
+
+ """
+ r = range(1, len(seq) + 1)
+ if reverse:
+ r = reversed(r)
+ return (
+ (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
+ )
+
+
+class bucket:
+ """Wrap *iterable* and return an object that buckets it iterable into
+ child iterables based on a *key* function.
+
+ >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
+ >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
+ >>> sorted(list(s)) # Get the keys
+ ['a', 'b', 'c']
+ >>> a_iterable = s['a']
+ >>> next(a_iterable)
+ 'a1'
+ >>> next(a_iterable)
+ 'a2'
+ >>> list(s['b'])
+ ['b1', 'b2', 'b3']
+
+ The original iterable will be advanced and its items will be cached until
+ they are used by the child iterables. This may require significant storage.
+
+ By default, attempting to select a bucket to which no items belong will
+ exhaust the iterable and cache all values.
+ If you specify a *validator* function, selected buckets will instead be
+ checked against it.
+
+ >>> from itertools import count
+ >>> it = count(1, 2) # Infinite sequence of odd numbers
+ >>> key = lambda x: x % 10 # Bucket by last digit
+ >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
+ >>> s = bucket(it, key=key, validator=validator)
+ >>> 2 in s
+ False
+ >>> list(s[2])
+ []
+
+ """
+
+ def __init__(self, iterable, key, validator=None):
+ self._it = iter(iterable)
+ self._key = key
+ self._cache = defaultdict(deque)
+ self._validator = validator or (lambda x: True)
+
+ def __contains__(self, value):
+ if not self._validator(value):
+ return False
+
+ try:
+ item = next(self[value])
+ except StopIteration:
+ return False
+ else:
+ self._cache[value].appendleft(item)
+
+ return True
+
+ def _get_values(self, value):
+ """
+ Helper to yield items from the parent iterator that match *value*.
+ Items that don't match are stored in the local cache as they
+ are encountered.
+ """
+ while True:
+ # If we've cached some items that match the target value, emit
+ # the first one and evict it from the cache.
+ if self._cache[value]:
+ yield self._cache[value].popleft()
+ # Otherwise we need to advance the parent iterator to search for
+ # a matching item, caching the rest.
+ else:
+ while True:
+ try:
+ item = next(self._it)
+ except StopIteration:
+ return
+ item_value = self._key(item)
+ if item_value == value:
+ yield item
+ break
+ elif self._validator(item_value):
+ self._cache[item_value].append(item)
+
+ def __iter__(self):
+ for item in self._it:
+ item_value = self._key(item)
+ if self._validator(item_value):
+ self._cache[item_value].append(item)
+
+ yield from self._cache.keys()
+
+ def __getitem__(self, value):
+ if not self._validator(value):
+ return iter(())
+
+ return self._get_values(value)
+
+
+def spy(iterable, n=1):
+ """Return a 2-tuple with a list containing the first *n* elements of
+ *iterable*, and an iterator with the same items as *iterable*.
+ This allows you to "look ahead" at the items in the iterable without
+ advancing it.
+
+ There is one item in the list by default:
+
+ >>> iterable = 'abcdefg'
+ >>> head, iterable = spy(iterable)
+ >>> head
+ ['a']
+ >>> list(iterable)
+ ['a', 'b', 'c', 'd', 'e', 'f', 'g']
+
+ You may use unpacking to retrieve items instead of lists:
+
+ >>> (head,), iterable = spy('abcdefg')
+ >>> head
+ 'a'
+ >>> (first, second), iterable = spy('abcdefg', 2)
+ >>> first
+ 'a'
+ >>> second
+ 'b'
+
+ The number of items requested can be larger than the number of items in
+ the iterable:
+
+ >>> iterable = [1, 2, 3, 4, 5]
+ >>> head, iterable = spy(iterable, 10)
+ >>> head
+ [1, 2, 3, 4, 5]
+ >>> list(iterable)
+ [1, 2, 3, 4, 5]
+
+ """
+ it = iter(iterable)
+ head = take(n, it)
+
+ return head.copy(), chain(head, it)
+
+
+def interleave(*iterables):
+ """Return a new iterable yielding from each iterable in turn,
+ until the shortest is exhausted.
+
+ >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
+ [1, 4, 6, 2, 5, 7]
+
+ For a version that doesn't terminate after the shortest iterable is
+ exhausted, see :func:`interleave_longest`.
+
+ """
+ return chain.from_iterable(zip(*iterables))
+
+
+def interleave_longest(*iterables):
+ """Return a new iterable yielding from each iterable in turn,
+ skipping any that are exhausted.
+
+ >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
+ [1, 4, 6, 2, 5, 7, 3, 8]
+
+ This function produces the same output as :func:`roundrobin`, but may
+ perform better for some inputs (in particular when the number of iterables
+ is large).
+
+ """
+ i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
+ return (x for x in i if x is not _marker)
+
+
+def interleave_evenly(iterables, lengths=None):
+ """
+ Interleave multiple iterables so that their elements are evenly distributed
+ throughout the output sequence.
+
+ >>> iterables = [1, 2, 3, 4, 5], ['a', 'b']
+ >>> list(interleave_evenly(iterables))
+ [1, 2, 'a', 3, 4, 'b', 5]
+
+ >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]]
+ >>> list(interleave_evenly(iterables))
+ [1, 6, 4, 2, 7, 3, 8, 5]
+
+ This function requires iterables of known length. Iterables without
+ ``__len__()`` can be used by manually specifying lengths with *lengths*:
+
+ >>> from itertools import combinations, repeat
+ >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']]
+ >>> lengths = [4 * (4 - 1) // 2, 3]
+ >>> list(interleave_evenly(iterables, lengths=lengths))
+ [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c']
+
+ Based on Bresenham's algorithm.
+ """
+ if lengths is None:
+ try:
+ lengths = [len(it) for it in iterables]
+ except TypeError:
+ raise ValueError(
+ 'Iterable lengths could not be determined automatically. '
+ 'Specify them with the lengths keyword.'
+ )
+ elif len(iterables) != len(lengths):
+ raise ValueError('Mismatching number of iterables and lengths.')
+
+ dims = len(lengths)
+
+ # sort iterables by length, descending
+ lengths_permute = sorted(
+ range(dims), key=lambda i: lengths[i], reverse=True
+ )
+ lengths_desc = [lengths[i] for i in lengths_permute]
+ iters_desc = [iter(iterables[i]) for i in lengths_permute]
+
+ # the longest iterable is the primary one (Bresenham: the longest
+ # distance along an axis)
+ delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:]
+ iter_primary, iters_secondary = iters_desc[0], iters_desc[1:]
+ errors = [delta_primary // dims] * len(deltas_secondary)
+
+ to_yield = sum(lengths)
+ while to_yield:
+ yield next(iter_primary)
+ to_yield -= 1
+ # update errors for each secondary iterable
+ errors = [e - delta for e, delta in zip(errors, deltas_secondary)]
+
+ # those iterables for which the error is negative are yielded
+ # ("diagonal step" in Bresenham)
+ for i, e in enumerate(errors):
+ if e < 0:
+ yield next(iters_secondary[i])
+ to_yield -= 1
+ errors[i] += delta_primary
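+
+
+ # A short trace of the Bresenham bookkeeping above (illustrative): for
+ # lengths [3, 2], errors start at [3 // 2] = [1]. Each primary yield
+ # subtracts the secondary delta (2); a negative error triggers a
+ # secondary yield and adds back the primary delta (3):
+ # 1 -> -1 (secondary) -> 2 -> 0 -> -2 (secondary),
+ # producing primary, secondary, primary, primary, secondary.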
+
+
+def collapse(iterable, base_type=None, levels=None):
+ """Flatten an iterable with multiple levels of nesting (e.g., a list of
+ lists of tuples) into non-iterable types.
+
+ >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
+ >>> list(collapse(iterable))
+ [1, 2, 3, 4, 5, 6]
+
+ Binary and text strings are not considered iterable and
+ will not be collapsed.
+
+ To avoid collapsing other types, specify *base_type*:
+
+ >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
+ >>> list(collapse(iterable, base_type=tuple))
+ ['ab', ('cd', 'ef'), 'gh', 'ij']
+
+ Specify *levels* to stop flattening after a certain level:
+
+ >>> iterable = [('a', ['b']), ('c', ['d'])]
+ >>> list(collapse(iterable)) # Fully flattened
+ ['a', 'b', 'c', 'd']
+ >>> list(collapse(iterable, levels=1)) # Only one level flattened
+ ['a', ['b'], 'c', ['d']]
+
+ """
+
+ def walk(node, level):
+ if (
+ ((levels is not None) and (level > levels))
+ or isinstance(node, (str, bytes))
+ or ((base_type is not None) and isinstance(node, base_type))
+ ):
+ yield node
+ return
+
+ try:
+ tree = iter(node)
+ except TypeError:
+ yield node
+ return
+ else:
+ for child in tree:
+ yield from walk(child, level + 1)
+
+ yield from walk(iterable, 0)
+
+
+def side_effect(func, iterable, chunk_size=None, before=None, after=None):
+ """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
+ of items) before yielding the item.
+
+ *func* must be a function that takes a single argument. Its return value
+ will be discarded.
+
+ *before* and *after* are optional functions that take no arguments. They
+ will be executed before iteration starts and after it ends, respectively.
+
+ :func:`side_effect` can be used for logging, updating progress bars, or anything
+ that is not functionally "pure."
+
+ Emitting a status message:
+
+ >>> from more_itertools import consume
+ >>> func = lambda item: print('Received {}'.format(item))
+ >>> consume(side_effect(func, range(2)))
+ Received 0
+ Received 1
+
+ Operating on chunks of items:
+
+ >>> pair_sums = []
+ >>> func = lambda chunk: pair_sums.append(sum(chunk))
+ >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
+ [0, 1, 2, 3, 4, 5]
+ >>> list(pair_sums)
+ [1, 5, 9]
+
+ Writing to a file-like object:
+
+ >>> from io import StringIO
+ >>> from more_itertools import consume
+ >>> f = StringIO()
+ >>> func = lambda x: print(x, file=f)
+ >>> before = lambda: print(u'HEADER', file=f)
+ >>> after = f.close
+ >>> it = [u'a', u'b', u'c']
+ >>> consume(side_effect(func, it, before=before, after=after))
+ >>> f.closed
+ True
+
+ """
+ try:
+ if before is not None:
+ before()
+
+ if chunk_size is None:
+ for item in iterable:
+ func(item)
+ yield item
+ else:
+ for chunk in chunked(iterable, chunk_size):
+ func(chunk)
+ yield from chunk
+ finally:
+ if after is not None:
+ after()
+
+
+def sliced(seq, n, strict=False):
+ """Yield slices of length *n* from the sequence *seq*.
+
+ >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
+ [(1, 2, 3), (4, 5, 6)]
+
+ By default, the last yielded slice will have fewer than *n* elements
+ if the length of *seq* is not divisible by *n*:
+
+ >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
+ [(1, 2, 3), (4, 5, 6), (7, 8)]
+
+ If the length of *seq* is not divisible by *n* and *strict* is
+ ``True``, then ``ValueError`` will be raised before the last
+ slice is yielded.
+
+ This function will only work for iterables that support slicing.
+ For non-sliceable iterables, see :func:`chunked`.
+
+ """
+ iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
+ if strict:
+
+ def ret():
+ for _slice in iterator:
+ if len(_slice) != n:
+ raise ValueError("seq is not divisible by n.")
+ yield _slice
+
+ return iter(ret())
+ else:
+ return iterator
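+
+
+ # A minimal sketch of *strict* for sliced (mirroring the docstring above):
+ # >>> list(sliced('abcd', 2, strict=True))
+ # ['ab', 'cd']
+ # >>> list(sliced('abcde', 2, strict=True))
+ # Traceback (most recent call last):
+ # ...
+ # ValueError: seq is not divisible by n.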
+
+
+def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
+ """Yield lists of items from *iterable*, where each list is delimited by
+ an item where callable *pred* returns ``True``.
+
+ >>> list(split_at('abcdcba', lambda x: x == 'b'))
+ [['a'], ['c', 'd', 'c'], ['a']]
+
+ >>> list(split_at(range(10), lambda n: n % 2 == 1))
+ [[0], [2], [4], [6], [8], []]
+
+ At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+ then there is no limit on the number of splits:
+
+ >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
+ [[0], [2], [4, 5, 6, 7, 8, 9]]
+
+ By default, the delimiting items are not included in the output.
+ To include them, set *keep_separator* to ``True``.
+
+ >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
+ [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
+
+ """
+ if maxsplit == 0:
+ yield list(iterable)
+ return
+
+ buf = []
+ it = iter(iterable)
+ for item in it:
+ if pred(item):
+ yield buf
+ if keep_separator:
+ yield [item]
+ if maxsplit == 1:
+ yield list(it)
+ return
+ buf = []
+ maxsplit -= 1
+ else:
+ buf.append(item)
+ yield buf
+
+
+def split_before(iterable, pred, maxsplit=-1):
+ """Yield lists of items from *iterable*, where each list ends just before
+ an item for which callable *pred* returns ``True``:
+
+ >>> list(split_before('OneTwo', lambda s: s.isupper()))
+ [['O', 'n', 'e'], ['T', 'w', 'o']]
+
+ >>> list(split_before(range(10), lambda n: n % 3 == 0))
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
+
+ At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+ then there is no limit on the number of splits:
+
+ >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
+ """
+ if maxsplit == 0:
+ yield list(iterable)
+ return
+
+ buf = []
+ it = iter(iterable)
+ for item in it:
+ if pred(item) and buf:
+ yield buf
+ if maxsplit == 1:
+ yield [item] + list(it)
+ return
+ buf = []
+ maxsplit -= 1
+ buf.append(item)
+ if buf:
+ yield buf
+
+
+def split_after(iterable, pred, maxsplit=-1):
+ """Yield lists of items from *iterable*, where each list ends with an
+ item where callable *pred* returns ``True``:
+
+ >>> list(split_after('one1two2', lambda s: s.isdigit()))
+ [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
+
+ >>> list(split_after(range(10), lambda n: n % 3 == 0))
+ [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
+
+ At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+ then there is no limit on the number of splits:
+
+ >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
+ [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
+
+ """
+ if maxsplit == 0:
+ yield list(iterable)
+ return
+
+ buf = []
+ it = iter(iterable)
+ for item in it:
+ buf.append(item)
+ if pred(item) and buf:
+ yield buf
+ if maxsplit == 1:
+ yield list(it)
+ return
+ buf = []
+ maxsplit -= 1
+ if buf:
+ yield buf
+
+
+def split_when(iterable, pred, maxsplit=-1):
+ """Split *iterable* into pieces based on the output of *pred*.
+ *pred* should be a function that takes successive pairs of items and
+ returns ``True`` if the iterable should be split in between them.
+
+ For example, to find runs of increasing numbers, split the iterable when
+ element ``i`` is larger than element ``i + 1``:
+
+ >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
+ [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
+
+ At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+ then there is no limit on the number of splits:
+
+ >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
+ ... lambda x, y: x > y, maxsplit=2))
+ [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
+
+ """
+ if maxsplit == 0:
+ yield list(iterable)
+ return
+
+ it = iter(iterable)
+ try:
+ cur_item = next(it)
+ except StopIteration:
+ return
+
+ buf = [cur_item]
+ for next_item in it:
+ if pred(cur_item, next_item):
+ yield buf
+ if maxsplit == 1:
+ yield [next_item] + list(it)
+ return
+ buf = []
+ maxsplit -= 1
+
+ buf.append(next_item)
+ cur_item = next_item
+
+ yield buf
+
+
+def split_into(iterable, sizes):
+ """Yield a list of sequential items from *iterable* of length 'n' for each
+ integer 'n' in *sizes*.
+
+ >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
+ [[1], [2, 3], [4, 5, 6]]
+
+ If the sum of *sizes* is smaller than the length of *iterable*, then the
+ remaining items of *iterable* will not be returned.
+
+ >>> list(split_into([1,2,3,4,5,6], [2,3]))
+ [[1, 2], [3, 4, 5]]
+
+ If the sum of *sizes* is larger than the length of *iterable*, fewer items
+ will be returned in the iteration that overruns *iterable* and further
+ lists will be empty:
+
+ >>> list(split_into([1,2,3,4], [1,2,3,4]))
+ [[1], [2, 3], [4], []]
+
+ When a ``None`` object is encountered in *sizes*, the returned list will
+ contain items up to the end of *iterable*, the same way that
+ :func:`itertools.islice` does:
+
+ >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
+ [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
+
+ :func:`split_into` can be useful for grouping a series of items where the
+ sizes of the groups are not uniform. An example would be where in a row
+ from a table, multiple columns represent elements of the same feature
+ (e.g. a point represented by x,y,z) but, the format is not the same for
+ all columns.
+ """
+ # convert the iterable argument into an iterator so its contents can
+ # be consumed by islice in case it is a generator
+ it = iter(iterable)
+
+ for size in sizes:
+ if size is None:
+ yield list(it)
+ return
+ else:
+ yield list(islice(it, size))
+
+
+def padded(iterable, fillvalue=None, n=None, next_multiple=False):
+ """Yield the elements from *iterable*, followed by *fillvalue*, such that
+ at least *n* items are emitted.
+
+ >>> list(padded([1, 2, 3], '?', 5))
+ [1, 2, 3, '?', '?']
+
+ If *next_multiple* is ``True``, *fillvalue* will be emitted until the
+ number of items emitted is a multiple of *n*::
+
+ >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
+ [1, 2, 3, 4, None, None]
+
+ If *n* is ``None``, *fillvalue* will be emitted indefinitely.
+
+ """
+ it = iter(iterable)
+ if n is None:
+ yield from chain(it, repeat(fillvalue))
+ elif n < 1:
+ raise ValueError('n must be at least 1')
+ else:
+ item_count = 0
+ for item in it:
+ yield item
+ item_count += 1
+
+ remaining = (n - item_count) % n if next_multiple else n - item_count
+ for _ in range(remaining):
+ yield fillvalue
+
+
+def repeat_each(iterable, n=2):
+ """Repeat each element in *iterable* *n* times.
+
+ >>> list(repeat_each('ABC', 3))
+ ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
+ """
+ return chain.from_iterable(map(repeat, iterable, repeat(n)))
+
+
+def repeat_last(iterable, default=None):
+ """After the *iterable* is exhausted, keep yielding its last element.
+
+ >>> list(islice(repeat_last(range(3)), 5))
+ [0, 1, 2, 2, 2]
+
+ If the iterable is empty, yield *default* forever::
+
+ >>> list(islice(repeat_last(range(0), 42), 5))
+ [42, 42, 42, 42, 42]
+
+ """
+ item = _marker
+ for item in iterable:
+ yield item
+ final = default if item is _marker else item
+ yield from repeat(final)
+
+
+def distribute(n, iterable):
+ """Distribute the items from *iterable* among *n* smaller iterables.
+
+ >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
+ >>> list(group_1)
+ [1, 3, 5]
+ >>> list(group_2)
+ [2, 4, 6]
+
+ If the length of *iterable* is not evenly divisible by *n*, then the
+ length of the returned iterables will not be identical:
+
+ >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
+ >>> [list(c) for c in children]
+ [[1, 4, 7], [2, 5], [3, 6]]
+
+ If the length of *iterable* is smaller than *n*, then the last returned
+ iterables will be empty:
+
+ >>> children = distribute(5, [1, 2, 3])
+ >>> [list(c) for c in children]
+ [[1], [2], [3], [], []]
+
+ This function uses :func:`itertools.tee` and may require significant
+ storage. If you need the order of items in the smaller iterables to match the
+ original iterable, see :func:`divide`.
+
+ """
+ if n < 1:
+ raise ValueError('n must be at least 1')
+
+ children = tee(iterable, n)
+ return [islice(it, index, None, n) for index, it in enumerate(children)]
+
+
+def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
+ """Yield tuples whose elements are offset from *iterable*.
+ The amount by which the `i`-th item in each tuple is offset is given by
+ the `i`-th item in *offsets*.
+
+ >>> list(stagger([0, 1, 2, 3]))
+ [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
+ >>> list(stagger(range(8), offsets=(0, 2, 4)))
+ [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
+
+ By default, the sequence will end when the final element of a tuple is the
+ last item in the iterable. To continue until the first element of a tuple
+ is the last item in the iterable, set *longest* to ``True``::
+
+ >>> list(stagger([0, 1, 2, 3], longest=True))
+ [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
+
+ By default, ``None`` will be used to replace offsets beyond the end of the
+ sequence. Specify *fillvalue* to use some other value.
+
+ """
+ children = tee(iterable, len(offsets))
+
+ return zip_offset(
+ *children, offsets=offsets, longest=longest, fillvalue=fillvalue
+ )
+
+
+class UnequalIterablesError(ValueError):
+ def __init__(self, details=None):
+ msg = 'Iterables have different lengths'
+ if details is not None:
+ msg += (': index 0 has length {}; index {} has length {}').format(
+ *details
+ )
+
+ super().__init__(msg)
+
+
+def _zip_equal_generator(iterables):
+ for combo in zip_longest(*iterables, fillvalue=_marker):
+ for val in combo:
+ if val is _marker:
+ raise UnequalIterablesError()
+ yield combo
+
+
+def _zip_equal(*iterables):
+ # Check whether the iterables are all the same size.
+ try:
+ first_size = len(iterables[0])
+ for i, it in enumerate(iterables[1:], 1):
+ size = len(it)
+ if size != first_size:
+ break
+ else:
+ # If we didn't break out, we can use the built-in zip.
+ return zip(*iterables)
+
+ # If we did break out, there was a mismatch.
+ raise UnequalIterablesError(details=(first_size, i, size))
+ # If any one of the iterables didn't have a length, start reading
+ # them until one runs out.
+ except TypeError:
+ return _zip_equal_generator(iterables)
+
+
+def zip_equal(*iterables):
+ """``zip`` the input *iterables* together, but raise
+ ``UnequalIterablesError`` if they aren't all the same length.
+
+ >>> it_1 = range(3)
+ >>> it_2 = iter('abc')
+ >>> list(zip_equal(it_1, it_2))
+ [(0, 'a'), (1, 'b'), (2, 'c')]
+
+ >>> it_1 = range(3)
+ >>> it_2 = iter('abcd')
+ >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ more_itertools.more.UnequalIterablesError: Iterables have different
+ lengths
+
+ """
+ if hexversion >= 0x30A00A6:
+ warnings.warn(
+ (
+ 'zip_equal will be removed in a future version of '
+ 'more-itertools. Use the builtin zip function with '
+ 'strict=True instead.'
+ ),
+ DeprecationWarning,
+ )
+
+ return _zip_equal(*iterables)
+
+
+def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
+ """``zip`` the input *iterables* together, but offset the `i`-th iterable
+ by the `i`-th item in *offsets*.
+
+ >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
+ [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
+
+ This can be used as a lightweight alternative to SciPy or pandas to analyze
+ data sets in which some series have a lead or lag relationship.
+
+ By default, the sequence will end when the shortest iterable is exhausted.
+ To continue until the longest iterable is exhausted, set *longest* to
+ ``True``.
+
+ >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
+ [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
+
+ By default, ``None`` will be used to replace offsets beyond the end of the
+ sequence. Specify *fillvalue* to use some other value.
+
+ """
+ if len(iterables) != len(offsets):
+ raise ValueError("Number of iterables and offsets didn't match")
+
+ staggered = []
+ for it, n in zip(iterables, offsets):
+ if n < 0:
+ staggered.append(chain(repeat(fillvalue, -n), it))
+ elif n > 0:
+ staggered.append(islice(it, n, None))
+ else:
+ staggered.append(it)
+
+ if longest:
+ return zip_longest(*staggered, fillvalue=fillvalue)
+
+ return zip(*staggered)
+
+
+def sort_together(iterables, key_list=(0,), key=None, reverse=False):
+ """Return the input iterables sorted together, with *key_list* as the
+ priority for sorting. All iterables are trimmed to the length of the
+ shortest one.
+
+ This can be used like the sorting function in a spreadsheet. If each
+ iterable represents a column of data, the key list determines which
+ columns are used for sorting.
+
+ By default, all iterables are sorted using the ``0``-th iterable::
+
+ >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
+ >>> sort_together(iterables)
+ [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
+
+ Set a different key list to sort according to another iterable.
+ Specifying multiple keys dictates how ties are broken::
+
+ >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
+ >>> sort_together(iterables, key_list=(1, 2))
+ [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
+
+ To sort by a function of the elements of the iterable, pass a *key*
+ function. Its arguments are the elements of the iterables corresponding to
+ the key list::
+
+ >>> names = ('a', 'b', 'c')
+ >>> lengths = (1, 2, 3)
+ >>> widths = (5, 2, 1)
+ >>> def area(length, width):
+ ... return length * width
+ >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
+ [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
+
+ Set *reverse* to ``True`` to sort in descending order.
+
+ >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
+ [(3, 2, 1), ('a', 'b', 'c')]
+
+ """
+ if key is None:
+ # if there is no key function, the key argument to sorted is an
+ # itemgetter
+ key_argument = itemgetter(*key_list)
+ else:
+ # if there is a key function, call it with the items at the offsets
+ # specified by the key list as arguments
+ key_list = list(key_list)
+ if len(key_list) == 1:
+ # if key_list contains a single item, pass the item at that offset
+ # as the only argument to the key function
+ key_offset = key_list[0]
+ key_argument = lambda zipped_items: key(zipped_items[key_offset])
+ else:
+ # if key_list contains multiple items, use itemgetter to return a
+ # tuple of items, which we pass as *args to the key function
+ get_key_items = itemgetter(*key_list)
+ key_argument = lambda zipped_items: key(
+ *get_key_items(zipped_items)
+ )
+
+ return list(
+ zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
+ )
+
+
+def unzip(iterable):
+ """The inverse of :func:`zip`, this function disaggregates the elements
+ of the zipped *iterable*.
+
+ The ``i``-th iterable contains the ``i``-th element from each element
+ of the zipped iterable. The first element is used to determine the
+ length of the remaining elements.
+
+ >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
+ >>> letters, numbers = unzip(iterable)
+ >>> list(letters)
+ ['a', 'b', 'c', 'd']
+ >>> list(numbers)
+ [1, 2, 3, 4]
+
+ This is similar to using ``zip(*iterable)``, but it avoids reading
+ *iterable* into memory. Note, however, that this function uses
+ :func:`itertools.tee` and thus may require significant storage.
+
+ """
+ head, iterable = spy(iter(iterable))
+ if not head:
+ # empty iterable, e.g. zip([], [], [])
+ return ()
+ # spy returns a one-length iterable as head
+ head = head[0]
+ iterables = tee(iterable, len(head))
+
+ def itemgetter(i):
+ def getter(obj):
+ try:
+ return obj[i]
+ except IndexError:
+ # basically if we have an iterable like
+ # iter([(1, 2, 3), (4, 5), (6,)])
+ # the second unzipped iterable would fail at the third tuple
+ # since it would try to access tup[1]
+ # same with the third unzipped iterable and the second tuple
+ # to support these "improperly zipped" iterables,
+ # we create a custom itemgetter
+ # which just stops the unzipped iterables
+ # at first length mismatch
+ raise StopIteration
+
+ return getter
+
+ return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
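+
+
+ # A sketch of the "improperly zipped" case the custom itemgetter above
+ # handles: each unzipped iterable stops at the first tuple that is too
+ # short for its index:
+ # >>> letters, numbers = unzip([('a', 1), ('b', 2), ('c',)])
+ # >>> list(letters)
+ # ['a', 'b', 'c']
+ # >>> list(numbers)
+ # [1, 2]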
+
+
+def divide(n, iterable):
+ """Divide the elements from *iterable* into *n* parts, maintaining
+ order.
+
+ >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
+ >>> list(group_1)
+ [1, 2, 3]
+ >>> list(group_2)
+ [4, 5, 6]
+
+ If the length of *iterable* is not evenly divisible by *n*, then the
+ length of the returned iterables will not be identical:
+
+ >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
+ >>> [list(c) for c in children]
+ [[1, 2, 3], [4, 5], [6, 7]]
+
+ If the length of the iterable is smaller than *n*, then the last returned
+ iterables will be empty:
+
+ >>> children = divide(5, [1, 2, 3])
+ >>> [list(c) for c in children]
+ [[1], [2], [3], [], []]
+
+ This function will exhaust the iterable before returning and may require
+ significant storage. If order is not important, see :func:`distribute`,
+ which does not first pull the iterable into memory.
+
+ """
+ if n < 1:
+ raise ValueError('n must be at least 1')
+
+ try:
+ iterable[:0]
+ except TypeError:
+ seq = tuple(iterable)
+ else:
+ seq = iterable
+
+ q, r = divmod(len(seq), n)
+
+ ret = []
+ stop = 0
+ for i in range(1, n + 1):
+ start = stop
+ stop += q + 1 if i <= r else q
+ ret.append(iter(seq[start:stop]))
+
+ return ret
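+
+
+ # Worked split for the divmod logic above: dividing 7 items into n = 3
+ # parts gives q, r = divmod(7, 3) = (2, 1); the first r = 1 part gets
+ # q + 1 = 3 items and the rest get q = 2, for sizes (3, 2, 2).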
+
+
+def always_iterable(obj, base_type=(str, bytes)):
+ """If *obj* is iterable, return an iterator over its items::
+
+ >>> obj = (1, 2, 3)
+ >>> list(always_iterable(obj))
+ [1, 2, 3]
+
+ If *obj* is not iterable, return a one-item iterable containing *obj*::
+
+ >>> obj = 1
+ >>> list(always_iterable(obj))
+ [1]
+
+ If *obj* is ``None``, return an empty iterable:
+
+ >>> obj = None
+ >>> list(always_iterable(None))
+ []
+
+ By default, binary and text strings are not considered iterable::
+
+ >>> obj = 'foo'
+ >>> list(always_iterable(obj))
+ ['foo']
+
+ If *base_type* is set, objects for which ``isinstance(obj, base_type)``
+ returns ``True`` won't be considered iterable.
+
+ >>> obj = {'a': 1}
+ >>> list(always_iterable(obj)) # Iterate over the dict's keys
+ ['a']
+ >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
+ [{'a': 1}]
+
+ Set *base_type* to ``None`` to avoid any special handling and treat objects
+ Python considers iterable as iterable:
+
+ >>> obj = 'foo'
+ >>> list(always_iterable(obj, base_type=None))
+ ['f', 'o', 'o']
+ """
+ if obj is None:
+ return iter(())
+
+ if (base_type is not None) and isinstance(obj, base_type):
+ return iter((obj,))
+
+ try:
+ return iter(obj)
+ except TypeError:
+ return iter((obj,))
+
+
+def adjacent(predicate, iterable, distance=1):
+ """Return an iterable over `(bool, item)` tuples where the `item` is
+ drawn from *iterable* and the `bool` indicates whether
+ that item satisfies the *predicate* or is adjacent to an item that does.
+
+ For example, to find whether items are adjacent to a ``3``::
+
+ >>> list(adjacent(lambda x: x == 3, range(6)))
+ [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
+
+ Set *distance* to change what counts as adjacent. For example, to find
+ whether items are two places away from a ``3``:
+
+ >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
+ [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
+
+ This is useful for contextualizing the results of a search function.
+ For example, a code comparison tool might want to identify lines that
+ have changed, but also surrounding lines to give the viewer of the diff
+ context.
+
+ The predicate function will only be called once for each item in the
+ iterable.
+
+ See also :func:`groupby_transform`, which can be used with this function
+ to group ranges of items with the same `bool` value.
+
+ """
+ # Allow distance=0 mainly for testing that it reproduces results with map()
+ if distance < 0:
+ raise ValueError('distance must be at least 0')
+
+ i1, i2 = tee(iterable)
+ padding = [False] * distance
+ selected = chain(padding, map(predicate, i1), padding)
+ adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
+ return zip(adjacent_to_selected, i2)
+
+
+def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
+ """An extension of :func:`itertools.groupby` that can apply transformations
+ to the grouped data.
+
+ * *keyfunc* is a function computing a key value for each item in *iterable*
+ * *valuefunc* is a function that transforms the individual items from
+ *iterable* after grouping
+ * *reducefunc* is a function that transforms each group of items
+
+ >>> iterable = 'aAAbBBcCC'
+ >>> keyfunc = lambda k: k.upper()
+ >>> valuefunc = lambda v: v.lower()
+ >>> reducefunc = lambda g: ''.join(g)
+ >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
+ [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
+
+ Each optional argument defaults to an identity function if not specified.
+
+ :func:`groupby_transform` is useful when grouping elements of an iterable
+ using a separate iterable as the key. To do this, :func:`zip` the iterables
+ and pass a *keyfunc* that extracts the first element and a *valuefunc*
+ that extracts the second element::
+
+ >>> from operator import itemgetter
+ >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
+ >>> values = 'abcdefghi'
+ >>> iterable = zip(keys, values)
+ >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
+ >>> [(k, ''.join(g)) for k, g in grouper]
+ [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
+
+ Note that the order of items in the iterable is significant.
+ Only adjacent items are grouped together, so if you don't want any
+ duplicate groups, you should sort the iterable by the key function.
+
+ """
+ ret = groupby(iterable, keyfunc)
+ if valuefunc:
+ ret = ((k, map(valuefunc, g)) for k, g in ret)
+ if reducefunc:
+ ret = ((k, reducefunc(g)) for k, g in ret)
+
+ return ret
+
+
+class numeric_range(abc.Sequence, abc.Hashable):
+ """An extension of the built-in ``range()`` function whose arguments can
+ be any orderable numeric type.
+
+ With only *stop* specified, *start* defaults to ``0`` and *step*
+ defaults to ``1``. The output items will match the type of *stop*:
+
+ >>> list(numeric_range(3.5))
+ [0.0, 1.0, 2.0, 3.0]
+
+ With only *start* and *stop* specified, *step* defaults to ``1``. The
+ output items will match the type of *start*:
+
+ >>> from decimal import Decimal
+ >>> start = Decimal('2.1')
+ >>> stop = Decimal('5.1')
+ >>> list(numeric_range(start, stop))
+ [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
+
+ With *start*, *stop*, and *step* specified the output items will match
+ the type of ``start + step``:
+
+ >>> from fractions import Fraction
+ >>> start = Fraction(1, 2) # Start at 1/2
+ >>> stop = Fraction(5, 2) # End at 5/2
+ >>> step = Fraction(1, 2) # Count by 1/2
+ >>> list(numeric_range(start, stop, step))
+ [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
+
+ If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
+
+ >>> list(numeric_range(3, -1, -1.0))
+ [3.0, 2.0, 1.0, 0.0]
+
+ Be aware of the limitations of floating point numbers; the representation
+ of the yielded numbers may be surprising.
+
+ ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
+ is a ``datetime.timedelta`` object:
+
+ >>> import datetime
+ >>> start = datetime.datetime(2019, 1, 1)
+ >>> stop = datetime.datetime(2019, 1, 3)
+ >>> step = datetime.timedelta(days=1)
+ >>> items = iter(numeric_range(start, stop, step))
+ >>> next(items)
+ datetime.datetime(2019, 1, 1, 0, 0)
+ >>> next(items)
+ datetime.datetime(2019, 1, 2, 0, 0)
+
+ """
+
+ _EMPTY_HASH = hash(range(0, 0))
+
+ def __init__(self, *args):
+ argc = len(args)
+ if argc == 1:
+ (self._stop,) = args
+ self._start = type(self._stop)(0)
+ self._step = type(self._stop - self._start)(1)
+ elif argc == 2:
+ self._start, self._stop = args
+ self._step = type(self._stop - self._start)(1)
+ elif argc == 3:
+ self._start, self._stop, self._step = args
+ elif argc == 0:
+ raise TypeError(
+ 'numeric_range expected at least '
+ '1 argument, got {}'.format(argc)
+ )
+ else:
+ raise TypeError(
+ 'numeric_range expected at most '
+ '3 arguments, got {}'.format(argc)
+ )
+
+ self._zero = type(self._step)(0)
+ if self._step == self._zero:
+ raise ValueError('numeric_range() arg 3 must not be zero')
+ self._growing = self._step > self._zero
+ self._init_len()
+
+ def __bool__(self):
+ if self._growing:
+ return self._start < self._stop
+ else:
+ return self._start > self._stop
+
+ def __contains__(self, elem):
+ if self._growing:
+ if self._start <= elem < self._stop:
+ return (elem - self._start) % self._step == self._zero
+ else:
+ if self._start >= elem > self._stop:
+ return (self._start - elem) % (-self._step) == self._zero
+
+ return False
+
+ def __eq__(self, other):
+ if isinstance(other, numeric_range):
+ empty_self = not bool(self)
+ empty_other = not bool(other)
+ if empty_self or empty_other:
+ return empty_self and empty_other # True if both empty
+ else:
+ return (
+ self._start == other._start
+ and self._step == other._step
+ and self._get_by_index(-1) == other._get_by_index(-1)
+ )
+ else:
+ return False
+
+ def __getitem__(self, key):
+ if isinstance(key, int):
+ return self._get_by_index(key)
+ elif isinstance(key, slice):
+ step = self._step if key.step is None else key.step * self._step
+
+ if key.start is None or key.start <= -self._len:
+ start = self._start
+ elif key.start >= self._len:
+ start = self._stop
+ else: # -self._len < key.start < self._len
+ start = self._get_by_index(key.start)
+
+ if key.stop is None or key.stop >= self._len:
+ stop = self._stop
+ elif key.stop <= -self._len:
+ stop = self._start
+ else: # -self._len < key.stop < self._len
+ stop = self._get_by_index(key.stop)
+
+ return numeric_range(start, stop, step)
+ else:
+ raise TypeError(
+ 'numeric range indices must be '
+ 'integers or slices, not {}'.format(type(key).__name__)
+ )
+
+ def __hash__(self):
+ if self:
+ return hash((self._start, self._get_by_index(-1), self._step))
+ else:
+ return self._EMPTY_HASH
+
+ def __iter__(self):
+ values = (self._start + (n * self._step) for n in count())
+ if self._growing:
+ return takewhile(partial(gt, self._stop), values)
+ else:
+ return takewhile(partial(lt, self._stop), values)
+
+ def __len__(self):
+ return self._len
+
+ def _init_len(self):
+ if self._growing:
+ start = self._start
+ stop = self._stop
+ step = self._step
+ else:
+ start = self._stop
+ stop = self._start
+ step = -self._step
+ distance = stop - start
+ if distance <= self._zero:
+ self._len = 0
+ else: # distance > 0 and step > 0: regular euclidean division
+ q, r = divmod(distance, step)
+ self._len = int(q) + int(r != self._zero)
+
+ def __reduce__(self):
+ return numeric_range, (self._start, self._stop, self._step)
+
+ def __repr__(self):
+ if self._step == 1:
+ return "numeric_range({}, {})".format(
+ repr(self._start), repr(self._stop)
+ )
+ else:
+ return "numeric_range({}, {}, {})".format(
+ repr(self._start), repr(self._stop), repr(self._step)
+ )
+
+ def __reversed__(self):
+ return iter(
+ numeric_range(
+ self._get_by_index(-1), self._start - self._step, -self._step
+ )
+ )
+
+ def count(self, value):
+ return int(value in self)
+
+ def index(self, value):
+ if self._growing:
+ if self._start <= value < self._stop:
+ q, r = divmod(value - self._start, self._step)
+ if r == self._zero:
+ return int(q)
+ else:
+ if self._start >= value > self._stop:
+ q, r = divmod(self._start - value, -self._step)
+ if r == self._zero:
+ return int(q)
+
+ raise ValueError("{} is not in numeric range".format(value))
+
+ def _get_by_index(self, i):
+ if i < 0:
+ i += self._len
+ if i < 0 or i >= self._len:
+ raise IndexError("numeric range object index out of range")
+ return self._start + i * self._step
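+
+
+ # Worked length for _init_len above: numeric_range(1, 10, 3) has
+ # distance = 9 and divmod(9, 3) = (3, 0), so len = 3 + 0 = 3,
+ # covering the values 1, 4, and 7.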
+
+
+def count_cycle(iterable, n=None):
+ """Cycle through the items from *iterable* up to *n* times, yielding
+ the number of completed cycles along with each item. If *n* is omitted the
+ process repeats indefinitely.
+
+ >>> list(count_cycle('AB', 3))
+ [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
+
+ """
+ iterable = tuple(iterable)
+ if not iterable:
+ return iter(())
+ counter = count() if n is None else range(n)
+ return ((i, item) for i in counter for item in iterable)
+
+
+def mark_ends(iterable):
+ """Yield 3-tuples of the form ``(is_first, is_last, item)``.
+
+ >>> list(mark_ends('ABC'))
+ [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
+
+ Use this when looping over an iterable to take special action on its first
+ and/or last items:
+
+ >>> iterable = ['Header', 100, 200, 'Footer']
+ >>> total = 0
+ >>> for is_first, is_last, item in mark_ends(iterable):
+ ... if is_first:
+ ... continue # Skip the header
+ ... if is_last:
+ ... continue # Skip the footer
+ ... total += item
+ >>> print(total)
+ 300
+ """
+ it = iter(iterable)
+
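+    # Keep one item of lookahead in *b*; when the source is exhausted,
+    # the held item *a* is known to be the last one.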
+ try:
+ b = next(it)
+ except StopIteration:
+ return
+
+ try:
+ for i in count():
+ a = b
+ b = next(it)
+ yield i == 0, False, a
+
+ except StopIteration:
+ yield i == 0, True, a
+
+
+def locate(iterable, pred=bool, window_size=None):
+ """Yield the index of each item in *iterable* for which *pred* returns
+ ``True``.
+
+ *pred* defaults to :func:`bool`, which will select truthy items:
+
+ >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
+ [1, 2, 4]
+
+ Set *pred* to a custom function to, e.g., find the indexes for a particular
+ item.
+
+ >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
+ [1, 3]
+
+ If *window_size* is given, then the *pred* function will be called with
+ that many items. This enables searching for sub-sequences:
+
+ >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+ >>> pred = lambda *args: args == (1, 2, 3)
+ >>> list(locate(iterable, pred=pred, window_size=3))
+ [1, 5, 9]
+
+ Use with :func:`seekable` to find indexes and then retrieve the associated
+ items:
+
+ >>> from itertools import count
+ >>> from more_itertools import seekable
+ >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
+ >>> it = seekable(source)
+ >>> pred = lambda x: x > 100
+ >>> indexes = locate(it, pred=pred)
+ >>> i = next(indexes)
+ >>> it.seek(i)
+ >>> next(it)
+ 106
+
+ """
+ if window_size is None:
+ return compress(count(), map(pred, iterable))
+
+ if window_size < 1:
+ raise ValueError('window size must be at least 1')
+
+ it = windowed(iterable, window_size, fillvalue=_marker)
+ return compress(count(), starmap(pred, it))
+
+
+def lstrip(iterable, pred):
+ """Yield the items from *iterable*, but strip any from the beginning
+ for which *pred* returns ``True``.
+
+ For example, to remove a set of items from the start of an iterable:
+
+ >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+ >>> pred = lambda x: x in {None, False, ''}
+ >>> list(lstrip(iterable, pred))
+ [1, 2, None, 3, False, None]
+
+    This function is analogous to :func:`str.lstrip`, and is essentially
+    a wrapper for :func:`itertools.dropwhile`.
+
+ """
+ return dropwhile(pred, iterable)
+
+
+def rstrip(iterable, pred):
+ """Yield the items from *iterable*, but strip any from the end
+ for which *pred* returns ``True``.
+
+ For example, to remove a set of items from the end of an iterable:
+
+ >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+ >>> pred = lambda x: x in {None, False, ''}
+ >>> list(rstrip(iterable, pred))
+ [None, False, None, 1, 2, None, 3]
+
+ This function is analogous to :func:`str.rstrip`.
+
+ """
+ cache = []
+ cache_append = cache.append
+ cache_clear = cache.clear
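+    # Buffer items for which *pred* is true; emit the buffer only when a
+    # later non-matching item proves they were not at the end.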
+ for x in iterable:
+ if pred(x):
+ cache_append(x)
+ else:
+ yield from cache
+ cache_clear()
+ yield x
+
+
+def strip(iterable, pred):
+ """Yield the items from *iterable*, but strip any from the
+ beginning and end for which *pred* returns ``True``.
+
+ For example, to remove a set of items from both ends of an iterable:
+
+ >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+ >>> pred = lambda x: x in {None, False, ''}
+ >>> list(strip(iterable, pred))
+ [1, 2, None, 3]
+
+ This function is analogous to :func:`str.strip`.
+
+ """
+ return rstrip(lstrip(iterable, pred), pred)
+
+
+class islice_extended:
+ """An extension of :func:`itertools.islice` that supports negative values
+ for *stop*, *start*, and *step*.
+
+ >>> iterable = iter('abcdefgh')
+ >>> list(islice_extended(iterable, -4, -1))
+ ['e', 'f', 'g']
+
+ Slices with negative values require some caching of *iterable*, but this
+ function takes care to minimize the amount of memory required.
+
+ For example, you can use a negative step with an infinite iterator:
+
+ >>> from itertools import count
+ >>> list(islice_extended(count(), 110, 99, -2))
+ [110, 108, 106, 104, 102, 100]
+
+ You can also use slice notation directly:
+
+ >>> iterable = map(str, count())
+ >>> it = islice_extended(iterable)[10:20:2]
+ >>> list(it)
+ ['10', '12', '14', '16', '18']
+
+ """
+
+ def __init__(self, iterable, *args):
+ it = iter(iterable)
+ if args:
+ self._iterable = _islice_helper(it, slice(*args))
+ else:
+ self._iterable = it
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self._iterable)
+
+ def __getitem__(self, key):
+ if isinstance(key, slice):
+ return islice_extended(_islice_helper(self._iterable, key))
+
+ raise TypeError('islice_extended.__getitem__ argument must be a slice')
+
+
+def _islice_helper(it, s):
+ start = s.start
+ stop = s.stop
+ if s.step == 0:
+ raise ValueError('step argument must be a non-zero integer or None.')
+ step = s.step or 1
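+    # islice itself rejects negative indices, so negative *start*/*stop*
+    # values are resolved below by buffering a bounded number of items.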
+
+ if step > 0:
+ start = 0 if (start is None) else start
+
+ if start < 0:
+ # Consume all but the last -start items
+ cache = deque(enumerate(it, 1), maxlen=-start)
+ len_iter = cache[-1][0] if cache else 0
+
+ # Adjust start to be positive
+ i = max(len_iter + start, 0)
+
+ # Adjust stop to be positive
+ if stop is None:
+ j = len_iter
+ elif stop >= 0:
+ j = min(stop, len_iter)
+ else:
+ j = max(len_iter + stop, 0)
+
+ # Slice the cache
+ n = j - i
+ if n <= 0:
+ return
+
+ for index, item in islice(cache, 0, n, step):
+ yield item
+ elif (stop is not None) and (stop < 0):
+ # Advance to the start position
+ next(islice(it, start, start), None)
+
+ # When stop is negative, we have to carry -stop items while
+ # iterating
+ cache = deque(islice(it, -stop), maxlen=-stop)
+
+ for index, item in enumerate(it):
+ cached_item = cache.popleft()
+ if index % step == 0:
+ yield cached_item
+ cache.append(item)
+ else:
+ # When both start and stop are positive we have the normal case
+ yield from islice(it, start, stop, step)
+ else:
+ start = -1 if (start is None) else start
+
+ if (stop is not None) and (stop < 0):
+ # Consume all but the last items
+ n = -stop - 1
+ cache = deque(enumerate(it, 1), maxlen=n)
+ len_iter = cache[-1][0] if cache else 0
+
+ # If start and stop are both negative they are comparable and
+ # we can just slice. Otherwise we can adjust start to be negative
+ # and then slice.
+ if start < 0:
+ i, j = start, stop
+ else:
+ i, j = min(start - len_iter, -1), None
+
+ for index, item in list(cache)[i:j:step]:
+ yield item
+ else:
+ # Advance to the stop position
+ if stop is not None:
+ m = stop + 1
+ next(islice(it, m, m), None)
+
+ # stop is positive, so if start is negative they are not comparable
+ # and we need the rest of the items.
+ if start < 0:
+ i = start
+ n = None
+ # stop is None and start is positive, so we just need items up to
+ # the start index.
+ elif stop is None:
+ i = None
+ n = start + 1
+ # Both stop and start are positive, so they are comparable.
+ else:
+ i = None
+ n = start - stop
+ if n <= 0:
+ return
+
+ cache = list(islice(it, n))
+
+ yield from cache[i::step]
+
+
+def always_reversible(iterable):
+ """An extension of :func:`reversed` that supports all iterables, not
+ just those which implement the ``Reversible`` or ``Sequence`` protocols.
+
+ >>> print(*always_reversible(x for x in range(3)))
+ 2 1 0
+
+ If the iterable is already reversible, this function returns the
+ result of :func:`reversed()`. If the iterable is not reversible,
+ this function will cache the remaining items in the iterable and
+ yield them in reverse order, which may require significant storage.
+ """
+ try:
+ return reversed(iterable)
+ except TypeError:
+ return reversed(list(iterable))
+
+
+def consecutive_groups(iterable, ordering=lambda x: x):
+ """Yield groups of consecutive items using :func:`itertools.groupby`.
+ The *ordering* function determines whether two items are adjacent by
+ returning their position.
+
+ By default, the ordering function is the identity function. This is
+ suitable for finding runs of numbers:
+
+ >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
+ >>> for group in consecutive_groups(iterable):
+ ... print(list(group))
+ [1]
+ [10, 11, 12]
+ [20]
+ [30, 31, 32, 33]
+ [40]
+
+ For finding runs of adjacent letters, try using the :meth:`index` method
+ of a string of letters:
+
+ >>> from string import ascii_lowercase
+ >>> iterable = 'abcdfgilmnop'
+ >>> ordering = ascii_lowercase.index
+ >>> for group in consecutive_groups(iterable, ordering):
+ ... print(list(group))
+ ['a', 'b', 'c', 'd']
+ ['f', 'g']
+ ['i']
+ ['l', 'm', 'n', 'o', 'p']
+
+    Each group of consecutive items is an iterator that shares its source with
+    *iterable*. When an output group is advanced, the previous group is
+ no longer available unless its elements are copied (e.g., into a ``list``).
+
+ >>> iterable = [1, 2, 11, 12, 21, 22]
+ >>> saved_groups = []
+ >>> for group in consecutive_groups(iterable):
+ ... saved_groups.append(list(group)) # Copy group elements
+ >>> saved_groups
+ [[1, 2], [11, 12], [21, 22]]
+
+ """
+ for k, g in groupby(
+ enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
+ ):
+ yield map(itemgetter(1), g)
+
+
+def difference(iterable, func=sub, *, initial=None):
+ """This function is the inverse of :func:`itertools.accumulate`. By default
+ it will compute the first difference of *iterable* using
+ :func:`operator.sub`:
+
+ >>> from itertools import accumulate
+ >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
+ >>> list(difference(iterable))
+ [0, 1, 2, 3, 4]
+
+ *func* defaults to :func:`operator.sub`, but other functions can be
+ specified. They will be applied as follows::
+
+ A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
+
+ For example, to do progressive division:
+
+ >>> iterable = [1, 2, 6, 24, 120]
+ >>> func = lambda x, y: x // y
+ >>> list(difference(iterable, func))
+ [1, 2, 3, 4, 5]
+
+ If the *initial* keyword is set, the first element will be skipped when
+ computing successive differences.
+
+ >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
+ >>> list(difference(it, initial=10))
+ [1, 2, 3]
+
+ """
+ a, b = tee(iterable)
+ try:
+ first = [next(b)]
+ except StopIteration:
+ return iter([])
+
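+    # When *initial* is set, the first element of *iterable* is the
+    # initial value itself rather than a genuine difference, so drop it.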
+ if initial is not None:
+ first = []
+
+ return chain(first, starmap(func, zip(b, a)))
+
+
+class SequenceView(Sequence):
+ """Return a read-only view of the sequence object *target*.
+
+ :class:`SequenceView` objects are analogous to Python's built-in
+ "dictionary view" types. They provide a dynamic view of a sequence's items,
+ meaning that when the sequence updates, so does the view.
+
+ >>> seq = ['0', '1', '2']
+ >>> view = SequenceView(seq)
+ >>> view
+ SequenceView(['0', '1', '2'])
+ >>> seq.append('3')
+ >>> view
+ SequenceView(['0', '1', '2', '3'])
+
+ Sequence views support indexing, slicing, and length queries. They act
+ like the underlying sequence, except they don't allow assignment:
+
+ >>> view[1]
+ '1'
+ >>> view[1:-1]
+ ['1', '2']
+ >>> len(view)
+ 4
+
+ Sequence views are useful as an alternative to copying, as they don't
+ require (much) extra storage.
+
+ """
+
+ def __init__(self, target):
+ if not isinstance(target, Sequence):
+ raise TypeError
+ self._target = target
+
+ def __getitem__(self, index):
+ return self._target[index]
+
+ def __len__(self):
+ return len(self._target)
+
+ def __repr__(self):
+ return '{}({})'.format(self.__class__.__name__, repr(self._target))
+
+
+class seekable:
+ """Wrap an iterator to allow for seeking backward and forward. This
+ progressively caches the items in the source iterable so they can be
+ re-visited.
+
+ Call :meth:`seek` with an index to seek to that position in the source
+ iterable.
+
+ To "reset" an iterator, seek to ``0``:
+
+ >>> from itertools import count
+ >>> it = seekable((str(n) for n in count()))
+ >>> next(it), next(it), next(it)
+ ('0', '1', '2')
+ >>> it.seek(0)
+ >>> next(it), next(it), next(it)
+ ('0', '1', '2')
+ >>> next(it)
+ '3'
+
+ You can also seek forward:
+
+ >>> it = seekable((str(n) for n in range(20)))
+ >>> it.seek(10)
+ >>> next(it)
+ '10'
+ >>> it.seek(20) # Seeking past the end of the source isn't a problem
+ >>> list(it)
+ []
+ >>> it.seek(0) # Resetting works even after hitting the end
+ >>> next(it), next(it), next(it)
+ ('0', '1', '2')
+
+ Call :meth:`peek` to look ahead one item without advancing the iterator:
+
+ >>> it = seekable('1234')
+ >>> it.peek()
+ '1'
+ >>> list(it)
+ ['1', '2', '3', '4']
+ >>> it.peek(default='empty')
+ 'empty'
+
+ Before the iterator is at its end, calling :func:`bool` on it will return
+    ``True``. Afterward, it will return ``False``:
+
+ >>> it = seekable('5678')
+ >>> bool(it)
+ True
+ >>> list(it)
+ ['5', '6', '7', '8']
+ >>> bool(it)
+ False
+
+ You may view the contents of the cache with the :meth:`elements` method.
+ That returns a :class:`SequenceView`, a view that updates automatically:
+
+ >>> it = seekable((str(n) for n in range(10)))
+ >>> next(it), next(it), next(it)
+ ('0', '1', '2')
+ >>> elements = it.elements()
+ >>> elements
+ SequenceView(['0', '1', '2'])
+ >>> next(it)
+ '3'
+ >>> elements
+ SequenceView(['0', '1', '2', '3'])
+
+ By default, the cache grows as the source iterable progresses, so beware of
+ wrapping very large or infinite iterables. Supply *maxlen* to limit the
+ size of the cache (this of course limits how far back you can seek).
+
+ >>> from itertools import count
+ >>> it = seekable((str(n) for n in count()), maxlen=2)
+ >>> next(it), next(it), next(it), next(it)
+ ('0', '1', '2', '3')
+ >>> list(it.elements())
+ ['2', '3']
+ >>> it.seek(0)
+ >>> next(it), next(it), next(it), next(it)
+ ('2', '3', '4', '5')
+ >>> next(it)
+ '6'
+
+ """
+
+ def __init__(self, iterable, maxlen=None):
+ self._source = iter(iterable)
+ if maxlen is None:
+ self._cache = []
+ else:
+ self._cache = deque([], maxlen)
+ self._index = None
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
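+        # After seeking backward, serve items from the cache until the
+        # index catches up with the source iterator again.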
+ if self._index is not None:
+ try:
+ item = self._cache[self._index]
+ except IndexError:
+ self._index = None
+ else:
+ self._index += 1
+ return item
+
+ item = next(self._source)
+ self._cache.append(item)
+ return item
+
+ def __bool__(self):
+ try:
+ self.peek()
+ except StopIteration:
+ return False
+ return True
+
+ def peek(self, default=_marker):
+ try:
+ peeked = next(self)
+ except StopIteration:
+ if default is _marker:
+ raise
+ return default
+ if self._index is None:
+ self._index = len(self._cache)
+ self._index -= 1
+ return peeked
+
+ def elements(self):
+ return SequenceView(self._cache)
+
+ def seek(self, index):
+ self._index = index
+ remainder = index - len(self._cache)
+ if remainder > 0:
+ consume(self, remainder)
+
+
+class run_length:
+ """
+ :func:`run_length.encode` compresses an iterable with run-length encoding.
+ It yields groups of repeated items with the count of how many times they
+ were repeated:
+
+ >>> uncompressed = 'abbcccdddd'
+ >>> list(run_length.encode(uncompressed))
+ [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
+
+ :func:`run_length.decode` decompresses an iterable that was previously
+ compressed with run-length encoding. It yields the items of the
+ decompressed iterable:
+
+ >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
+ >>> list(run_length.decode(compressed))
+ ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
+
+ """
+
+ @staticmethod
+ def encode(iterable):
+ return ((k, ilen(g)) for k, g in groupby(iterable))
+
+ @staticmethod
+ def decode(iterable):
+ return chain.from_iterable(repeat(k, n) for k, n in iterable)
+
+
+def exactly_n(iterable, n, predicate=bool):
+ """Return ``True`` if exactly ``n`` items in the iterable are ``True``
+ according to the *predicate* function.
+
+ >>> exactly_n([True, True, False], 2)
+ True
+ >>> exactly_n([True, True, False], 1)
+ False
+ >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
+ True
+
+ The iterable will be advanced until ``n + 1`` truthy items are encountered,
+ so avoid calling it on infinite iterables.
+
+ """
+ return len(take(n + 1, filter(predicate, iterable))) == n
+
+
+def circular_shifts(iterable):
+ """Return a list of circular shifts of *iterable*.
+
+ >>> circular_shifts(range(4))
+ [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
+ """
+ lst = list(iterable)
+ return take(len(lst), windowed(cycle(lst), len(lst)))
+
+
+def make_decorator(wrapping_func, result_index=0):
+ """Return a decorator version of *wrapping_func*, which is a function that
+ modifies an iterable. *result_index* is the position in that function's
+ signature where the iterable goes.
+
+ This lets you use itertools on the "production end," i.e. at function
+ definition. This can augment what the function returns without changing the
+ function's code.
+
+ For example, to produce a decorator version of :func:`chunked`:
+
+ >>> from more_itertools import chunked
+ >>> chunker = make_decorator(chunked, result_index=0)
+ >>> @chunker(3)
+ ... def iter_range(n):
+ ... return iter(range(n))
+ ...
+ >>> list(iter_range(9))
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+
+ To only allow truthy items to be returned:
+
+ >>> truth_serum = make_decorator(filter, result_index=1)
+ >>> @truth_serum(bool)
+ ... def boolean_test():
+ ... return [0, 1, '', ' ', False, True]
+ ...
+ >>> list(boolean_test())
+ [1, ' ', True]
+
+ The :func:`peekable` and :func:`seekable` wrappers make for practical
+ decorators:
+
+ >>> from more_itertools import peekable
+ >>> peekable_function = make_decorator(peekable)
+ >>> @peekable_function()
+ ... def str_range(*args):
+ ... return (str(x) for x in range(*args))
+ ...
+ >>> it = str_range(1, 20, 2)
+ >>> next(it), next(it), next(it)
+ ('1', '3', '5')
+ >>> it.peek()
+ '7'
+ >>> next(it)
+ '7'
+
+ """
+ # See https://sites.google.com/site/bbayles/index/decorator_factory for
+ # notes on how this works.
+ def decorator(*wrapping_args, **wrapping_kwargs):
+ def outer_wrapper(f):
+ def inner_wrapper(*args, **kwargs):
+ result = f(*args, **kwargs)
+ wrapping_args_ = list(wrapping_args)
+ wrapping_args_.insert(result_index, result)
+ return wrapping_func(*wrapping_args_, **wrapping_kwargs)
+
+ return inner_wrapper
+
+ return outer_wrapper
+
+ return decorator
+
+
+def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
+ """Return a dictionary that maps the items in *iterable* to categories
+ defined by *keyfunc*, transforms them with *valuefunc*, and
+ then summarizes them by category with *reducefunc*.
+
+ *valuefunc* defaults to the identity function if it is unspecified.
+ If *reducefunc* is unspecified, no summarization takes place:
+
+ >>> keyfunc = lambda x: x.upper()
+ >>> result = map_reduce('abbccc', keyfunc)
+ >>> sorted(result.items())
+ [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
+
+ Specifying *valuefunc* transforms the categorized items:
+
+ >>> keyfunc = lambda x: x.upper()
+ >>> valuefunc = lambda x: 1
+ >>> result = map_reduce('abbccc', keyfunc, valuefunc)
+ >>> sorted(result.items())
+ [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
+
+ Specifying *reducefunc* summarizes the categorized items:
+
+ >>> keyfunc = lambda x: x.upper()
+ >>> valuefunc = lambda x: 1
+ >>> reducefunc = sum
+ >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
+ >>> sorted(result.items())
+ [('A', 1), ('B', 2), ('C', 3)]
+
+ You may want to filter the input iterable before applying the map/reduce
+ procedure:
+
+ >>> all_items = range(30)
+ >>> items = [x for x in all_items if 10 <= x <= 20] # Filter
+ >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
+ >>> categories = map_reduce(items, keyfunc=keyfunc)
+ >>> sorted(categories.items())
+ [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
+ >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
+ >>> sorted(summaries.items())
+ [(0, 90), (1, 75)]
+
+ Note that all items in the iterable are gathered into a list before the
+ summarization step, which may require significant storage.
+
+ The returned object is a :obj:`collections.defaultdict` with the
+ ``default_factory`` set to ``None``, such that it behaves like a normal
+ dictionary.
+
+ """
+ valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
+
+ ret = defaultdict(list)
+ for item in iterable:
+ key = keyfunc(item)
+ value = valuefunc(item)
+ ret[key].append(value)
+
+ if reducefunc is not None:
+ for key, value_list in ret.items():
+ ret[key] = reducefunc(value_list)
+
+ ret.default_factory = None
+ return ret
+
+
+def rlocate(iterable, pred=bool, window_size=None):
+ """Yield the index of each item in *iterable* for which *pred* returns
+ ``True``, starting from the right and moving left.
+
+ *pred* defaults to :func:`bool`, which will select truthy items:
+
+ >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
+ [4, 2, 1]
+
+ Set *pred* to a custom function to, e.g., find the indexes for a particular
+ item:
+
+ >>> iterable = iter('abcb')
+ >>> pred = lambda x: x == 'b'
+ >>> list(rlocate(iterable, pred))
+ [3, 1]
+
+ If *window_size* is given, then the *pred* function will be called with
+ that many items. This enables searching for sub-sequences:
+
+ >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+ >>> pred = lambda *args: args == (1, 2, 3)
+ >>> list(rlocate(iterable, pred=pred, window_size=3))
+ [9, 5, 1]
+
+ Beware, this function won't return anything for infinite iterables.
+ If *iterable* is reversible, ``rlocate`` will reverse it and search from
+ the right. Otherwise, it will search from the left and return the results
+ in reverse order.
+
+    See :func:`locate` for other example applications.
+
+ """
+ if window_size is None:
+ try:
+ len_iter = len(iterable)
+ return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
+ except TypeError:
+ pass
+
+ return reversed(list(locate(iterable, pred, window_size)))
+
+
+def replace(iterable, pred, substitutes, count=None, window_size=1):
+ """Yield the items from *iterable*, replacing the items for which *pred*
+ returns ``True`` with the items from the iterable *substitutes*.
+
+ >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
+ >>> pred = lambda x: x == 0
+ >>> substitutes = (2, 3)
+ >>> list(replace(iterable, pred, substitutes))
+ [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
+
+ If *count* is given, the number of replacements will be limited:
+
+ >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
+ >>> pred = lambda x: x == 0
+ >>> substitutes = [None]
+ >>> list(replace(iterable, pred, substitutes, count=2))
+ [1, 1, None, 1, 1, None, 1, 1, 0]
+
+ Use *window_size* to control the number of items passed as arguments to
+ *pred*. This allows for locating and replacing subsequences.
+
+ >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
+ >>> window_size = 3
+ >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
+ >>> substitutes = [3, 4] # Splice in these items
+ >>> list(replace(iterable, pred, substitutes, window_size=window_size))
+ [3, 4, 5, 3, 4, 5]
+
+ """
+ if window_size < 1:
+ raise ValueError('window_size must be at least 1')
+
+ # Save the substitutes iterable, since it's used more than once
+ substitutes = tuple(substitutes)
+
+ # Add padding such that the number of windows matches the length of the
+ # iterable
+ it = chain(iterable, [_marker] * (window_size - 1))
+ windows = windowed(it, window_size)
+
+ n = 0
+ for w in windows:
+ # If the current window matches our predicate (and we haven't hit
+ # our maximum number of replacements), splice in the substitutes
+ # and then consume the following windows that overlap with this one.
+ # For example, if the iterable is (0, 1, 2, 3, 4...)
+ # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
+ # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
+ if pred(*w):
+ if (count is None) or (n < count):
+ n += 1
+ yield from substitutes
+ consume(windows, window_size - 1)
+ continue
+
+ # If there was no match (or we've reached the replacement limit),
+ # yield the first item from the window.
+ if w and (w[0] is not _marker):
+ yield w[0]
+
+
+def partitions(iterable):
+ """Yield all possible order-preserving partitions of *iterable*.
+
+ >>> iterable = 'abc'
+ >>> for part in partitions(iterable):
+ ... print([''.join(p) for p in part])
+ ['abc']
+ ['a', 'bc']
+ ['ab', 'c']
+ ['a', 'b', 'c']
+
+ This is unrelated to :func:`partition`.
+
+ """
+ sequence = list(iterable)
+ n = len(sequence)
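+    # Each partition corresponds to a subset of the interior cut points
+    # 1..n-1; powerset enumerates every such set of slice boundaries.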
+ for i in powerset(range(1, n)):
+ yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
+
+
+def set_partitions(iterable, k=None):
+ """
+ Yield the set partitions of *iterable* into *k* parts. Set partitions are
+ not order-preserving.
+
+ >>> iterable = 'abc'
+ >>> for part in set_partitions(iterable, 2):
+ ... print([''.join(p) for p in part])
+ ['a', 'bc']
+ ['ab', 'c']
+ ['b', 'ac']
+
+
+ If *k* is not given, every set partition is generated.
+
+ >>> iterable = 'abc'
+ >>> for part in set_partitions(iterable):
+ ... print([''.join(p) for p in part])
+ ['abc']
+ ['a', 'bc']
+ ['ab', 'c']
+ ['b', 'ac']
+ ['a', 'b', 'c']
+
+ """
+ L = list(iterable)
+ n = len(L)
+ if k is not None:
+ if k < 1:
+ raise ValueError(
+ "Can't partition in a negative or zero number of groups"
+ )
+ elif k > n:
+ return
+
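+    # Recurrence: the first element either forms a part by itself
+    # (combined with each (k - 1)-partition of the rest) or joins one of
+    # the parts of each k-partition of the rest.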
+ def set_partitions_helper(L, k):
+ n = len(L)
+ if k == 1:
+ yield [L]
+ elif n == k:
+ yield [[s] for s in L]
+ else:
+ e, *M = L
+ for p in set_partitions_helper(M, k - 1):
+ yield [[e], *p]
+ for p in set_partitions_helper(M, k):
+ for i in range(len(p)):
+ yield p[:i] + [[e] + p[i]] + p[i + 1 :]
+
+ if k is None:
+ for k in range(1, n + 1):
+ yield from set_partitions_helper(L, k)
+ else:
+ yield from set_partitions_helper(L, k)
+
+
+class time_limited:
+ """
+ Yield items from *iterable* until *limit_seconds* have passed.
+ If the time limit expires before all items have been yielded, the
+    ``timed_out`` attribute will be set to ``True``.
+
+ >>> from time import sleep
+ >>> def generator():
+ ... yield 1
+ ... yield 2
+ ... sleep(0.2)
+ ... yield 3
+ >>> iterable = time_limited(0.1, generator())
+ >>> list(iterable)
+ [1, 2]
+ >>> iterable.timed_out
+ True
+
+ Note that the time is checked before each item is yielded, and iteration
+ stops if the time elapsed is greater than *limit_seconds*. If your time
+ limit is 1 second, but it takes 2 seconds to generate the first item from
+ the iterable, the function will run for 2 seconds and not yield anything.
+
+ """
+
+ def __init__(self, limit_seconds, iterable):
+ if limit_seconds < 0:
+            raise ValueError('limit_seconds must be non-negative')
+ self.limit_seconds = limit_seconds
+ self._iterable = iter(iterable)
+ self._start_time = monotonic()
+ self.timed_out = False
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ item = next(self._iterable)
+ if monotonic() - self._start_time > self.limit_seconds:
+ self.timed_out = True
+ raise StopIteration
+
+ return item
+
+
+def only(iterable, default=None, too_long=None):
+ """If *iterable* has only one item, return it.
+ If it has zero items, return *default*.
+ If it has more than one item, raise the exception given by *too_long*,
+ which is ``ValueError`` by default.
+
+ >>> only([], default='missing')
+ 'missing'
+ >>> only([1])
+ 1
+ >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: Expected exactly one item in iterable, but got 1, 2,
+    and perhaps more.
+ >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ TypeError
+
+ Note that :func:`only` attempts to advance *iterable* twice to ensure there
+ is only one item. See :func:`spy` or :func:`peekable` to check
+ iterable contents less destructively.
+ """
+ it = iter(iterable)
+ first_value = next(it, default)
+
+ try:
+ second_value = next(it)
+ except StopIteration:
+ pass
+ else:
+ msg = (
+ 'Expected exactly one item in iterable, but got {!r}, {!r}, '
+ 'and perhaps more.'.format(first_value, second_value)
+ )
+ raise too_long or ValueError(msg)
+
+ return first_value
+
+
+def ichunked(iterable, n):
+ """Break *iterable* into sub-iterables with *n* elements each.
+ :func:`ichunked` is like :func:`chunked`, but it yields iterables
+ instead of lists.
+
+ If the sub-iterables are read in order, the elements of *iterable*
+ won't be stored in memory.
+ If they are read out of order, :func:`itertools.tee` is used to cache
+ elements as necessary.
+
+ >>> from itertools import count
+ >>> all_chunks = ichunked(count(), 4)
+ >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
+ >>> list(c_2) # c_1's elements have been cached; c_3's haven't been
+ [4, 5, 6, 7]
+ >>> list(c_1)
+ [0, 1, 2, 3]
+ >>> list(c_3)
+ [8, 9, 10, 11]
+
+ """
+ source = iter(iterable)
+
+ while True:
+ # Check to see whether we're at the end of the source iterable
+ item = next(source, _marker)
+ if item is _marker:
+ return
+
+ # Clone the source and yield an n-length slice
+ source, it = tee(chain([item], source))
+ yield islice(it, n)
+
+ # Advance the source iterable
+ consume(source, n)
+
+
+def distinct_combinations(iterable, r):
+ """Yield the distinct combinations of *r* items taken from *iterable*.
+
+ >>> list(distinct_combinations([0, 0, 1], 2))
+ [(0, 0), (0, 1)]
+
+    Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
+ generated and thrown away. For larger input sequences this is much more
+ efficient.
+
+ """
+ if r < 0:
+ raise ValueError('r must be non-negative')
+ elif r == 0:
+ yield ()
+ return
+ pool = tuple(iterable)
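+    # Depth-first search over index positions; unique_everseen at each
+    # level skips repeated values, so no duplicate combination is built.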
+ generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
+ current_combo = [None] * r
+ level = 0
+ while generators:
+ try:
+ cur_idx, p = next(generators[-1])
+ except StopIteration:
+ generators.pop()
+ level -= 1
+ continue
+ current_combo[level] = p
+ if level + 1 == r:
+ yield tuple(current_combo)
+ else:
+ generators.append(
+ unique_everseen(
+ enumerate(pool[cur_idx + 1 :], cur_idx + 1),
+ key=itemgetter(1),
+ )
+ )
+ level += 1
+
+
+def filter_except(validator, iterable, *exceptions):
+ """Yield the items from *iterable* for which the *validator* function does
+ not raise one of the specified *exceptions*.
+
+ *validator* is called for each item in *iterable*.
+ It should be a function that accepts one argument and raises an exception
+ if that item is not valid.
+
+ >>> iterable = ['1', '2', 'three', '4', None]
+ >>> list(filter_except(int, iterable, ValueError, TypeError))
+ ['1', '2', '4']
+
+ If an exception other than one given by *exceptions* is raised by
+ *validator*, it is raised like normal.
+ """
+ for item in iterable:
+ try:
+ validator(item)
+ except exceptions:
+ pass
+ else:
+ yield item
+
+
+def map_except(function, iterable, *exceptions):
+ """Transform each item from *iterable* with *function* and yield the
+ result, unless *function* raises one of the specified *exceptions*.
+
+ *function* is called to transform each item in *iterable*.
+ It should accept one argument.
+
+ >>> iterable = ['1', '2', 'three', '4', None]
+ >>> list(map_except(int, iterable, ValueError, TypeError))
+ [1, 2, 4]
+
+ If an exception other than one given by *exceptions* is raised by
+ *function*, it is raised like normal.
+ """
+ for item in iterable:
+ try:
+ yield function(item)
+ except exceptions:
+ pass
+
+
+def map_if(iterable, pred, func, func_else=lambda x: x):
+ """Evaluate each item from *iterable* using *pred*. If the result is
+ equivalent to ``True``, transform the item with *func* and yield it.
+ Otherwise, transform the item with *func_else* and yield it.
+
+ *pred*, *func*, and *func_else* should each be functions that accept
+ one argument. By default, *func_else* is the identity function.
+
+ >>> from math import sqrt
+ >>> iterable = list(range(-5, 5))
+ >>> iterable
+ [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
+ >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig'))
+ [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig']
+ >>> list(map_if(iterable, lambda x: x >= 0,
+ ... lambda x: f'{sqrt(x):.2f}', lambda x: None))
+ [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00']
+ """
+ for item in iterable:
+ yield func(item) if pred(item) else func_else(item)
+
+
+def _sample_unweighted(iterable, k):
+ # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
+ # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
+
+ # Fill up the reservoir (collection of samples) with the first `k` samples
+ reservoir = take(k, iterable)
+
+ # Generate random number that's the largest in a sample of k U(0,1) numbers
+ # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
+ W = exp(log(random()) / k)
+
+ # The number of elements to skip before changing the reservoir is a random
+ # number with a geometric distribution. Sample it using random() and logs.
+ next_index = k + floor(log(random()) / log(1 - W))
+
+ for index, element in enumerate(iterable, k):
+
+ if index == next_index:
+ reservoir[randrange(k)] = element
+ # The new W is the largest in a sample of k U(0, `old_W`) numbers
+ W *= exp(log(random()) / k)
+ next_index += floor(log(random()) / log(1 - W)) + 1
+
+ return reservoir
+
+
+def _sample_weighted(iterable, k, weights):
+ # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
+ # "Weighted random sampling with a reservoir".
+
+ # Log-transform for numerical stability for weights that are small/large
+ weight_keys = (log(random()) / weight for weight in weights)
+
+ # Fill up the reservoir (collection of samples) with the first `k`
+ # weight-keys and elements, then heapify the list.
+ reservoir = take(k, zip(weight_keys, iterable))
+ heapify(reservoir)
+
+ # The number of jumps before changing the reservoir is a random variable
+ # with an exponential distribution. Sample it using random() and logs.
+ smallest_weight_key, _ = reservoir[0]
+ weights_to_skip = log(random()) / smallest_weight_key
+
+ for weight, element in zip(weights, iterable):
+ if weight >= weights_to_skip:
+ # The notation here is consistent with the paper, but we store
+ # the weight-keys in log-space for better numerical stability.
+ smallest_weight_key, _ = reservoir[0]
+ t_w = exp(weight * smallest_weight_key)
+ r_2 = uniform(t_w, 1) # generate U(t_w, 1)
+ weight_key = log(r_2) / weight
+ heapreplace(reservoir, (weight_key, element))
+ smallest_weight_key, _ = reservoir[0]
+ weights_to_skip = log(random()) / smallest_weight_key
+ else:
+ weights_to_skip -= weight
+
+ # Equivalent to [element for weight_key, element in sorted(reservoir)]
+ return [heappop(reservoir)[1] for _ in range(k)]
+
+
+def sample(iterable, k, weights=None):
+ """Return a *k*-length list of elements chosen (without replacement)
+ from the *iterable*. Like :func:`random.sample`, but works on iterables
+ of unknown length.
+
+ >>> iterable = range(100)
+ >>> sample(iterable, 5) # doctest: +SKIP
+ [81, 60, 96, 16, 4]
+
+ An iterable with *weights* may also be given:
+
+ >>> iterable = range(100)
+ >>> weights = (i * i + 1 for i in range(100))
+    >>> sample(iterable, 5, weights=weights)  # doctest: +SKIP
+ [79, 67, 74, 66, 78]
+
+ The algorithm can also be used to generate weighted random permutations.
+ The relative weight of each item determines the probability that it
+ appears late in the permutation.
+
+ >>> data = "abcdefgh"
+ >>> weights = range(1, len(data) + 1)
+ >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
+ ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
+ """
+ if k == 0:
+ return []
+
+ iterable = iter(iterable)
+ if weights is None:
+ return _sample_unweighted(iterable, k)
+ else:
+ weights = iter(weights)
+ return _sample_weighted(iterable, k, weights)
+
+
+def is_sorted(iterable, key=None, reverse=False, strict=False):
+ """Returns ``True`` if the items of iterable are in sorted order, and
+ ``False`` otherwise. *key* and *reverse* have the same meaning that they do
+ in the built-in :func:`sorted` function.
+
+ >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
+ True
+ >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
+ False
+
+ If *strict*, tests for strict sorting, that is, returns ``False`` if equal
+ elements are found:
+
+ >>> is_sorted([1, 2, 2])
+ True
+ >>> is_sorted([1, 2, 2], strict=True)
+ False
+
+ The function returns ``False`` after encountering the first out-of-order
+ item. If there are no out-of-order items, the iterable is exhausted.
+ """
+
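+    # A violation is any adjacent pair that compares "out of order";
+    # strict mode also counts equal neighbors as violations (ge/le).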
+ compare = (le if reverse else ge) if strict else (lt if reverse else gt)
+ it = iterable if key is None else map(key, iterable)
+ return not any(starmap(compare, pairwise(it)))
+
+
+class AbortThread(BaseException):
+ pass
+
+
+class callback_iter:
+ """Convert a function that uses callbacks to an iterator.
+
+ Let *func* be a function that takes a `callback` keyword argument.
+ For example:
+
+ >>> def func(callback=None):
+ ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
+ ... if callback:
+ ... callback(i, c)
+ ... return 4
+
+
+ Use ``with callback_iter(func)`` to get an iterator over the parameters
+ that are delivered to the callback.
+
+ >>> with callback_iter(func) as it:
+ ... for args, kwargs in it:
+ ... print(args)
+ (1, 'a')
+ (2, 'b')
+ (3, 'c')
+
+ The function will be called in a background thread. The ``done`` property
+ indicates whether it has completed execution.
+
+ >>> it.done
+ True
+
+ If it completes successfully, its return value will be available
+ in the ``result`` property.
+
+ >>> it.result
+ 4
+
+ Notes:
+
+ * If the function uses some keyword argument besides ``callback``, supply
+ *callback_kwd*.
+ * If it finished executing, but raised an exception, accessing the
+ ``result`` property will raise the same exception.
+ * If it hasn't finished executing, accessing the ``result``
+ property from within the ``with`` block will raise ``RuntimeError``.
+ * If it hasn't finished executing, accessing the ``result`` property from
+ outside the ``with`` block will raise a
+ ``more_itertools.AbortThread`` exception.
+    * Provide *wait_seconds* to adjust how frequently it is polled for
+ output.
+
+ """
+
+ def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
+ self._func = func
+ self._callback_kwd = callback_kwd
+ self._aborted = False
+ self._future = None
+ self._wait_seconds = wait_seconds
+ self._executor = __import__("concurrent.futures").futures.ThreadPoolExecutor(max_workers=1)
+ self._iterator = self._reader()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._aborted = True
+ self._executor.shutdown()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self._iterator)
+
+ @property
+ def done(self):
+ if self._future is None:
+ return False
+ return self._future.done()
+
+ @property
+ def result(self):
+ if not self.done:
+ raise RuntimeError('Function has not yet completed')
+
+ return self._future.result()
+
+ def _reader(self):
+ q = Queue()
+
+ def callback(*args, **kwargs):
+ if self._aborted:
+ raise AbortThread('canceled by user')
+
+ q.put((args, kwargs))
+
+ self._future = self._executor.submit(
+ self._func, **{self._callback_kwd: callback}
+ )
+
+ while True:
+ try:
+ item = q.get(timeout=self._wait_seconds)
+ except Empty:
+ pass
+ else:
+ q.task_done()
+ yield item
+
+ if self._future.done():
+ break
+
+ remaining = []
+ while True:
+ try:
+ item = q.get_nowait()
+ except Empty:
+ break
+ else:
+ q.task_done()
+ remaining.append(item)
+ q.join()
+ yield from remaining
+
+
+def windowed_complete(iterable, n):
+ """
+ Yield ``(beginning, middle, end)`` tuples, where:
+
+ * Each ``middle`` has *n* items from *iterable*
+ * Each ``beginning`` has the items before the ones in ``middle``
+ * Each ``end`` has the items after the ones in ``middle``
+
+ >>> iterable = range(7)
+ >>> n = 3
+ >>> for beginning, middle, end in windowed_complete(iterable, n):
+ ... print(beginning, middle, end)
+ () (0, 1, 2) (3, 4, 5, 6)
+ (0,) (1, 2, 3) (4, 5, 6)
+ (0, 1) (2, 3, 4) (5, 6)
+ (0, 1, 2) (3, 4, 5) (6,)
+ (0, 1, 2, 3) (4, 5, 6) ()
+
+    Note that *n* must be at least 0 and at most equal to the length of
+ *iterable*.
+
+ This function will exhaust the iterable and may require significant
+ storage.
+ """
+ if n < 0:
+ raise ValueError('n must be >= 0')
+
+ seq = tuple(iterable)
+ size = len(seq)
+
+ if n > size:
+ raise ValueError('n must be <= len(seq)')
+
+ for i in range(size - n + 1):
+ beginning = seq[:i]
+ middle = seq[i : i + n]
+ end = seq[i + n :]
+ yield beginning, middle, end
+
+
+def all_unique(iterable, key=None):
+ """
+ Returns ``True`` if all the elements of *iterable* are unique (no two
+ elements are equal).
+
+ >>> all_unique('ABCB')
+ False
+
+ If a *key* function is specified, it will be used to make comparisons.
+
+ >>> all_unique('ABCb')
+ True
+ >>> all_unique('ABCb', str.lower)
+ False
+
+ The function returns as soon as the first non-unique element is
+ encountered. Iterables with a mix of hashable and unhashable items can
+ be used, but the function will be slower for unhashable items.
+ """
+ seenset = set()
+ seenset_add = seenset.add
+ seenlist = []
+ seenlist_add = seenlist.append
+ for element in map(key, iterable) if key else iterable:
+ try:
+ if element in seenset:
+ return False
+ seenset_add(element)
+ except TypeError:
+ if element in seenlist:
+ return False
+ seenlist_add(element)
+ return True
+
+
+def nth_product(index, *args):
+ """Equivalent to ``list(product(*args))[index]``.
+
+ The products of *args* can be ordered lexicographically.
+ :func:`nth_product` computes the product at sort position *index* without
+ computing the previous products.
+
+ >>> nth_product(8, range(2), range(2), range(2), range(2))
+ (1, 0, 0, 0)
+
+ ``IndexError`` will be raised if the given *index* is invalid.
+ """
+ pools = list(map(tuple, reversed(args)))
+ ns = list(map(len, pools))
+
+ c = reduce(mul, ns)
+
+ if index < 0:
+ index += c
+
+ if not 0 <= index < c:
+ raise IndexError
+
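+    # Decode *index* as a mixed-radix number: its digits, taken least
+    # significant first over the reversed pools, each select one item.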
+ result = []
+ for pool, n in zip(pools, ns):
+ result.append(pool[index % n])
+ index //= n
+
+ return tuple(reversed(result))
+
+
+def nth_permutation(iterable, r, index):
+ """Equivalent to ``list(permutations(iterable, r))[index]```
+
+ The subsequences of *iterable* that are of length *r* where order is
+ important can be ordered lexicographically. :func:`nth_permutation`
+ computes the subsequence at sort position *index* directly, without
+ computing the previous subsequences.
+
+ >>> nth_permutation('ghijk', 2, 5)
+ ('h', 'i')
+
+    ``ValueError`` will be raised if *r* is negative or greater than the length
+    of *iterable*.
+ ``IndexError`` will be raised if the given *index* is invalid.
+ """
+ pool = list(iterable)
+ n = len(pool)
+
+ if r is None or r == n:
+ r, c = n, factorial(n)
+ elif not 0 <= r < n:
+ raise ValueError
+ else:
+ c = factorial(n) // factorial(n - r)
+
+ if index < 0:
+ index += c
+
+ if not 0 <= index < c:
+ raise IndexError
+
+ if c == 0:
+ return tuple()
+
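+    # Decode *index* in the factorial number system: repeated divmod
+    # yields digits that select positions from the shrinking pool below.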
+ result = [0] * r
+ q = index * factorial(n) // c if r < n else index
+ for d in range(1, n + 1):
+ q, i = divmod(q, d)
+ if 0 <= n - d < r:
+ result[n - d] = i
+ if q == 0:
+ break
+
+ return tuple(map(pool.pop, result))
+
+
+def value_chain(*args):
+ """Yield all arguments passed to the function in the same order in which
+ they were passed. If an argument itself is iterable then iterate over its
+ values.
+
+ >>> list(value_chain(1, 2, 3, [4, 5, 6]))
+ [1, 2, 3, 4, 5, 6]
+
+ Binary and text strings are not considered iterable and are emitted
+ as-is:
+
+ >>> list(value_chain('12', '34', ['56', '78']))
+ ['12', '34', '56', '78']
+
+
+ Multiple levels of nesting are not flattened.
+
+ """
+ for value in args:
+ if isinstance(value, (str, bytes)):
+ yield value
+ continue
+ try:
+ yield from value
+ except TypeError:
+ yield value
+
+
+def product_index(element, *args):
+ """Equivalent to ``list(product(*args)).index(element)``
+
+ The products of *args* can be ordered lexicographically.
+ :func:`product_index` computes the first index of *element* without
+ computing the previous products.
+
+ >>> product_index([8, 2], range(10), range(5))
+ 42
+
+ ``ValueError`` will be raised if the given *element* isn't in the product
+ of *args*.
+ """
+ index = 0
+
+ for x, pool in zip_longest(element, args, fillvalue=_marker):
+ if x is _marker or pool is _marker:
+ raise ValueError('element is not a product of args')
+
+ pool = tuple(pool)
+ index = index * len(pool) + pool.index(x)
+
+ return index
+
+
+def combination_index(element, iterable):
+ """Equivalent to ``list(combinations(iterable, r)).index(element)``
+
+ The subsequences of *iterable* that are of length *r* can be ordered
+ lexicographically. :func:`combination_index` computes the index of the
+ first *element*, without computing the previous combinations.
+
+ >>> combination_index('adf', 'abcdefg')
+ 10
+
+ ``ValueError`` will be raised if the given *element* isn't one of the
+ combinations of *iterable*.
+ """
+ element = enumerate(element)
+ k, y = next(element, (None, None))
+ if k is None:
+ return 0
+
+ indexes = []
+ pool = enumerate(iterable)
+ for n, x in pool:
+ if x == y:
+ indexes.append(n)
+ tmp, y = next(element, (None, None))
+ if tmp is None:
+ break
+ else:
+ k = tmp
+ else:
+ raise ValueError('element is not a combination of iterable')
+
+ n, _ = last(pool, default=(n, None))
+
+    # Python versions below 3.8 don't have math.comb, so use factorials.
+ index = 1
+ for i, j in enumerate(reversed(indexes), start=1):
+ j = n - j
+ if i <= j:
+ index += factorial(j) // (factorial(i) * factorial(j - i))
+
+ return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
+
+
+def permutation_index(element, iterable):
+ """Equivalent to ``list(permutations(iterable, r)).index(element)```
+
+ The subsequences of *iterable* that are of length *r* where order is
+ important can be ordered lexicographically. :func:`permutation_index`
+ computes the index of the first *element* directly, without computing
+ the previous permutations.
+
+ >>> permutation_index([1, 3, 2], range(5))
+ 19
+
+ ``ValueError`` will be raised if the given *element* isn't one of the
+ permutations of *iterable*.
+ """
+ index = 0
+ pool = list(iterable)
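+    # Lehmer code: the rank of each chosen element within the remaining
+    # pool becomes the next digit in a factorial-base number.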
+ for i, x in zip(range(len(pool), -1, -1), element):
+ r = pool.index(x)
+ index = index * i + r
+ del pool[r]
+
+ return index
+
+
+class countable:
+ """Wrap *iterable* and keep a count of how many items have been consumed.
+
+ The ``items_seen`` attribute starts at ``0`` and increments as the iterable
+ is consumed:
+
+ >>> iterable = map(str, range(10))
+ >>> it = countable(iterable)
+ >>> it.items_seen
+ 0
+ >>> next(it), next(it)
+ ('0', '1')
+ >>> list(it)
+ ['2', '3', '4', '5', '6', '7', '8', '9']
+ >>> it.items_seen
+ 10
+ """
+
+ def __init__(self, iterable):
+ self._it = iter(iterable)
+ self.items_seen = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ item = next(self._it)
+ self.items_seen += 1
+
+ return item
+
+
+def chunked_even(iterable, n):
+ """Break *iterable* into lists of approximately length *n*.
+    Items are distributed such that the lengths of the lists differ by at
+    most 1 item.
+
+ >>> iterable = [1, 2, 3, 4, 5, 6, 7]
+ >>> n = 3
+ >>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2
+ [[1, 2, 3], [4, 5], [6, 7]]
+ >>> list(chunked(iterable, n)) # List lengths: 3, 3, 1
+ [[1, 2, 3], [4, 5, 6], [7]]
+
+ """
+
+ len_method = getattr(iterable, '__len__', None)
+
+ if len_method is None:
+ return _chunked_even_online(iterable, n)
+ else:
+ return _chunked_even_finite(iterable, len_method(), n)
+
+
+def _chunked_even_online(iterable, n):
+ buffer = []
+ maxbuf = n + (n - 2) * (n - 1)
+ for x in iterable:
+ buffer.append(x)
+ if len(buffer) == maxbuf:
+ yield buffer[:n]
+ buffer = buffer[n:]
+ yield from _chunked_even_finite(buffer, len(buffer), n)
+
+
+def _chunked_even_finite(iterable, N, n):
+ if N < 1:
+ return
+
+ # Lists are either size `full_size <= n` or `partial_size = full_size - 1`
+ q, r = divmod(N, n)
+ num_lists = q + (1 if r > 0 else 0)
+ q, r = divmod(N, num_lists)
+ full_size = q + (1 if r > 0 else 0)
+ partial_size = full_size - 1
+ num_full = N - partial_size * num_lists
+ num_partial = num_lists - num_full
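+    # e.g., N=7, n=3: three lists, one full of size 3 and two partial of
+    # size 2, matching the docstring example above.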
+
+ buffer = []
+ iterator = iter(iterable)
+
+ # Yield num_full lists of full_size
+ for x in iterator:
+ buffer.append(x)
+ if len(buffer) == full_size:
+ yield buffer
+ buffer = []
+ num_full -= 1
+ if num_full <= 0:
+ break
+
+ # Yield num_partial lists of partial_size
+ for x in iterator:
+ buffer.append(x)
+ if len(buffer) == partial_size:
+ yield buffer
+ buffer = []
+ num_partial -= 1
+
+
+def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
+ """A version of :func:`zip` that "broadcasts" any scalar
+ (i.e., non-iterable) items into output tuples.
+
+ >>> iterable_1 = [1, 2, 3]
+ >>> iterable_2 = ['a', 'b', 'c']
+ >>> scalar = '_'
+ >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
+ [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
+
+ The *scalar_types* keyword argument determines what types are considered
+ scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
+ treat strings and byte strings as iterable:
+
+ >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
+ [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
+
+ If the *strict* keyword argument is ``True``, then
+ ``UnequalIterablesError`` will be raised if any of the iterables have
+    different lengths.
+ """
+
+ def is_scalar(obj):
+ if scalar_types and isinstance(obj, scalar_types):
+ return True
+ try:
+ iter(obj)
+ except TypeError:
+ return True
+ else:
+ return False
+
+ size = len(objects)
+ if not size:
+ return
+
+ iterables, iterable_positions = [], []
+ scalars, scalar_positions = [], []
+ for i, obj in enumerate(objects):
+ if is_scalar(obj):
+ scalars.append(obj)
+ scalar_positions.append(i)
+ else:
+ iterables.append(iter(obj))
+ iterable_positions.append(i)
+
+ if len(scalars) == size:
+ yield tuple(objects)
+ return
+
+ zipper = _zip_equal if strict else zip
+ for item in zipper(*iterables):
+ new_item = [None] * size
+
+ for i, elem in zip(iterable_positions, item):
+ new_item[i] = elem
+
+ for i, elem in zip(scalar_positions, scalars):
+ new_item[i] = elem
+
+ yield tuple(new_item)
+
+
+def unique_in_window(iterable, n, key=None):
+ """Yield the items from *iterable* that haven't been seen recently.
+ *n* is the size of the lookback window.
+
+ >>> iterable = [0, 1, 0, 2, 3, 0]
+ >>> n = 3
+ >>> list(unique_in_window(iterable, n))
+ [0, 1, 2, 3, 0]
+
+ The *key* function, if provided, will be used to determine uniqueness:
+
+ >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
+ ['a', 'b', 'c', 'd', 'a']
+
+ The items in *iterable* must be hashable.
+
+ """
+ if n <= 0:
+ raise ValueError('n must be greater than 0')
+
+ window = deque(maxlen=n)
+ uniques = set()
+ use_key = key is not None
+
+ for item in iterable:
+ k = key(item) if use_key else item
+ if k in uniques:
+ continue
+
+ if len(uniques) == n:
+ uniques.discard(window[0])
+
+ uniques.add(k)
+ window.append(k)
+
+ yield item
+
+
+def duplicates_everseen(iterable, key=None):
+ """Yield duplicate elements after their first appearance.
+
+ >>> list(duplicates_everseen('mississippi'))
+ ['s', 'i', 's', 's', 'i', 'p', 'i']
+ >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
+ ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
+
+    This function is analogous to :func:`unique_everseen` and is subject to
+ the same performance considerations.
+
+ """
+ seen_set = set()
+ seen_list = []
+ use_key = key is not None
+
+ for element in iterable:
+ k = key(element) if use_key else element
+ try:
+ if k not in seen_set:
+ seen_set.add(k)
+ else:
+ yield element
+ except TypeError:
+ if k not in seen_list:
+ seen_list.append(k)
+ else:
+ yield element
+
+
+def duplicates_justseen(iterable, key=None):
+ """Yields serially-duplicate elements after their first appearance.
+
+ >>> list(duplicates_justseen('mississippi'))
+ ['s', 's', 'p']
+ >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
+ ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
+
+    This function is analogous to :func:`unique_justseen`.
+
+ """
+ return flatten(
+ map(
+ lambda group_tuple: islice_extended(group_tuple[1])[1:],
+ groupby(iterable, key),
+ )
+ )
+
+
+def minmax(iterable_or_value, *others, key=None, default=_marker):
+ """Returns both the smallest and largest items in an iterable
+ or the largest of two or more arguments.
+
+ >>> minmax([3, 1, 5])
+ (1, 5)
+
+ >>> minmax(4, 2, 6)
+ (2, 6)
+
+ If a *key* function is provided, it will be used to transform the input
+ items for comparison.
+
+ >>> minmax([5, 30], key=str) # '30' sorts before '5'
+ (30, 5)
+
+ If a *default* value is provided, it will be returned if there are no
+ input items.
+
+ >>> minmax([], default=(0, 0))
+ (0, 0)
+
+ Otherwise ``ValueError`` is raised.
+
+ This function is based on the
+ `recipe <http://code.activestate.com/recipes/577916/>`__ by
+ Raymond Hettinger and takes care to minimize the number of comparisons
+ performed.
+ """
+ iterable = (iterable_or_value, *others) if others else iterable_or_value
+
+ it = iter(iterable)
+
+ try:
+ lo = hi = next(it)
+ except StopIteration as e:
+ if default is _marker:
+ raise ValueError(
+ '`minmax()` argument is an empty iterable. '
+ 'Provide a `default` value to suppress this error.'
+ ) from e
+ return default
+
+    # Different branches depending on the presence of key. This avoids a
+    # lot of unnecessary copies that would otherwise slow the "key=None"
+    # branch down significantly.
+ if key is None:
+ for x, y in zip_longest(it, it, fillvalue=lo):
+ if y < x:
+ x, y = y, x
+ if x < lo:
+ lo = x
+ if hi < y:
+ hi = y
+
+ else:
+ lo_key = hi_key = key(lo)
+
+ for x, y in zip_longest(it, it, fillvalue=lo):
+
+ x_key, y_key = key(x), key(y)
+
+ if y_key < x_key:
+ x, y, x_key, y_key = y, x, y_key, x_key
+ if x_key < lo_key:
+ lo, lo_key = x, x_key
+ if hi_key < y_key:
+ hi, hi_key = y, y_key
+
+ return lo, hi
diff --git a/pkg_resources/_vendor/more_itertools/recipes.py b/pkg_resources/_vendor/more_itertools/recipes.py
new file mode 100644
index 0000000..a259642
--- /dev/null
+++ b/pkg_resources/_vendor/more_itertools/recipes.py
@@ -0,0 +1,698 @@
+"""Imported from the recipes section of the itertools documentation.
+
+All functions taken from the recipes section of the itertools library docs
+[1]_.
+Some backward-compatible usability improvements have been made.
+
+.. [1] http://docs.python.org/library/itertools.html#recipes
+
+"""
+import warnings
+from collections import deque
+from itertools import (
+ chain,
+ combinations,
+ count,
+ cycle,
+ groupby,
+ islice,
+ repeat,
+ starmap,
+ tee,
+ zip_longest,
+)
+import operator
+from random import randrange, sample, choice
+
+__all__ = [
+ 'all_equal',
+ 'before_and_after',
+ 'consume',
+ 'convolve',
+ 'dotproduct',
+ 'first_true',
+ 'flatten',
+ 'grouper',
+ 'iter_except',
+ 'ncycles',
+ 'nth',
+ 'nth_combination',
+ 'padnone',
+ 'pad_none',
+ 'pairwise',
+ 'partition',
+ 'powerset',
+ 'prepend',
+ 'quantify',
+ 'random_combination_with_replacement',
+ 'random_combination',
+ 'random_permutation',
+ 'random_product',
+ 'repeatfunc',
+ 'roundrobin',
+ 'sliding_window',
+ 'tabulate',
+ 'tail',
+ 'take',
+ 'triplewise',
+ 'unique_everseen',
+ 'unique_justseen',
+]
+
+
+def take(n, iterable):
+ """Return first *n* items of the iterable as a list.
+
+ >>> take(3, range(10))
+ [0, 1, 2]
+
+ If there are fewer than *n* items in the iterable, all of them are
+ returned.
+
+ >>> take(10, range(3))
+ [0, 1, 2]
+
+ """
+ return list(islice(iterable, n))
+
+
+def tabulate(function, start=0):
+ """Return an iterator over the results of ``func(start)``,
+ ``func(start + 1)``, ``func(start + 2)``...
+
+ *func* should be a function that accepts one integer argument.
+
+ If *start* is not specified it defaults to 0. It will be incremented each
+ time the iterator is advanced.
+
+ >>> square = lambda x: x ** 2
+ >>> iterator = tabulate(square, -3)
+ >>> take(4, iterator)
+ [9, 4, 1, 0]
+
+ """
+ return map(function, count(start))
+
+
+def tail(n, iterable):
+ """Return an iterator over the last *n* items of *iterable*.
+
+ >>> t = tail(3, 'ABCDEFG')
+ >>> list(t)
+ ['E', 'F', 'G']
+
+ """
+ return iter(deque(iterable, maxlen=n))
+
+
+def consume(iterator, n=None):
+ """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
+ entirely.
+
+ Efficiently exhausts an iterator without returning values. Defaults to
+ consuming the whole iterator, but an optional second argument may be
+ provided to limit consumption.
+
+ >>> i = (x for x in range(10))
+ >>> next(i)
+ 0
+ >>> consume(i, 3)
+ >>> next(i)
+ 4
+ >>> consume(i)
+ >>> next(i)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ StopIteration
+
+ If the iterator has fewer items remaining than the provided limit, the
+ whole iterator will be consumed.
+
+ >>> i = (x for x in range(3))
+ >>> consume(i, 5)
+ >>> next(i)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ StopIteration
+
+ """
+ # Use functions that consume iterators at C speed.
+ if n is None:
+ # feed the entire iterator into a zero-length deque
+ deque(iterator, maxlen=0)
+ else:
+ # advance to the empty slice starting at position n
+ next(islice(iterator, n, n), None)
+
+
+def nth(iterable, n, default=None):
+ """Returns the nth item or a default value.
+
+ >>> l = range(10)
+ >>> nth(l, 3)
+ 3
+ >>> nth(l, 20, "zebra")
+ 'zebra'
+
+ """
+ return next(islice(iterable, n, None), default)
+
+
+def all_equal(iterable):
+ """
+ Returns ``True`` if all the elements are equal to each other.
+
+ >>> all_equal('aaaa')
+ True
+ >>> all_equal('aaab')
+ False
+
+ """
+ g = groupby(iterable)
+ return next(g, True) and not next(g, False)
+
+
+def quantify(iterable, pred=bool):
+ """Return the how many times the predicate is true.
+
+ >>> quantify([True, False, True])
+ 2
+
+ """
+ return sum(map(pred, iterable))
+
+
+def pad_none(iterable):
+ """Returns the sequence of elements and then returns ``None`` indefinitely.
+
+ >>> take(5, pad_none(range(3)))
+ [0, 1, 2, None, None]
+
+ Useful for emulating the behavior of the built-in :func:`map` function.
+
+ See also :func:`padded`.
+
+ """
+ return chain(iterable, repeat(None))
+
+
+padnone = pad_none
+
+
+def ncycles(iterable, n):
+ """Returns the sequence elements *n* times
+
+ >>> list(ncycles(["a", "b"], 3))
+ ['a', 'b', 'a', 'b', 'a', 'b']
+
+ """
+ return chain.from_iterable(repeat(tuple(iterable), n))
+
+
+def dotproduct(vec1, vec2):
+ """Returns the dot product of the two iterables.
+
+ >>> dotproduct([10, 10], [20, 20])
+ 400
+
+ """
+ return sum(map(operator.mul, vec1, vec2))
+
+
+def flatten(listOfLists):
+ """Return an iterator flattening one level of nesting in a list of lists.
+
+ >>> list(flatten([[0, 1], [2, 3]]))
+ [0, 1, 2, 3]
+
+ See also :func:`collapse`, which can flatten multiple levels of nesting.
+
+ """
+ return chain.from_iterable(listOfLists)
+
+
+def repeatfunc(func, times=None, *args):
+ """Call *func* with *args* repeatedly, returning an iterable over the
+ results.
+
+ If *times* is specified, the iterable will terminate after that many
+ repetitions:
+
+ >>> from operator import add
+ >>> times = 4
+ >>> args = 3, 5
+ >>> list(repeatfunc(add, times, *args))
+ [8, 8, 8, 8]
+
+ If *times* is ``None`` the iterable will not terminate:
+
+ >>> from random import randrange
+ >>> times = None
+ >>> args = 1, 11
+ >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
+ [2, 4, 8, 1, 8, 4]
+
+ """
+ if times is None:
+ return starmap(func, repeat(args))
+ return starmap(func, repeat(args, times))
+
+
+def _pairwise(iterable):
+ """Returns an iterator of paired items, overlapping, from the original
+
+ >>> take(4, pairwise(count()))
+ [(0, 1), (1, 2), (2, 3), (3, 4)]
+
+ On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
+
+ """
+ a, b = tee(iterable)
+ next(b, None)
+ yield from zip(a, b)
+
+
+try:
+ from itertools import pairwise as itertools_pairwise
+except ImportError:
+ pairwise = _pairwise
+else:
+
+ def pairwise(iterable):
+ yield from itertools_pairwise(iterable)
+
+ pairwise.__doc__ = _pairwise.__doc__
+
+
+def grouper(iterable, n, fillvalue=None):
+ """Collect data into fixed-length chunks or blocks.
+
+ >>> list(grouper('ABCDEFG', 3, 'x'))
+ [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
+
+ """
+ if isinstance(iterable, int):
+ warnings.warn(
+ "grouper expects iterable as first parameter", DeprecationWarning
+ )
+ n, iterable = iterable, n
+ args = [iter(iterable)] * n
+ return zip_longest(fillvalue=fillvalue, *args)
+
+
+def roundrobin(*iterables):
+ """Yields an item from each iterable, alternating between them.
+
+ >>> list(roundrobin('ABC', 'D', 'EF'))
+ ['A', 'D', 'E', 'B', 'F', 'C']
+
+ This function produces the same output as :func:`interleave_longest`, but
+ may perform better for some inputs (in particular when the number of
+ iterables is small).
+
+ """
+ # Recipe credited to George Sakkis
+ pending = len(iterables)
+ nexts = cycle(iter(it).__next__ for it in iterables)
+ while pending:
+ try:
+ for next in nexts:
+ yield next()
+ except StopIteration:
+ pending -= 1
+ nexts = cycle(islice(nexts, pending))
+
+
+def partition(pred, iterable):
+ """
+ Returns a 2-tuple of iterables derived from the input iterable.
+ The first yields the items that have ``pred(item) == False``.
+ The second yields the items that have ``pred(item) == True``.
+
+ >>> is_odd = lambda x: x % 2 != 0
+ >>> iterable = range(10)
+ >>> even_items, odd_items = partition(is_odd, iterable)
+ >>> list(even_items), list(odd_items)
+ ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
+
+ If *pred* is None, :func:`bool` is used.
+
+ >>> iterable = [0, 1, False, True, '', ' ']
+ >>> false_items, true_items = partition(None, iterable)
+ >>> list(false_items), list(true_items)
+ ([0, False, ''], [1, True, ' '])
+
+ """
+ if pred is None:
+ pred = bool
+
+ evaluations = ((pred(x), x) for x in iterable)
+ t1, t2 = tee(evaluations)
+ return (
+ (x for (cond, x) in t1 if not cond),
+ (x for (cond, x) in t2 if cond),
+ )
+
+
+def powerset(iterable):
+ """Yields all possible subsets of the iterable.
+
+ >>> list(powerset([1, 2, 3]))
+ [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
+
+ :func:`powerset` will operate on iterables that aren't :class:`set`
+ instances, so repeated elements in the input will produce repeated elements
+ in the output. Use :func:`unique_everseen` on the input to avoid generating
+ duplicates:
+
+ >>> seq = [1, 1, 0]
+ >>> list(powerset(seq))
+ [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
+ >>> from more_itertools import unique_everseen
+ >>> list(powerset(unique_everseen(seq)))
+ [(), (1,), (0,), (1, 0)]
+
+ """
+ s = list(iterable)
+ return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
+
+
+def unique_everseen(iterable, key=None):
+ """
+ Yield unique elements, preserving order.
+
+ >>> list(unique_everseen('AAAABBBCCDAABBB'))
+ ['A', 'B', 'C', 'D']
+ >>> list(unique_everseen('ABBCcAD', str.lower))
+ ['A', 'B', 'C', 'D']
+
+ Sequences with a mix of hashable and unhashable items can be used.
+ The function will be slower (i.e., `O(n^2)`) for unhashable items.
+
+ Remember that ``list`` objects are unhashable; you can use the *key*
+ parameter to transform the list into a tuple (which is hashable) to
+ avoid a slowdown.
+
+ >>> iterable = ([1, 2], [2, 3], [1, 2])
+ >>> list(unique_everseen(iterable)) # Slow
+ [[1, 2], [2, 3]]
+ >>> list(unique_everseen(iterable, key=tuple)) # Faster
+ [[1, 2], [2, 3]]
+
+ Similarly, you may want to convert unhashable ``set`` objects with
+ ``key=frozenset``. For ``dict`` objects,
+ ``key=lambda x: frozenset(x.items())`` can be used.
+
+ """
+ seenset = set()
+ seenset_add = seenset.add
+ seenlist = []
+ seenlist_add = seenlist.append
+ use_key = key is not None
+
+ for element in iterable:
+ k = key(element) if use_key else element
+ try:
+ if k not in seenset:
+ seenset_add(k)
+ yield element
+ except TypeError:
+ if k not in seenlist:
+ seenlist_add(k)
+ yield element
+
+
+def unique_justseen(iterable, key=None):
+ """Yields elements in order, ignoring serial duplicates
+
+ >>> list(unique_justseen('AAAABBBCCDAABBB'))
+ ['A', 'B', 'C', 'D', 'A', 'B']
+ >>> list(unique_justseen('ABBCcAD', str.lower))
+ ['A', 'B', 'C', 'A', 'D']
+
+ """
+ return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
+
+
+def iter_except(func, exception, first=None):
+ """Yields results from a function repeatedly until an exception is raised.
+
+ Converts a call-until-exception interface to an iterator interface.
+ Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
+ to end the loop.
+
+ >>> l = [0, 1, 2]
+ >>> list(iter_except(l.pop, IndexError))
+ [2, 1, 0]
+
+ Multiple exceptions can be specified as a stopping condition:
+
+ >>> l = [1, 2, 3, '...', 4, 5, 6]
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+ [7, 6, 5]
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+ [4, 3, 2]
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+ []
+
+ """
+ try:
+ if first is not None:
+ yield first()
+ while 1:
+ yield func()
+ except exception:
+ pass
+
+
+def first_true(iterable, default=None, pred=None):
+ """
+ Returns the first true value in the iterable.
+
+ If no true value is found, returns *default*.
+
+ If *pred* is not None, returns the first item for which
+ ``pred(item) == True``.
+
+ >>> first_true(range(10))
+ 1
+ >>> first_true(range(10), pred=lambda x: x > 5)
+ 6
+ >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
+ 'missing'
+
+ """
+ return next(filter(pred, iterable), default)
+
+
+def random_product(*args, repeat=1):
+ """Draw an item at random from each of the input iterables.
+
+ >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
+ ('c', 3, 'Z')
+
+ If *repeat* is provided as a keyword argument, that many items will be
+ drawn from each iterable.
+
+ >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
+ ('a', 2, 'd', 3)
+
+ This is equivalent to taking a random selection from
+ ``itertools.product(*args, repeat=repeat)``.
+
+ """
+ pools = [tuple(pool) for pool in args] * repeat
+ return tuple(choice(pool) for pool in pools)
+
+
+def random_permutation(iterable, r=None):
+ """Return a random *r* length permutation of the elements in *iterable*.
+
+ If *r* is not specified or is ``None``, then *r* defaults to the length of
+ *iterable*.
+
+ >>> random_permutation(range(5)) # doctest:+SKIP
+ (3, 4, 0, 1, 2)
+
+ This is equivalent to taking a random selection from
+ ``itertools.permutations(iterable, r)``.
+
+ """
+ pool = tuple(iterable)
+ r = len(pool) if r is None else r
+ return tuple(sample(pool, r))
+
+
+def random_combination(iterable, r):
+ """Return a random *r* length subsequence of the elements in *iterable*.
+
+ >>> random_combination(range(5), 3) # doctest:+SKIP
+ (2, 3, 4)
+
+ This is equivalent to taking a random selection from
+ ``itertools.combinations(iterable, r)``.
+
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ indices = sorted(sample(range(n), r))
+ return tuple(pool[i] for i in indices)
+
+
+def random_combination_with_replacement(iterable, r):
+ """Return a random *r* length subsequence of elements in *iterable*,
+ allowing individual elements to be repeated.
+
+ >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
+ (0, 0, 1, 2, 2)
+
+ This is equivalent to taking a random selection from
+ ``itertools.combinations_with_replacement(iterable, r)``.
+
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ indices = sorted(randrange(n) for i in range(r))
+ return tuple(pool[i] for i in indices)
+
+
+def nth_combination(iterable, r, index):
+ """Equivalent to ``list(combinations(iterable, r))[index]``.
+
+ The subsequences of *iterable* that are of length *r* can be ordered
+ lexicographically. :func:`nth_combination` computes the subsequence at
+ sort position *index* directly, without computing the previous
+ subsequences.
+
+ >>> nth_combination(range(5), 3, 5)
+ (0, 3, 4)
+
+ A ``ValueError`` will be raised if *r* is negative or greater than the
+ length of *iterable*.
+ An ``IndexError`` will be raised if the given *index* is invalid.
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ if (r < 0) or (r > n):
+ raise ValueError
+
+ c = 1
+ k = min(r, n - r)
+ for i in range(1, k + 1):
+ c = c * (n - k + i) // i
+
+ if index < 0:
+ index += c
+
+ if (index < 0) or (index >= c):
+ raise IndexError
+
+ result = []
+ while r:
+ c, n, r = c * r // n, n - 1, r - 1
+ while index >= c:
+ index -= c
+ c, n = c * (n - r) // n, n - 1
+ result.append(pool[-1 - n])
+
+ return tuple(result)
+
+
+def prepend(value, iterator):
+ """Yield *value*, followed by the elements in *iterator*.
+
+ >>> value = '0'
+ >>> iterator = ['1', '2', '3']
+ >>> list(prepend(value, iterator))
+ ['0', '1', '2', '3']
+
+ To prepend multiple values, see :func:`itertools.chain`
+ or :func:`value_chain`.
+
+ """
+ return chain([value], iterator)
+
+
+def convolve(signal, kernel):
+ """Convolve the iterable *signal* with the iterable *kernel*.
+
+ >>> signal = (1, 2, 3, 4, 5)
+ >>> kernel = [3, 2, 1]
+ >>> list(convolve(signal, kernel))
+ [3, 8, 14, 20, 26, 14, 5]
+
+ Note: the input arguments are not interchangeable, as the *kernel*
+ is immediately consumed and stored.
+
+ """
+ kernel = tuple(kernel)[::-1]
+ n = len(kernel)
+ window = deque([0], maxlen=n) * n
+ for x in chain(signal, repeat(0, n - 1)):
+ window.append(x)
+ yield sum(map(operator.mul, kernel, window))
+
+
+def before_and_after(predicate, it):
+ """A variant of :func:`takewhile` that allows complete access to the
+ remainder of the iterator.
+
+ >>> it = iter('ABCdEfGhI')
+ >>> all_upper, remainder = before_and_after(str.isupper, it)
+ >>> ''.join(all_upper)
+ 'ABC'
+ >>> ''.join(remainder) # takewhile() would lose the 'd'
+ 'dEfGhI'
+
+ Note that the first iterator must be fully consumed before the second
+ iterator can generate valid results.
+ """
+ it = iter(it)
+ transition = []
+
+ def true_iterator():
+ for elem in it:
+ if predicate(elem):
+ yield elem
+ else:
+ transition.append(elem)
+ return
+
+ def remainder_iterator():
+ yield from transition
+ yield from it
+
+ return true_iterator(), remainder_iterator()
+
+
+def triplewise(iterable):
+ """Return overlapping triplets from *iterable*.
+
+ >>> list(triplewise('ABCDE'))
+ [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
+
+ """
+ for (a, _), (b, c) in pairwise(pairwise(iterable)):
+ yield a, b, c
+
+
+def sliding_window(iterable, n):
+ """Return a sliding window of width *n* over *iterable*.
+
+ >>> list(sliding_window(range(6), 4))
+ [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
+
+ If *iterable* has fewer than *n* items, then nothing is yielded:
+
+ >>> list(sliding_window(range(3), 4))
+ []
+
+ For a variant with more features, see :func:`windowed`.
+ """
+ it = iter(iterable)
+ window = deque(islice(it, n), maxlen=n)
+ if len(window) == n:
+ yield tuple(window)
+ for x in it:
+ window.append(x)
+ yield tuple(window)
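
A quick sketch tying a few of the recipes above together. It assumes the
module is importable under its public name ``more_itertools`` (the vendored
copy here is the same code); expected output is shown in comments.

    # Exercising take(), pairwise(), sliding_window(), and partition().
    from more_itertools import take, pairwise, sliding_window, partition

    print(take(3, range(10)))                 # [0, 1, 2]
    print(list(pairwise('ABCD')))             # [('A', 'B'), ('B', 'C'), ('C', 'D')]
    print(list(sliding_window(range(5), 3)))  # [(0, 1, 2), (1, 2, 3), (2, 3, 4)]
    evens, odds = partition(lambda x: x % 2, range(6))
    print(list(evens), list(odds))            # [0, 2, 4] [1, 3, 5]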
diff --git a/pkg_resources/_vendor/packaging-21.3.dist-info/top_level.txt b/pkg_resources/_vendor/packaging-21.3.dist-info/top_level.txt
new file mode 100644
index 0000000..748809f
--- /dev/null
+++ b/pkg_resources/_vendor/packaging-21.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+packaging
diff --git a/pkg_resources/_vendor/packaging/__about__.py b/pkg_resources/_vendor/packaging/__about__.py
index 95d330e..3551bc2 100644
--- a/pkg_resources/_vendor/packaging/__about__.py
+++ b/pkg_resources/_vendor/packaging/__about__.py
@@ -1,21 +1,26 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
-from __future__ import absolute_import, division, print_function
__all__ = [
- "__title__", "__summary__", "__uri__", "__version__", "__author__",
- "__email__", "__license__", "__copyright__",
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
-__version__ = "16.8"
+__version__ = "21.3"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
-__license__ = "BSD or Apache License, Version 2.0"
-__copyright__ = "Copyright 2014-2016 %s" % __author__
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "2014-2019 %s" % __author__
diff --git a/pkg_resources/_vendor/packaging/__init__.py b/pkg_resources/_vendor/packaging/__init__.py
index 5ee6220..3c50c5d 100644
--- a/pkg_resources/_vendor/packaging/__init__.py
+++ b/pkg_resources/_vendor/packaging/__init__.py
@@ -1,14 +1,25 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
-from __future__ import absolute_import, division, print_function
from .__about__ import (
- __author__, __copyright__, __email__, __license__, __summary__, __title__,
- __uri__, __version__
+ __author__,
+ __copyright__,
+ __email__,
+ __license__,
+ __summary__,
+ __title__,
+ __uri__,
+ __version__,
)
__all__ = [
- "__title__", "__summary__", "__uri__", "__version__", "__author__",
- "__email__", "__license__", "__copyright__",
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
]
diff --git a/pkg_resources/_vendor/packaging/_compat.py b/pkg_resources/_vendor/packaging/_compat.py
deleted file mode 100644
index 210bb80..0000000
--- a/pkg_resources/_vendor/packaging/_compat.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-from __future__ import absolute_import, division, print_function
-
-import sys
-
-
-PY2 = sys.version_info[0] == 2
-PY3 = sys.version_info[0] == 3
-
-# flake8: noqa
-
-if PY3:
- string_types = str,
-else:
- string_types = basestring,
-
-
-def with_metaclass(meta, *bases):
- """
- Create a base class with a metaclass.
- """
- # This requires a bit of explanation: the basic idea is to make a dummy
- # metaclass for one level of class instantiation that replaces itself with
- # the actual metaclass.
- class metaclass(meta):
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
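
The deleted ``with_metaclass`` helper is obsolete on Python 3, where the
``metaclass`` keyword argument is supported directly. A minimal sketch of the
replacement pattern, matching what the specifiers.py hunk further below does:

    import abc

    # Python 2/3 form (removed above):
    #     class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): ...
    # Python 3-only equivalent:
    class BaseSpecifier(metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def __str__(self) -> str:
            """Return the str representation of this specifier."""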
diff --git a/pkg_resources/_vendor/packaging/_manylinux.py b/pkg_resources/_vendor/packaging/_manylinux.py
new file mode 100644
index 0000000..4c379aa
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/_manylinux.py
@@ -0,0 +1,301 @@
+import collections
+import functools
+import os
+import re
+import struct
+import sys
+import warnings
+from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
+
+
+# Python does not provide platform information at sufficient granularity to
+# identify the architecture of the running executable in some cases, so we
+# determine it dynamically by reading the information from the running
+# process. This only applies on Linux, which uses the ELF format.
+class _ELFFileHeader:
+ # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+ class _InvalidELFFileHeader(ValueError):
+ """
+ An invalid ELF file header was found.
+ """
+
+ ELF_MAGIC_NUMBER = 0x7F454C46
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ EM_386 = 3
+ EM_S390 = 22
+ EM_ARM = 40
+ EM_X86_64 = 62
+ EF_ARM_ABIMASK = 0xFF000000
+ EF_ARM_ABI_VER5 = 0x05000000
+ EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+ def __init__(self, file: IO[bytes]) -> None:
+ def unpack(fmt: str) -> int:
+ try:
+ data = file.read(struct.calcsize(fmt))
+ result: Tuple[int, ...] = struct.unpack(fmt, data)
+ except struct.error:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ return result[0]
+
+ self.e_ident_magic = unpack(">I")
+ if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_class = unpack("B")
+ if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_data = unpack("B")
+ if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_version = unpack("B")
+ self.e_ident_osabi = unpack("B")
+ self.e_ident_abiversion = unpack("B")
+ self.e_ident_pad = file.read(7)
+ format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+ format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+ format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+ format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+ self.e_type = unpack(format_h)
+ self.e_machine = unpack(format_h)
+ self.e_version = unpack(format_i)
+ self.e_entry = unpack(format_p)
+ self.e_phoff = unpack(format_p)
+ self.e_shoff = unpack(format_p)
+ self.e_flags = unpack(format_i)
+ self.e_ehsize = unpack(format_h)
+ self.e_phentsize = unpack(format_h)
+ self.e_phnum = unpack(format_h)
+ self.e_shentsize = unpack(format_h)
+ self.e_shnum = unpack(format_h)
+ self.e_shstrndx = unpack(format_h)
+
+
+def _get_elf_header() -> Optional[_ELFFileHeader]:
+ try:
+ with open(sys.executable, "rb") as f:
+ elf_header = _ELFFileHeader(f)
+ except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+ return None
+ return elf_header
+
+
+def _is_linux_armhf() -> bool:
+ # hard-float ABI can be detected from the ELF header of the running
+ # process
+ # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_ARM
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+ ) == elf_header.EF_ARM_ABI_VER5
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+ ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+ return result
+
+
+def _is_linux_i686() -> bool:
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_386
+ return result
+
+
+def _have_compatible_abi(arch: str) -> bool:
+ if arch == "armv7l":
+ return _is_linux_armhf()
+ if arch == "i686":
+ return _is_linux_i686()
+ return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
+
+
+# If glibc ever changes its major version, we need to know what the last
+# minor version was, so we can build the complete list of all versions.
+# For now, guess that the highest minor version will be 50. Once a new
+# glibc major version actually ships, update the dictionary with the
+# real value.
+_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
+
+
+class _GLibCVersion(NamedTuple):
+ major: int
+ minor: int
+
+
+def _glibc_version_string_confstr() -> Optional[str]:
+ """
+ Primary implementation of glibc_version_string using os.confstr.
+ """
+ # os.confstr is quite a bit faster than ctypes.CDLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module.
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+ version_string = os.confstr("CS_GNU_LIBC_VERSION")
+ assert version_string is not None
+ _, version = version_string.split()
+ except (AssertionError, AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def _glibc_version_string_ctypes() -> Optional[str]:
+ """
+ Fallback implementation of glibc_version_string using ctypes.
+ """
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ #
+ # We must also handle the special case where the executable is not a
+ # dynamically linked executable. This can occur when using musl libc,
+ # for example. In this situation, dlopen() will error, leading to an
+ # OSError. Interestingly, at least in the case of musl, there is no
+ # errno set on the OSError. The single string argument used to construct
+ # OSError comes from libc itself and is therefore not portable to
+ # hard-code here. In any case, failure to call dlopen() means we
+ # can't proceed, so we bail on our attempt.
+ try:
+ process_namespace = ctypes.CDLL(None)
+ except OSError:
+ return None
+
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str: str = gnu_get_libc_version()
+ # the c_char_p restype yields bytes on Python 3; decode to str if needed:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+def _glibc_version_string() -> Optional[str]:
+ """Returns glibc version string, or None if not using glibc."""
+ return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
+ """Parse glibc version.
+
+ We use a regexp instead of str.split because we want to discard any
+ random junk that might come after the minor version -- this might happen
+ in patched/forked versions of glibc (e.g. Linaro's version of glibc
+ uses version strings like "2.20-2014.11"). See gh-3588.
+ """
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+ if not m:
+ warnings.warn(
+ "Expected glibc version with 2 components major.minor,"
+ " got: %s" % version_str,
+ RuntimeWarning,
+ )
+ return -1, -1
+ return int(m.group("major")), int(m.group("minor"))
+
+
+@functools.lru_cache()
+def _get_glibc_version() -> Tuple[int, int]:
+ version_str = _glibc_version_string()
+ if version_str is None:
+ return (-1, -1)
+ return _parse_glibc_version(version_str)
+
+
+# From PEP 513, PEP 600
+def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
+ sys_glibc = _get_glibc_version()
+ if sys_glibc < version:
+ return False
+ # Check for presence of _manylinux module.
+ try:
+ import _manylinux # noqa
+ except ImportError:
+ return True
+ if hasattr(_manylinux, "manylinux_compatible"):
+ result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+ if result is not None:
+ return bool(result)
+ return True
+ if version == _GLibCVersion(2, 5):
+ if hasattr(_manylinux, "manylinux1_compatible"):
+ return bool(_manylinux.manylinux1_compatible)
+ if version == _GLibCVersion(2, 12):
+ if hasattr(_manylinux, "manylinux2010_compatible"):
+ return bool(_manylinux.manylinux2010_compatible)
+ if version == _GLibCVersion(2, 17):
+ if hasattr(_manylinux, "manylinux2014_compatible"):
+ return bool(_manylinux.manylinux2014_compatible)
+ return True
+
+
+_LEGACY_MANYLINUX_MAP = {
+ # CentOS 7 w/ glibc 2.17 (PEP 599)
+ (2, 17): "manylinux2014",
+ # CentOS 6 w/ glibc 2.12 (PEP 571)
+ (2, 12): "manylinux2010",
+ # CentOS 5 w/ glibc 2.5 (PEP 513)
+ (2, 5): "manylinux1",
+}
+
+
+def platform_tags(linux: str, arch: str) -> Iterator[str]:
+ if not _have_compatible_abi(arch):
+ return
+ # Oldest glibc to be supported regardless of architecture is (2, 17);
+ # too_old_glibc2 is one minor version below that, because the ranges
+ # built below stop just short of it.
+ too_old_glibc2 = _GLibCVersion(2, 16)
+ if arch in {"x86_64", "i686"}:
+ # On x86/i686 the oldest glibc to be supported is (2, 5) instead.
+ too_old_glibc2 = _GLibCVersion(2, 4)
+ current_glibc = _GLibCVersion(*_get_glibc_version())
+ glibc_max_list = [current_glibc]
+ # We can assume compatibility across glibc major versions.
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+ #
+ # Build a list of maximum glibc versions so that we can
+ # output the canonical list of all glibc from current_glibc
+ # down to too_old_glibc2, including all intermediary versions.
+ for glibc_major in range(current_glibc.major - 1, 1, -1):
+ glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
+ glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
+ for glibc_max in glibc_max_list:
+ if glibc_max.major == too_old_glibc2.major:
+ min_minor = too_old_glibc2.minor
+ else:
+ # For other glibc major versions oldest supported is (x, 0).
+ min_minor = -1
+ for glibc_minor in range(glibc_max.minor, min_minor, -1):
+ glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+ tag = "manylinux_{}_{}".format(*glibc_version)
+ if _is_compatible(tag, arch, glibc_version):
+ yield linux.replace("linux", tag)
+ # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
+ if glibc_version in _LEGACY_MANYLINUX_MAP:
+ legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
+ if _is_compatible(legacy_tag, arch, glibc_version):
+ yield linux.replace("linux", legacy_tag)
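
A hedged sketch of how ``platform_tags`` above might be driven. The ``linux``
argument is a normalized platform string such as ``linux_x86_64``; the glue
shown here is an assumption for illustration, not the vendored entry point
(``packaging.tags`` performs the equivalent normalization):

    import re
    import sysconfig

    from packaging._manylinux import platform_tags  # public name of this module

    # e.g. "linux-x86_64" -> "linux_x86_64"
    linux = re.sub(r"[.-]", "_", sysconfig.get_platform())
    arch = linux.split("_", 1)[-1]  # "x86_64"
    for tag in platform_tags(linux, arch):
        print(tag)  # manylinux_2_17_x86_64 ... manylinux2014_x86_64 ...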
diff --git a/pkg_resources/_vendor/packaging/_musllinux.py b/pkg_resources/_vendor/packaging/_musllinux.py
new file mode 100644
index 0000000..8ac3059
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/_musllinux.py
@@ -0,0 +1,136 @@
+"""PEP 656 support.
+
+This module implements logic to detect if the currently running Python is
+linked against musl, and what musl version is used.
+"""
+
+import contextlib
+import functools
+import operator
+import os
+import re
+import struct
+import subprocess
+import sys
+from typing import IO, Iterator, NamedTuple, Optional, Tuple
+
+
+def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
+ return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
+
+
+def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
+ """Detect musl libc location by parsing the Python executable.
+
+ Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+ ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+ """
+ f.seek(0)
+ try:
+ ident = _read_unpacked(f, "16B")
+ except struct.error:
+ return None
+ if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
+ return None
+ f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
+
+ try:
+ # e_fmt: Format for program header.
+ # p_fmt: Format for section header.
+ # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+ e_fmt, p_fmt, p_idx = {
+ 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
+ 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
+ }[ident[4]]
+ except KeyError:
+ return None
+ else:
+ p_get = operator.itemgetter(*p_idx)
+
+ # Find the interpreter section and return its content.
+ try:
+ _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
+ except struct.error:
+ return None
+ for i in range(e_phnum + 1):
+ f.seek(e_phoff + e_phentsize * i)
+ try:
+ p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
+ except struct.error:
+ return None
+ if p_type != 3: # Not PT_INTERP.
+ continue
+ f.seek(p_offset)
+ interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
+ if "musl" not in interpreter:
+ return None
+ return interpreter
+ return None
+
+
+class _MuslVersion(NamedTuple):
+ major: int
+ minor: int
+
+
+def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
+ lines = [n for n in (n.strip() for n in output.splitlines()) if n]
+ if len(lines) < 2 or lines[0][:4] != "musl":
+ return None
+ m = re.match(r"Version (\d+)\.(\d+)", lines[1])
+ if not m:
+ return None
+ return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
+
+
+@functools.lru_cache()
+def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
+ """Detect currently-running musl runtime version.
+
+ This is done by checking the specified executable's dynamic linking
+ information, then invoking the loader and parsing its output for a
+ version string. If the loader is musl, the output would be something like::
+
+ musl libc (x86_64)
+ Version 1.2.2
+ Dynamic Program Loader
+ """
+ with contextlib.ExitStack() as stack:
+ try:
+ f = stack.enter_context(open(executable, "rb"))
+ except OSError:
+ return None
+ ld = _parse_ld_musl_from_elf(f)
+ if not ld:
+ return None
+ proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
+ return _parse_musl_version(proc.stderr)
+
+
+def platform_tags(arch: str) -> Iterator[str]:
+ """Generate musllinux tags compatible to the current platform.
+
+ :param arch: Should be the part of platform tag after the ``linux_``
+ prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
+ prerequisite for the current platform to be musllinux-compatible.
+
+ :returns: An iterator of compatible musllinux tags.
+ """
+ sys_musl = _get_musl_version(sys.executable)
+ if sys_musl is None: # Python not dynamically linked against musl.
+ return
+ for minor in range(sys_musl.minor, -1, -1):
+ yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
+
+
+if __name__ == "__main__": # pragma: no cover
+ import sysconfig
+
+ plat = sysconfig.get_platform()
+ assert plat.startswith("linux-"), "not linux"
+
+ print("plat:", plat)
+ print("musl:", _get_musl_version(sys.executable))
+ print("tags:", end=" ")
+ for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
+ print(t, end="\n ")
diff --git a/pkg_resources/_vendor/packaging/_structures.py b/pkg_resources/_vendor/packaging/_structures.py
index ccc2786..90a6465 100644
--- a/pkg_resources/_vendor/packaging/_structures.py
+++ b/pkg_resources/_vendor/packaging/_structures.py
@@ -1,68 +1,61 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
-from __future__ import absolute_import, division, print_function
-class Infinity(object):
-
- def __repr__(self):
+class InfinityType:
+ def __repr__(self) -> str:
return "Infinity"
- def __hash__(self):
+ def __hash__(self) -> int:
return hash(repr(self))
- def __lt__(self, other):
+ def __lt__(self, other: object) -> bool:
return False
- def __le__(self, other):
+ def __le__(self, other: object) -> bool:
return False
- def __eq__(self, other):
+ def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
- def __ne__(self, other):
- return not isinstance(other, self.__class__)
-
- def __gt__(self, other):
+ def __gt__(self, other: object) -> bool:
return True
- def __ge__(self, other):
+ def __ge__(self, other: object) -> bool:
return True
- def __neg__(self):
+ def __neg__(self: object) -> "NegativeInfinityType":
return NegativeInfinity
-Infinity = Infinity()
+Infinity = InfinityType()
-class NegativeInfinity(object):
- def __repr__(self):
+class NegativeInfinityType:
+ def __repr__(self) -> str:
return "-Infinity"
- def __hash__(self):
+ def __hash__(self) -> int:
return hash(repr(self))
- def __lt__(self, other):
+ def __lt__(self, other: object) -> bool:
return True
- def __le__(self, other):
+ def __le__(self, other: object) -> bool:
return True
- def __eq__(self, other):
+ def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
- def __ne__(self, other):
- return not isinstance(other, self.__class__)
-
- def __gt__(self, other):
+ def __gt__(self, other: object) -> bool:
return False
- def __ge__(self, other):
+ def __ge__(self, other: object) -> bool:
return False
- def __neg__(self):
+ def __neg__(self: object) -> InfinityType:
return Infinity
-NegativeInfinity = NegativeInfinity()
+
+NegativeInfinity = NegativeInfinityType()
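
These singletons act as extreme sentinels in version-comparison keys:
``Infinity`` compares greater than any object, ``NegativeInfinity`` less than
any object. A minimal sketch:

    from packaging._structures import Infinity, NegativeInfinity  # public name

    print(Infinity > (99, 99))       # True, regardless of the other operand
    print(NegativeInfinity < "aaa")  # True, regardless of the other operand
    print(sorted([3, Infinity, NegativeInfinity, 7]))
    # [-Infinity, 3, 7, Infinity]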
diff --git a/pkg_resources/_vendor/packaging/markers.py b/pkg_resources/_vendor/packaging/markers.py
index 892e578..18769b0 100644
--- a/pkg_resources/_vendor/packaging/markers.py
+++ b/pkg_resources/_vendor/packaging/markers.py
@@ -1,26 +1,37 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
-from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from pkg_resources.extern.pyparsing import ( # noqa: N817
+ Forward,
+ Group,
+ Literal as L,
+ ParseException,
+ ParseResults,
+ QuotedString,
+ ZeroOrMore,
+ stringEnd,
+ stringStart,
+)
-from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
-from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
-from pkg_resources.extern.pyparsing import Literal as L # noqa
-
-from ._compat import string_types
-from .specifiers import Specifier, InvalidSpecifier
-
+from .specifiers import InvalidSpecifier, Specifier
__all__ = [
- "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
- "Marker", "default_environment",
+ "InvalidMarker",
+ "UndefinedComparison",
+ "UndefinedEnvironmentName",
+ "Marker",
+ "default_environment",
]
+Operator = Callable[[str, str], bool]
+
class InvalidMarker(ValueError):
"""
@@ -41,78 +52,67 @@ class UndefinedEnvironmentName(ValueError):
"""
-class Node(object):
-
- def __init__(self, value):
+class Node:
+ def __init__(self, value: Any) -> None:
self.value = value
- def __str__(self):
+ def __str__(self) -> str:
return str(self.value)
- def __repr__(self):
- return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__}('{self}')>"
- def serialize(self):
+ def serialize(self) -> str:
raise NotImplementedError
class Variable(Node):
-
- def serialize(self):
+ def serialize(self) -> str:
return str(self)
class Value(Node):
-
- def serialize(self):
- return '"{0}"'.format(self)
+ def serialize(self) -> str:
+ return f'"{self}"'
class Op(Node):
-
- def serialize(self):
+ def serialize(self) -> str:
return str(self)
VARIABLE = (
- L("implementation_version") |
- L("platform_python_implementation") |
- L("implementation_name") |
- L("python_full_version") |
- L("platform_release") |
- L("platform_version") |
- L("platform_machine") |
- L("platform_system") |
- L("python_version") |
- L("sys_platform") |
- L("os_name") |
- L("os.name") | # PEP-345
- L("sys.platform") | # PEP-345
- L("platform.version") | # PEP-345
- L("platform.machine") | # PEP-345
- L("platform.python_implementation") | # PEP-345
- L("python_implementation") | # undocumented setuptools legacy
- L("extra")
+ L("implementation_version")
+ | L("platform_python_implementation")
+ | L("implementation_name")
+ | L("python_full_version")
+ | L("platform_release")
+ | L("platform_version")
+ | L("platform_machine")
+ | L("platform_system")
+ | L("python_version")
+ | L("sys_platform")
+ | L("os_name")
+ | L("os.name") # PEP-345
+ | L("sys.platform") # PEP-345
+ | L("platform.version") # PEP-345
+ | L("platform.machine") # PEP-345
+ | L("platform.python_implementation") # PEP-345
+ | L("python_implementation") # undocumented setuptools legacy
+ | L("extra") # PEP-508
)
ALIASES = {
- 'os.name': 'os_name',
- 'sys.platform': 'sys_platform',
- 'platform.version': 'platform_version',
- 'platform.machine': 'platform_machine',
- 'platform.python_implementation': 'platform_python_implementation',
- 'python_implementation': 'platform_python_implementation'
+ "os.name": "os_name",
+ "sys.platform": "sys_platform",
+ "platform.version": "platform_version",
+ "platform.machine": "platform_machine",
+ "platform.python_implementation": "platform_python_implementation",
+ "python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
- L("===") |
- L("==") |
- L(">=") |
- L("<=") |
- L("!=") |
- L("~=") |
- L(">") |
- L("<")
+ L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
@@ -138,22 +138,28 @@ MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
-def _coerce_parse_result(results):
+def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
-def _format_marker(marker, first=True):
- assert isinstance(marker, (list, tuple, string_types))
+def _format_marker(
+ marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
+) -> str:
+
+ assert isinstance(marker, (list, tuple, str))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself its own list. In that case we want to
# skip the rest of this function so that we don't get extraneous () on
# the outside.
- if (isinstance(marker, list) and len(marker) == 1 and
- isinstance(marker[0], (list, tuple))):
+ if (
+ isinstance(marker, list)
+ and len(marker) == 1
+ and isinstance(marker[0], (list, tuple))
+ ):
return _format_marker(marker[0])
if isinstance(marker, list):
@@ -168,7 +174,7 @@ def _format_marker(marker, first=True):
return marker
-_operators = {
+_operators: Dict[str, Operator] = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
@@ -180,7 +186,7 @@ _operators = {
}
-def _eval_op(lhs, op, rhs):
+def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
@@ -188,34 +194,36 @@ def _eval_op(lhs, op, rhs):
else:
return spec.contains(lhs)
- oper = _operators.get(op.serialize())
+ oper: Optional[Operator] = _operators.get(op.serialize())
if oper is None:
- raise UndefinedComparison(
- "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
- )
+ raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
return oper(lhs, rhs)
-_undefined = object()
+class Undefined:
+ pass
+
+
+_undefined = Undefined()
-def _get_env(environment, name):
- value = environment.get(name, _undefined)
+def _get_env(environment: Dict[str, str], name: str) -> str:
+ value: Union[str, Undefined] = environment.get(name, _undefined)
- if value is _undefined:
+ if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
- "{0!r} does not exist in evaluation environment.".format(name)
+ f"{name!r} does not exist in evaluation environment."
)
return value
-def _evaluate_markers(markers, environment):
- groups = [[]]
+def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
+ groups: List[List[bool]] = [[]]
for marker in markers:
- assert isinstance(marker, (list, tuple, string_types))
+ assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
@@ -238,22 +246,17 @@ def _evaluate_markers(markers, environment):
return any(all(item) for item in groups)
-def format_full_version(info):
- version = '{0.major}.{0.minor}.{0.micro}'.format(info)
+def format_full_version(info: "sys._version_info") -> str:
+ version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
- if kind != 'final':
+ if kind != "final":
version += kind[0] + str(info.serial)
return version
-def default_environment():
- if hasattr(sys, 'implementation'):
- iver = format_full_version(sys.implementation.version)
- implementation_name = sys.implementation.name
- else:
- iver = '0'
- implementation_name = ''
-
+def default_environment() -> Dict[str, str]:
+ iver = format_full_version(sys.implementation.version)
+ implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
@@ -264,28 +267,28 @@ def default_environment():
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
- "python_version": platform.python_version()[:3],
+ "python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
-class Marker(object):
-
- def __init__(self, marker):
+class Marker:
+ def __init__(self, marker: str) -> None:
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
- err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
- marker, marker[e.loc:e.loc + 8])
- raise InvalidMarker(err_str)
+ raise InvalidMarker(
+ f"Invalid marker: {marker!r}, parse error at "
+ f"{marker[e.loc : e.loc + 8]!r}"
+ )
- def __str__(self):
+ def __str__(self) -> str:
return _format_marker(self._markers)
- def __repr__(self):
- return "<Marker({0!r})>".format(str(self))
+ def __repr__(self) -> str:
+ return f"<Marker('{self}')>"
- def evaluate(self, environment=None):
+ def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
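
A brief sketch of the ``Marker`` API reworked above. Per the upstream
implementation (the ``evaluate`` body falls outside this hunk), an environment
dict passed to ``evaluate()`` overrides keys from ``default_environment()``:

    from packaging.markers import Marker  # public name of this module

    m = Marker("python_version >= '3.6' and os_name == 'posix'")
    print(m.evaluate())  # True on a POSIX build of Python >= 3.6
    print(m.evaluate({"python_version": "2.7"}))  # False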
diff --git a/pkg_resources/_vendor/packaging/requirements.py b/pkg_resources/_vendor/packaging/requirements.py
index 0c8c4a3..6af14ec 100644
--- a/pkg_resources/_vendor/packaging/requirements.py
+++ b/pkg_resources/_vendor/packaging/requirements.py
@@ -1,15 +1,24 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
-from __future__ import absolute_import, division, print_function
-import string
import re
-
-from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
-from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
-from pkg_resources.extern.pyparsing import Literal as L # noqa
-from pkg_resources.extern.six.moves.urllib import parse as urlparse
+import string
+import urllib.parse
+from typing import List, Optional as TOptional, Set
+
+from pkg_resources.extern.pyparsing import ( # noqa
+ Combine,
+ Literal as L,
+ Optional,
+ ParseException,
+ Regex,
+ Word,
+ ZeroOrMore,
+ originalTextFor,
+ stringEnd,
+ stringStart,
+)
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
@@ -38,8 +47,8 @@ IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
-URI = Regex(r'[^ ]+')("url")
-URL = (AT + URI)
+URI = Regex(r"[^ ]+")("url")
+URL = AT + URI
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
@@ -48,31 +57,34 @@ VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
-VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
- joinString=",", adjacent=False)("_raw_spec")
-_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
-_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
+VERSION_MANY = Combine(
+ VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+)("_raw_spec")
+_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
- lambda s, l, t: Marker(s[t._original_start:t._original_end])
+ lambda s, l, t: Marker(s[t._original_start : t._original_end])
)
-MARKER_SEPERATOR = SEMICOLON
-MARKER = MARKER_SEPERATOR + MARKER_EXPR
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
-NAMED_REQUIREMENT = \
- NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+# pkg_resources.extern.pyparsing isn't thread-safe during initialization, so
+# trigger the initialization eagerly here; see issue #104.
+REQUIREMENT.parseString("x[]")
-class Requirement(object):
+class Requirement:
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
@@ -85,43 +97,50 @@ class Requirement(object):
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
- def __init__(self, requirement_string):
+ def __init__(self, requirement_string: str) -> None:
try:
req = REQUIREMENT.parseString(requirement_string)
except ParseException as e:
raise InvalidRequirement(
- "Invalid requirement, parse error at \"{0!r}\"".format(
- requirement_string[e.loc:e.loc + 8]))
+ f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
+ )
- self.name = req.name
+ self.name: str = req.name
if req.url:
- parsed_url = urlparse.urlparse(req.url)
- if not (parsed_url.scheme and parsed_url.netloc) or (
- not parsed_url.scheme and not parsed_url.netloc):
- raise InvalidRequirement("Invalid URL given")
- self.url = req.url
+ parsed_url = urllib.parse.urlparse(req.url)
+ if parsed_url.scheme == "file":
+ if urllib.parse.urlunparse(parsed_url) != req.url:
+ raise InvalidRequirement("Invalid URL given")
+ elif not (parsed_url.scheme and parsed_url.netloc) or (
+ not parsed_url.scheme and not parsed_url.netloc
+ ):
+ raise InvalidRequirement(f"Invalid URL: {req.url}")
+ self.url: TOptional[str] = req.url
else:
self.url = None
- self.extras = set(req.extras.asList() if req.extras else [])
- self.specifier = SpecifierSet(req.specifier)
- self.marker = req.marker if req.marker else None
+ self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
+ self.specifier: SpecifierSet = SpecifierSet(req.specifier)
+ self.marker: TOptional[Marker] = req.marker if req.marker else None
- def __str__(self):
- parts = [self.name]
+ def __str__(self) -> str:
+ parts: List[str] = [self.name]
if self.extras:
- parts.append("[{0}]".format(",".join(sorted(self.extras))))
+ formatted_extras = ",".join(sorted(self.extras))
+ parts.append(f"[{formatted_extras}]")
if self.specifier:
parts.append(str(self.specifier))
if self.url:
- parts.append("@ {0}".format(self.url))
+ parts.append(f"@ {self.url}")
+ if self.marker:
+ parts.append(" ")
if self.marker:
- parts.append("; {0}".format(self.marker))
+ parts.append(f"; {self.marker}")
return "".join(parts)
- def __repr__(self):
- return "<Requirement({0!r})>".format(str(self))
+ def __repr__(self) -> str:
+ return f"<Requirement('{self}')>"
diff --git a/pkg_resources/_vendor/packaging/specifiers.py b/pkg_resources/_vendor/packaging/specifiers.py
index 7f5a76c..0e218a6 100644
--- a/pkg_resources/_vendor/packaging/specifiers.py
+++ b/pkg_resources/_vendor/packaging/specifiers.py
@@ -1,15 +1,33 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
-from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
-
-from ._compat import string_types, with_metaclass
-from .version import Version, LegacyVersion, parse
+import warnings
+from typing import (
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Pattern,
+ Set,
+ Tuple,
+ TypeVar,
+ Union,
+)
+
+from .utils import canonicalize_version
+from .version import LegacyVersion, Version, parse
+
+ParsedVersion = Union[Version, LegacyVersion]
+UnparsedVersion = Union[Version, LegacyVersion, str]
+VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
+CallableOperator = Callable[[ParsedVersion, str], bool]
class InvalidSpecifier(ValueError):
@@ -18,57 +36,51 @@ class InvalidSpecifier(ValueError):
"""
-class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
-
+class BaseSpecifier(metaclass=abc.ABCMeta):
@abc.abstractmethod
- def __str__(self):
+ def __str__(self) -> str:
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
- def __hash__(self):
+ def __hash__(self) -> int:
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
- def __eq__(self, other):
+ def __eq__(self, other: object) -> bool:
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
- @abc.abstractmethod
- def __ne__(self, other):
- """
- Returns a boolean representing whether or not the two Specifier like
- objects are not equal.
- """
-
@abc.abstractproperty
- def prereleases(self):
+ def prereleases(self) -> Optional[bool]:
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
- def prereleases(self, value):
+ def prereleases(self, value: bool) -> None:
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
- def contains(self, item, prereleases=None):
+ def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
- def filter(self, iterable, prereleases=None):
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
@@ -77,14 +89,15 @@ class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
class _IndividualSpecifier(BaseSpecifier):
- _operators = {}
+ _operators: Dict[str, str] = {}
+ _regex: Pattern[str]
- def __init__(self, spec="", prereleases=None):
+ def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
match = self._regex.search(spec)
if not match:
- raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
+ raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
- self._spec = (
+ self._spec: Tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
)
@@ -92,94 +105,93 @@ class _IndividualSpecifier(BaseSpecifier):
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
- def __repr__(self):
+ def __repr__(self) -> str:
pre = (
- ", prereleases={0!r}".format(self.prereleases)
+ f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
- return "<{0}({1!r}{2})>".format(
- self.__class__.__name__,
- str(self),
- pre,
- )
-
- def __str__(self):
- return "{0}{1}".format(*self._spec)
+ return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
- def __hash__(self):
- return hash(self._spec)
+ def __str__(self) -> str:
+ return "{}{}".format(*self._spec)
- def __eq__(self, other):
- if isinstance(other, string_types):
- try:
- other = self.__class__(other)
- except InvalidSpecifier:
- return NotImplemented
- elif not isinstance(other, self.__class__):
- return NotImplemented
+ @property
+ def _canonical_spec(self) -> Tuple[str, str]:
+ return self._spec[0], canonicalize_version(self._spec[1])
- return self._spec == other._spec
+ def __hash__(self) -> int:
+ return hash(self._canonical_spec)
- def __ne__(self, other):
- if isinstance(other, string_types):
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, str):
try:
- other = self.__class__(other)
+ other = self.__class__(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
- return self._spec != other._spec
+ return self._canonical_spec == other._canonical_spec
- def _get_operator(self, op):
- return getattr(self, "_compare_{0}".format(self._operators[op]))
+ def _get_operator(self, op: str) -> CallableOperator:
+ operator_callable: CallableOperator = getattr(
+ self, f"_compare_{self._operators[op]}"
+ )
+ return operator_callable
- def _coerce_version(self, version):
+ def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
- def operator(self):
+ def operator(self) -> str:
return self._spec[0]
@property
- def version(self):
+ def version(self) -> str:
return self._spec[1]
@property
- def prereleases(self):
+ def prereleases(self) -> Optional[bool]:
return self._prereleases
@prereleases.setter
- def prereleases(self, value):
+ def prereleases(self, value: bool) -> None:
self._prereleases = value
- def __contains__(self, item):
+ def __contains__(self, item: str) -> bool:
return self.contains(item)
- def contains(self, item, prereleases=None):
+ def contains(
+ self, item: UnparsedVersion, prereleases: Optional[bool] = None
+ ) -> bool:
+
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion; this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")``.
- item = self._coerce_version(item)
+ normalized_item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not; if we do not support prereleases then we can short-circuit
# the logic if this version is a prerelease.
- if item.is_prerelease and not prereleases:
+ if normalized_item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
- return self._get_operator(self.operator)(item, self.version)
+ operator_callable: CallableOperator = self._get_operator(self.operator)
+ return operator_callable(normalized_item, self.version)
+
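A minimal sketch of the contains()/prereleases interplay above, assuming the public `packaging` API that this vendored copy mirrors (the vendored module itself imports as pkg_resources._vendor.packaging.specifiers):

    >>> from packaging.specifiers import Specifier
    >>> s = Specifier(">=2.0")
    >>> s.contains("2.1")
    True
    >>> s.contains("3.0a1")                    # prereleases are excluded by default
    False
    >>> s.contains("3.0a1", prereleases=True)  # unless explicitly allowed
    True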
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
- def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
@@ -192,13 +204,14 @@ class _IndividualSpecifier(BaseSpecifier):
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
- # prereleases, then we'll store it for later incase nothing
+ # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
- if (parsed_version.is_prerelease and not
- (prereleases or self.prereleases)):
+ if parsed_version.is_prerelease and not (
+ prereleases or self.prereleases
+ ):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
- # accepting prereleases from the begining.
+ # accepting prereleases from the beginning.
else:
yielded = True
yield version
@@ -213,8 +226,7 @@ class _IndividualSpecifier(BaseSpecifier):
class LegacySpecifier(_IndividualSpecifier):
- _regex_str = (
- r"""
+ _regex_str = r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
@@ -225,10 +237,8 @@ class LegacySpecifier(_IndividualSpecifier):
# them, and a comma since it's a version separator.
)
"""
- )
- _regex = re.compile(
- r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
@@ -239,43 +249,56 @@ class LegacySpecifier(_IndividualSpecifier):
">": "greater_than",
}
- def _coerce_version(self, version):
+ def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+ super().__init__(spec, prereleases)
+
+ warnings.warn(
+ "Creating a LegacyVersion has been deprecated and will be "
+ "removed in the next major release",
+ DeprecationWarning,
+ )
+
+ def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
- def _compare_equal(self, prospective, spec):
+ def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
return prospective == self._coerce_version(spec)
- def _compare_not_equal(self, prospective, spec):
+ def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
return prospective != self._coerce_version(spec)
- def _compare_less_than_equal(self, prospective, spec):
+ def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
return prospective <= self._coerce_version(spec)
- def _compare_greater_than_equal(self, prospective, spec):
+ def _compare_greater_than_equal(
+ self, prospective: LegacyVersion, spec: str
+ ) -> bool:
return prospective >= self._coerce_version(spec)
- def _compare_less_than(self, prospective, spec):
+ def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
return prospective < self._coerce_version(spec)
- def _compare_greater_than(self, prospective, spec):
+ def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
return prospective > self._coerce_version(spec)
-def _require_version_compare(fn):
+def _require_version_compare(
+ fn: Callable[["Specifier", ParsedVersion, str], bool]
+) -> Callable[["Specifier", ParsedVersion, str], bool]:
@functools.wraps(fn)
- def wrapped(self, prospective, spec):
+ def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
+
return wrapped
class Specifier(_IndividualSpecifier):
- _regex_str = (
- r"""
+ _regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
@@ -367,10 +390,8 @@ class Specifier(_IndividualSpecifier):
)
)
"""
- )
- _regex = re.compile(
- r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
@@ -384,7 +405,8 @@ class Specifier(_IndividualSpecifier):
}
@_require_version_compare
- def _compare_compatible(self, prospective, spec):
+ def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
+
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
@@ -392,76 +414,86 @@ class Specifier(_IndividualSpecifier):
# the other specifiers.
# We want everything but the last item in the version, but we want to
- # ignore post and dev releases and we want to treat the pre-release as
- # it's own separate segment.
+ # ignore suffix segments.
prefix = ".".join(
- list(
- itertools.takewhile(
- lambda x: (not x.startswith("post") and not
- x.startswith("dev")),
- _version_split(spec),
- )
- )[:-1]
+ list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
- return (self._get_operator(">=")(prospective, spec) and
- self._get_operator("==")(prospective, prefix))
+ return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+ prospective, prefix
+ )
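Per the equivalence described in the comment above, ~=2.2 behaves like >=2.2,==2.* (a sketch under the same import assumption as earlier):

    >>> from packaging.specifiers import Specifier
    >>> s = Specifier("~=2.2")
    >>> "2.5" in s   # >=2.2 holds and the 2.* prefix matches
    True
    >>> "2.1" in s   # fails the >=2.2 half
    False
    >>> "3.0" in s   # fails the ==2.* half
    False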
@_require_version_compare
- def _compare_equal(self, prospective, spec):
+ def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
- spec = _version_split(spec[:-2]) # Remove the trailing .*
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
- prospective = _version_split(str(prospective))
+ split_prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
- prospective = prospective[:len(spec)]
+ shortened_prospective = split_prospective[: len(split_spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
- spec, prospective = _pad_version(spec, prospective)
+ padded_spec, padded_prospective = _pad_version(
+ split_spec, shortened_prospective
+ )
+
+ return padded_prospective == padded_spec
else:
# Convert our spec string into a Version
- spec = Version(spec)
+ spec_version = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
- if not spec.local:
+ if not spec_version.local:
prospective = Version(prospective.public)
- return prospective == spec
+ return prospective == spec_version
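A sketch of the prefix-matching branch above: ==2.1.* pads both sides to equal length and ignores any local segment:

    >>> from packaging.specifiers import Specifier
    >>> s = Specifier("==2.1.*")
    >>> "2.1.3" in s
    True
    >>> "2.1.3+local" in s   # local segment dropped before the prefix match
    True
    >>> "2.2.0" in s
    False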
@_require_version_compare
- def _compare_not_equal(self, prospective, spec):
+ def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
return not self._compare_equal(prospective, spec)
@_require_version_compare
- def _compare_less_than_equal(self, prospective, spec):
- return prospective <= Version(spec)
+ def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) <= Version(spec)
@_require_version_compare
- def _compare_greater_than_equal(self, prospective, spec):
- return prospective >= Version(spec)
+ def _compare_greater_than_equal(
+ self, prospective: ParsedVersion, spec: str
+ ) -> bool:
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) >= Version(spec)
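As the NB comments above note, local labels are stripped for these ordered comparisons, so a local build of the boundary version still satisfies both bounds (sketch):

    >>> from packaging.specifiers import Specifier
    >>> Specifier("<=1.0").contains("1.0+local.build")
    True
    >>> Specifier(">=1.0").contains("1.0+local.build")
    True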
@_require_version_compare
- def _compare_less_than(self, prospective, spec):
+ def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
- spec = Version(spec)
+ spec = Version(spec_str)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
@@ -483,10 +515,11 @@ class Specifier(_IndividualSpecifier):
return True
@_require_version_compare
- def _compare_greater_than(self, prospective, spec):
+ def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
- spec = Version(spec)
+ spec = Version(spec_str)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
@@ -503,7 +536,7 @@ class Specifier(_IndividualSpecifier):
return False
# Ensure that we do not allow a local version of the version mentioned
- # in the specifier, which is techincally greater than, to match.
+ # in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
@@ -513,11 +546,12 @@ class Specifier(_IndividualSpecifier):
# same version in the spec.
return True
- def _compare_arbitrary(self, prospective, spec):
+ def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
return str(prospective).lower() == str(spec).lower()
@property
- def prereleases(self):
+ def prereleases(self) -> bool:
+
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
@@ -541,15 +575,15 @@ class Specifier(_IndividualSpecifier):
return False
@prereleases.setter
- def prereleases(self, value):
+ def prereleases(self, value: bool) -> None:
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
-def _version_split(version):
- result = []
+def _version_split(version: str) -> List[str]:
+ result: List[str] = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
@@ -559,7 +593,13 @@ def _version_split(version):
return result
-def _pad_version(left, right):
+def _is_not_suffix(segment: str) -> bool:
+ return not any(
+ segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+ )
+
+
+def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
@@ -567,36 +607,29 @@ def _pad_version(left, right):
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
- left_split.append(left[len(left_split[0]):])
- right_split.append(right[len(right_split[0]):])
+ left_split.append(left[len(left_split[0]) :])
+ right_split.append(right[len(right_split[0]) :])
# Insert our padding
- left_split.insert(
- 1,
- ["0"] * max(0, len(right_split[0]) - len(left_split[0])),
- )
- right_split.insert(
- 1,
- ["0"] * max(0, len(left_split[0]) - len(right_split[0])),
- )
+ left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+ right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
- return (
- list(itertools.chain(*left_split)),
- list(itertools.chain(*right_split)),
- )
+ return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
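Both helpers are private implementation details of this module, but their behavior is easy to see in isolation (illustrative sketch):

    >>> from packaging.specifiers import _version_split, _pad_version
    >>> _version_split("1.0rc1")   # the prefix regex splits "0rc1" into "0" and "rc1"
    ['1', '0', 'rc1']
    >>> _pad_version(["1", "2"], ["1", "2", "3"])   # shorter side zero-padded to match
    (['1', '2', '0'], ['1', '2', '3'])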
class SpecifierSet(BaseSpecifier):
+ def __init__(
+ self, specifiers: str = "", prereleases: Optional[bool] = None
+ ) -> None:
- def __init__(self, specifiers="", prereleases=None):
- # Split on , to break each indidivual specifier into it's own item, and
+        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
- specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
- parsed = set()
- for specifier in specifiers:
+ parsed: Set[_IndividualSpecifier] = set()
+ for specifier in split_specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
@@ -609,23 +642,23 @@ class SpecifierSet(BaseSpecifier):
# we accept prereleases or not.
self._prereleases = prereleases
- def __repr__(self):
+ def __repr__(self) -> str:
pre = (
- ", prereleases={0!r}".format(self.prereleases)
+ f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
- return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+ return f"<SpecifierSet({str(self)!r}{pre})>"
- def __str__(self):
+ def __str__(self) -> str:
return ",".join(sorted(str(s) for s in self._specs))
- def __hash__(self):
+ def __hash__(self) -> int:
return hash(self._specs)
- def __and__(self, other):
- if isinstance(other, string_types):
+ def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+ if isinstance(other, str):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
@@ -647,34 +680,23 @@ class SpecifierSet(BaseSpecifier):
return specifier
- def __eq__(self, other):
- if isinstance(other, string_types):
- other = SpecifierSet(other)
- elif isinstance(other, _IndividualSpecifier):
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, (str, _IndividualSpecifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
- def __ne__(self, other):
- if isinstance(other, string_types):
- other = SpecifierSet(other)
- elif isinstance(other, _IndividualSpecifier):
- other = SpecifierSet(str(other))
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- return self._specs != other._specs
-
- def __len__(self):
+ def __len__(self) -> int:
return len(self._specs)
- def __iter__(self):
+ def __iter__(self) -> Iterator[_IndividualSpecifier]:
return iter(self._specs)
@property
- def prereleases(self):
+ def prereleases(self) -> Optional[bool]:
+
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
@@ -691,13 +713,16 @@ class SpecifierSet(BaseSpecifier):
return any(s.prereleases for s in self._specs)
@prereleases.setter
- def prereleases(self, value):
+ def prereleases(self, value: bool) -> None:
self._prereleases = value
- def __contains__(self, item):
+ def __contains__(self, item: UnparsedVersion) -> bool:
return self.contains(item)
- def contains(self, item, prereleases=None):
+ def contains(
+ self, item: UnparsedVersion, prereleases: Optional[bool] = None
+ ) -> bool:
+
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
@@ -721,12 +746,12 @@ class SpecifierSet(BaseSpecifier):
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
- return all(
- s.contains(item, prereleases=prereleases)
- for s in self._specs
- )
+ return all(s.contains(item, prereleases=prereleases) for s in self._specs)
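So membership is the intersection of every member specifier, and per the design note an empty set matches everything (sketch):

    >>> from packaging.specifiers import SpecifierSet
    >>> "1.4" in SpecifierSet(">=1.0,<2.0")
    True
    >>> "2.0" in SpecifierSet(">=1.0,<2.0")
    False
    >>> "0.1" in SpecifierSet("")   # all() over zero specifiers is True
    True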
+
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
- def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
@@ -744,8 +769,11 @@ class SpecifierSet(BaseSpecifier):
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
- filtered = []
- found_prereleases = []
+ filtered: List[VersionTypeVar] = []
+ found_prereleases: List[VersionTypeVar] = []
+
+ item: UnparsedVersion
+ parsed_version: Union[Version, LegacyVersion]
for item in iterable:
# Ensure that we have some kind of Version class for this item.
diff --git a/pkg_resources/_vendor/packaging/tags.py b/pkg_resources/_vendor/packaging/tags.py
new file mode 100644
index 0000000..9a3d25a
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/tags.py
@@ -0,0 +1,487 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import logging
+import platform
+import sys
+import sysconfig
+from importlib.machinery import EXTENSION_SUFFIXES
+from typing import (
+ Dict,
+ FrozenSet,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ cast,
+)
+
+from . import _manylinux, _musllinux
+
+logger = logging.getLogger(__name__)
+
+PythonVersion = Sequence[int]
+MacVersion = Tuple[int, int]
+
+INTERPRETER_SHORT_NAMES: Dict[str, str] = {
+ "python": "py", # Generic.
+ "cpython": "cp",
+ "pypy": "pp",
+ "ironpython": "ip",
+ "jython": "jy",
+}
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+class Tag:
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
+
+ __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
+
+ def __init__(self, interpreter: str, abi: str, platform: str) -> None:
+ self._interpreter = interpreter.lower()
+ self._abi = abi.lower()
+ self._platform = platform.lower()
+ # The __hash__ of every single element in a Set[Tag] will be evaluated each time
+    # that a set calls its `.isdisjoint()` method, which may be called hundreds of
+ # times when scanning a page of links for packages with tags matching that
+ # Set[Tag]. Pre-computing the value here produces significant speedups for
+ # downstream consumers.
+ self._hash = hash((self._interpreter, self._abi, self._platform))
+
+ @property
+ def interpreter(self) -> str:
+ return self._interpreter
+
+ @property
+ def abi(self) -> str:
+ return self._abi
+
+ @property
+ def platform(self) -> str:
+ return self._platform
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Tag):
+ return NotImplemented
+
+ return (
+ (self._hash == other._hash) # Short-circuit ASAP for perf reasons.
+ and (self._platform == other._platform)
+ and (self._abi == other._abi)
+ and (self._interpreter == other._interpreter)
+ )
+
+ def __hash__(self) -> int:
+ return self._hash
+
+ def __str__(self) -> str:
+ return f"{self._interpreter}-{self._abi}-{self._platform}"
+
+ def __repr__(self) -> str:
+ return f"<{self} @ {id(self)}>"
+
+
+def parse_tag(tag: str) -> FrozenSet[Tag]:
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
+ tags = set()
+ interpreters, abis, platforms = tag.split("-")
+ for interpreter in interpreters.split("."):
+ for abi in abis.split("."):
+ for platform_ in platforms.split("."):
+ tags.add(Tag(interpreter, abi, platform_))
+ return frozenset(tags)
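For example, a compressed tag set expands into the cross product of its dotted components (sketch):

    >>> from packaging.tags import parse_tag
    >>> sorted(str(t) for t in parse_tag("py2.py3-none-any"))
    ['py2-none-any', 'py3-none-any']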
+
+
+def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
+ value = sysconfig.get_config_var(name)
+ if value is None and warn:
+ logger.debug(
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+ )
+ return value
+
+
+def _normalize_string(string: str) -> str:
+ return string.replace(".", "_").replace("-", "_")
+
+
+def _abi3_applies(python_version: PythonVersion) -> bool:
+ """
+ Determine if the Python version supports abi3.
+
+ PEP 384 was first implemented in Python 3.2.
+ """
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
+ py_version = tuple(py_version) # To allow for version comparison.
+ abis = []
+ version = _version_nodot(py_version[:2])
+ debug = pymalloc = ucs4 = ""
+ with_debug = _get_config_var("Py_DEBUG", warn)
+ has_refcount = hasattr(sys, "gettotalrefcount")
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+ # extension modules is the best option.
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
+ debug = "d"
+ if py_version < (3, 8):
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+ if with_pymalloc or with_pymalloc is None:
+ pymalloc = "m"
+ if py_version < (3, 3):
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+ if unicode_size == 4 or (
+ unicode_size is None and sys.maxunicode == 0x10FFFF
+ ):
+ ucs4 = "u"
+ elif debug:
+ # Debug builds can also load "normal" extension modules.
+ # We can also assume no UCS-4 or pymalloc requirement.
+ abis.append(f"cp{version}")
+ abis.insert(
+ 0,
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+ ),
+ )
+ return abis
+
+
+def cpython_tags(
+ python_version: Optional[PythonVersion] = None,
+ abis: Optional[Iterable[str]] = None,
+ platforms: Optional[Iterable[str]] = None,
+ *,
+ warn: bool = False,
+) -> Iterator[Tag]:
+ """
+ Yields the tags for a CPython interpreter.
+
+ The tags consist of:
+ - cp<python_version>-<abi>-<platform>
+ - cp<python_version>-abi3-<platform>
+ - cp<python_version>-none-<platform>
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
+
+ If python_version only specifies a major version then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+ If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+ their normal position and not at the beginning.
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+
+ interpreter = f"cp{_version_nodot(python_version[:2])}"
+
+ if abis is None:
+ if len(python_version) > 1:
+ abis = _cpython_abis(python_version, warn)
+ else:
+ abis = []
+ abis = list(abis)
+ # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"):
+ try:
+ abis.remove(explicit_abi)
+ except ValueError:
+ pass
+
+ platforms = list(platforms or platform_tags())
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+ if _abi3_applies(python_version):
+ yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
+ yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
+
+ if _abi3_applies(python_version):
+ for minor_version in range(python_version[1] - 1, 1, -1):
+ for platform_ in platforms:
+ interpreter = "cp{version}".format(
+ version=_version_nodot((python_version[0], minor_version))
+ )
+ yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi() -> Iterator[str]:
+ abi = sysconfig.get_config_var("SOABI")
+ if abi:
+ yield _normalize_string(abi)
+
+
+def generic_tags(
+ interpreter: Optional[str] = None,
+ abis: Optional[Iterable[str]] = None,
+ platforms: Optional[Iterable[str]] = None,
+ *,
+ warn: bool = False,
+) -> Iterator[Tag]:
+ """
+ Yields the tags for a generic interpreter.
+
+ The tags consist of:
+ - <interpreter>-<abi>-<platform>
+
+ The "none" ABI will be added if it was not explicitly provided.
+ """
+ if not interpreter:
+ interp_name = interpreter_name()
+ interp_version = interpreter_version(warn=warn)
+ interpreter = "".join([interp_name, interp_version])
+ if abis is None:
+ abis = _generic_abi()
+ platforms = list(platforms or platform_tags())
+ abis = list(abis)
+ if "none" not in abis:
+ abis.append("none")
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+ """
+ Yields Python versions in descending order.
+
+ After the latest version, the major-only version will be yielded, and then
+ all previous versions of that major version.
+ """
+ if len(py_version) > 1:
+ yield f"py{_version_nodot(py_version[:2])}"
+ yield f"py{py_version[0]}"
+ if len(py_version) > 1:
+ for minor in range(py_version[1] - 1, -1, -1):
+ yield f"py{_version_nodot((py_version[0], minor))}"
+
+
+def compatible_tags(
+ python_version: Optional[PythonVersion] = None,
+ interpreter: Optional[str] = None,
+ platforms: Optional[Iterable[str]] = None,
+) -> Iterator[Tag]:
+ """
+ Yields the sequence of tags that are compatible with a specific version of Python.
+
+ The tags consist of:
+ - py*-none-<platform>
+ - <interpreter>-none-any # ... if `interpreter` is provided.
+ - py*-none-any
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+ platforms = list(platforms or platform_tags())
+ for version in _py_interpreter_range(python_version):
+ for platform_ in platforms:
+ yield Tag(version, "none", platform_)
+ if interpreter:
+ yield Tag(interpreter, "none", "any")
+ for version in _py_interpreter_range(python_version):
+ yield Tag(version, "none", "any")
+
+
+def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
+ if not is_32bit:
+ return arch
+
+ if arch.startswith("ppc"):
+ return "ppc"
+
+ return "i386"
+
+
+def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
+ formats = [cpu_arch]
+ if cpu_arch == "x86_64":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat64", "fat32"])
+
+ elif cpu_arch == "i386":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat32", "fat"])
+
+ elif cpu_arch == "ppc64":
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4):
+ return []
+ formats.append("fat64")
+
+ elif cpu_arch == "ppc":
+ if version > (10, 6):
+ return []
+ formats.extend(["fat32", "fat"])
+
+ if cpu_arch in {"arm64", "x86_64"}:
+ formats.append("universal2")
+
+ if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
+ formats.append("universal")
+
+ return formats
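A sketch of the fan-out for a couple of inputs (private helper, deterministic output):

    >>> from packaging.tags import _mac_binary_formats
    >>> _mac_binary_formats((10, 15), "x86_64")
    ['x86_64', 'intel', 'fat64', 'fat32', 'universal2', 'universal']
    >>> _mac_binary_formats((10, 3), "x86_64")   # x86_64 requires 10.4+
    []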
+
+
+def mac_platforms(
+ version: Optional[MacVersion] = None, arch: Optional[str] = None
+) -> Iterator[str]:
+ """
+ Yields the platform tags for a macOS system.
+
+ The `version` parameter is a two-item tuple specifying the macOS version to
+ generate platform tags for. The `arch` parameter is the CPU architecture to
+ generate platform tags for. Both parameters default to the appropriate value
+ for the current system.
+ """
+ version_str, _, cpu_arch = platform.mac_ver()
+ if version is None:
+ version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+ else:
+ version = version
+ if arch is None:
+ arch = _mac_arch(cpu_arch)
+ else:
+ arch = arch
+
+ if (10, 0) <= version and version < (11, 0):
+ # Prior to Mac OS 11, each yearly release of Mac OS bumped the
+ # "minor" version number. The major version was always 10.
+ for minor_version in range(version[1], -1, -1):
+ compat_version = 10, minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=10, minor=minor_version, binary_format=binary_format
+ )
+
+ if version >= (11, 0):
+ # Starting with Mac OS 11, each yearly release bumps the major version
+ # number. The minor versions are now the midyear updates.
+ for major_version in range(version[0], 10, -1):
+ compat_version = major_version, 0
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=major_version, minor=0, binary_format=binary_format
+ )
+
+ if version >= (11, 0):
+ # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
+ # Arm64 support was introduced in 11.0, so no Arm binaries from previous
+ # releases exist.
+ #
+ # However, the "universal2" binary format can have a
+ # macOS version earlier than 11.0 when the x86_64 part of the binary supports
+ # that version of macOS.
+ if arch == "x86_64":
+ for minor_version in range(16, 3, -1):
+ compat_version = 10, minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
+ else:
+ for minor_version in range(16, 3, -1):
+ compat_version = 10, minor_version
+ binary_format = "universal2"
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
+
+
+def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
+ linux = _normalize_string(sysconfig.get_platform())
+ if is_32bit:
+ if linux == "linux_x86_64":
+ linux = "linux_i686"
+ elif linux == "linux_aarch64":
+ linux = "linux_armv7l"
+ _, arch = linux.split("_", 1)
+ yield from _manylinux.platform_tags(linux, arch)
+ yield from _musllinux.platform_tags(arch)
+ yield linux
+
+
+def _generic_platforms() -> Iterator[str]:
+ yield _normalize_string(sysconfig.get_platform())
+
+
+def platform_tags() -> Iterator[str]:
+ """
+ Provides the platform tags for this installation.
+ """
+ if platform.system() == "Darwin":
+ return mac_platforms()
+ elif platform.system() == "Linux":
+ return _linux_platforms()
+ else:
+ return _generic_platforms()
+
+
+def interpreter_name() -> str:
+ """
+ Returns the name of the running interpreter.
+ """
+ name = sys.implementation.name
+ return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(*, warn: bool = False) -> str:
+ """
+ Returns the version of the running interpreter.
+ """
+ version = _get_config_var("py_version_nodot", warn=warn)
+ if version:
+ version = str(version)
+ else:
+ version = _version_nodot(sys.version_info[:2])
+ return version
+
+
+def _version_nodot(version: PythonVersion) -> str:
+ return "".join(map(str, version))
+
+
+def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
+ """
+ Returns the sequence of tag triples for the running interpreter.
+
+ The order of the sequence corresponds to priority order for the
+ interpreter, from most to least important.
+ """
+
+ interp_name = interpreter_name()
+ if interp_name == "cp":
+ yield from cpython_tags(warn=warn)
+ else:
+ yield from generic_tags()
+
+ if interp_name == "pp":
+ yield from compatible_tags(interpreter="pp3")
+ else:
+ yield from compatible_tags()
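Typical use is to take the first yielded tag as the best match; the exact value depends on the running interpreter and platform (illustrative sketch, with example output assumed for a 64-bit CPython 3.9 on Linux):

    >>> from packaging.tags import sys_tags
    >>> best = next(iter(sys_tags()))   # highest-priority tag first
    >>> str(best)                       # platform-dependent, e.g.:
    'cp39-cp39-manylinux_2_17_x86_64'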
diff --git a/pkg_resources/_vendor/packaging/utils.py b/pkg_resources/_vendor/packaging/utils.py
index 942387c..bab11b8 100644
--- a/pkg_resources/_vendor/packaging/utils.py
+++ b/pkg_resources/_vendor/packaging/utils.py
@@ -1,14 +1,136 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
-from __future__ import absolute_import, division, print_function
import re
+from typing import FrozenSet, NewType, Tuple, Union, cast
+
+from .tags import Tag, parse_tag
+from .version import InvalidVersion, Version
+
+BuildTag = Union[Tuple[()], Tuple[int, str]]
+NormalizedName = NewType("NormalizedName", str)
+
+
+class InvalidWheelFilename(ValueError):
+ """
+    An invalid wheel filename was found; users should refer to PEP 427.
+ """
+
+
+class InvalidSdistFilename(ValueError):
+ """
+    An invalid sdist filename was found; users should refer to the packaging user guide.
+ """
_canonicalize_regex = re.compile(r"[-_.]+")
+# PEP 427: The build number must start with a digit.
+_build_tag_regex = re.compile(r"(\d+)(.*)")
-def canonicalize_name(name):
+def canonicalize_name(name: str) -> NormalizedName:
# This is taken from PEP 503.
- return _canonicalize_regex.sub("-", name).lower()
+ value = _canonicalize_regex.sub("-", name).lower()
+ return cast(NormalizedName, value)
+
+
+def canonicalize_version(version: Union[Version, str]) -> str:
+ """
+ This is very similar to Version.__str__, but has one subtle difference
+ with the way it handles the release segment.
+ """
+ if isinstance(version, str):
+ try:
+ parsed = Version(version)
+ except InvalidVersion:
+ # Legacy versions cannot be normalized
+ return version
+ else:
+ parsed = version
+
+ parts = []
+
+ # Epoch
+ if parsed.epoch != 0:
+ parts.append(f"{parsed.epoch}!")
+
+ # Release segment
+ # NB: This strips trailing '.0's to normalize
+ parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
+
+ # Pre-release
+ if parsed.pre is not None:
+ parts.append("".join(str(x) for x in parsed.pre))
+
+ # Post-release
+ if parsed.post is not None:
+ parts.append(f".post{parsed.post}")
+
+ # Development release
+ if parsed.dev is not None:
+ parts.append(f".dev{parsed.dev}")
+
+ # Local version segment
+ if parsed.local is not None:
+ parts.append(f"+{parsed.local}")
+
+ return "".join(parts)
+
+
+def parse_wheel_filename(
+ filename: str,
+) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
+ if not filename.endswith(".whl"):
+ raise InvalidWheelFilename(
+ f"Invalid wheel filename (extension must be '.whl'): {filename}"
+ )
+
+ filename = filename[:-4]
+ dashes = filename.count("-")
+ if dashes not in (4, 5):
+ raise InvalidWheelFilename(
+ f"Invalid wheel filename (wrong number of parts): {filename}"
+ )
+
+ parts = filename.split("-", dashes - 2)
+ name_part = parts[0]
+ # See PEP 427 for the rules on escaping the project name
+ if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
+ raise InvalidWheelFilename(f"Invalid project name: {filename}")
+ name = canonicalize_name(name_part)
+ version = Version(parts[1])
+ if dashes == 5:
+ build_part = parts[2]
+ build_match = _build_tag_regex.match(build_part)
+ if build_match is None:
+ raise InvalidWheelFilename(
+ f"Invalid build number: {build_part} in '{filename}'"
+ )
+ build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
+ else:
+ build = ()
+ tags = parse_tag(parts[-1])
+ return (name, version, build, tags)
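For a plain wheel name the build tag comes back as the empty tuple (sketch):

    >>> from packaging.utils import parse_wheel_filename
    >>> name, ver, build, tags = parse_wheel_filename("pip-21.3-py3-none-any.whl")
    >>> name, str(ver), build
    ('pip', '21.3', ())
    >>> [str(t) for t in tags]
    ['py3-none-any']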
+
+
+def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
+ if filename.endswith(".tar.gz"):
+ file_stem = filename[: -len(".tar.gz")]
+ elif filename.endswith(".zip"):
+ file_stem = filename[: -len(".zip")]
+ else:
+ raise InvalidSdistFilename(
+ f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
+ f" {filename}"
+ )
+
+ # We are requiring a PEP 440 version, which cannot contain dashes,
+ # so we split on the last dash.
+ name_part, sep, version_part = file_stem.rpartition("-")
+ if not sep:
+ raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
+
+ name = canonicalize_name(name_part)
+ version = Version(version_part)
+ return (name, version)
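Sketch: splitting on the last dash correctly handles project names that themselves contain dashes:

    >>> from packaging.utils import parse_sdist_filename
    >>> parse_sdist_filename("my-proj-1.0.tar.gz")
    ('my-proj', <Version('1.0')>)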
diff --git a/pkg_resources/_vendor/packaging/version.py b/pkg_resources/_vendor/packaging/version.py
index 83b5ee8..de9a09a 100644
--- a/pkg_resources/_vendor/packaging/version.py
+++ b/pkg_resources/_vendor/packaging/version.py
@@ -1,27 +1,45 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
-from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
-
-from ._structures import Infinity
-
-
-__all__ = [
- "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
+import warnings
+from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+SubLocalType = Union[InfiniteTypes, int, str]
+LocalType = Union[
+ NegativeInfinityType,
+ Tuple[
+ Union[
+ SubLocalType,
+ Tuple[SubLocalType, str],
+ Tuple[NegativeInfinityType, SubLocalType],
+ ],
+ ...,
+ ],
+]
+CmpKey = Tuple[
+ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+]
+LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+VersionComparisonMethod = Callable[
+ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
]
-
_Version = collections.namedtuple(
- "_Version",
- ["epoch", "release", "dev", "pre", "post", "local"],
+ "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
-def parse(version):
+def parse(version: str) -> Union["LegacyVersion", "Version"]:
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on whether the given version is
@@ -39,79 +57,126 @@ class InvalidVersion(ValueError):
"""
-class _BaseVersion(object):
+class _BaseVersion:
+ _key: Union[CmpKey, LegacyCmpKey]
- def __hash__(self):
+ def __hash__(self) -> int:
return hash(self._key)
- def __lt__(self, other):
- return self._compare(other, lambda s, o: s < o)
+ # Please keep the duplicated `isinstance` check
+ # in the six comparisons hereunder
+ # unless you find a way to avoid adding overhead function calls.
+ def __lt__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
- def __le__(self, other):
- return self._compare(other, lambda s, o: s <= o)
+ return self._key < other._key
- def __eq__(self, other):
- return self._compare(other, lambda s, o: s == o)
+ def __le__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
- def __ge__(self, other):
- return self._compare(other, lambda s, o: s >= o)
+ return self._key <= other._key
- def __gt__(self, other):
- return self._compare(other, lambda s, o: s > o)
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
- def __ne__(self, other):
- return self._compare(other, lambda s, o: s != o)
+ return self._key == other._key
- def _compare(self, other, method):
+ def __ge__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
- return method(self._key, other._key)
+ return self._key >= other._key
+ def __gt__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
-class LegacyVersion(_BaseVersion):
+ return self._key > other._key
+
+ def __ne__(self, other: object) -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key != other._key
- def __init__(self, version):
+
+class LegacyVersion(_BaseVersion):
+ def __init__(self, version: str) -> None:
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
- def __str__(self):
+ warnings.warn(
+ "Creating a LegacyVersion has been deprecated and will be "
+ "removed in the next major release",
+ DeprecationWarning,
+ )
+
+ def __str__(self) -> str:
return self._version
- def __repr__(self):
- return "<LegacyVersion({0})>".format(repr(str(self)))
+ def __repr__(self) -> str:
+ return f"<LegacyVersion('{self}')>"
@property
- def public(self):
+ def public(self) -> str:
return self._version
@property
- def base_version(self):
+ def base_version(self) -> str:
return self._version
@property
- def local(self):
+ def epoch(self) -> int:
+ return -1
+
+ @property
+ def release(self) -> None:
return None
@property
- def is_prerelease(self):
+ def pre(self) -> None:
+ return None
+
+ @property
+ def post(self) -> None:
+ return None
+
+ @property
+ def dev(self) -> None:
+ return None
+
+ @property
+ def local(self) -> None:
+ return None
+
+ @property
+ def is_prerelease(self) -> bool:
+ return False
+
+ @property
+ def is_postrelease(self) -> bool:
return False
@property
- def is_postrelease(self):
+ def is_devrelease(self) -> bool:
return False
-_legacy_version_component_re = re.compile(
- r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
-)
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
_legacy_version_replacement_map = {
- "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+ "pre": "c",
+ "preview": "c",
+ "-": "final-",
+ "rc": "c",
+ "dev": "@",
}
-def _parse_version_parts(s):
+def _parse_version_parts(s: str) -> Iterator[str]:
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
@@ -128,7 +193,8 @@ def _parse_version_parts(s):
yield "*final"
-def _legacy_cmpkey(version):
+def _legacy_cmpkey(version: str) -> LegacyCmpKey:
+
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the de facto standard originally implemented by setuptools,
@@ -137,7 +203,7 @@ def _legacy_cmpkey(version):
# This scheme is taken from pkg_resources.parse_version of setuptools prior
# to its adoption of the packaging library.
- parts = []
+ parts: List[str] = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
@@ -150,9 +216,9 @@ def _legacy_cmpkey(version):
parts.pop()
parts.append(part)
- parts = tuple(parts)
- return epoch, parts
+ return epoch, tuple(parts)
+
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
@@ -190,33 +256,24 @@ VERSION_PATTERN = r"""
class Version(_BaseVersion):
- _regex = re.compile(
- r"^\s*" + VERSION_PATTERN + r"\s*$",
- re.VERBOSE | re.IGNORECASE,
- )
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ def __init__(self, version: str) -> None:
- def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
- raise InvalidVersion("Invalid version: '{0}'".format(version))
+ raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
- pre=_parse_letter_version(
- match.group("pre_l"),
- match.group("pre_n"),
- ),
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
- match.group("post_l"),
- match.group("post_n1") or match.group("post_n2"),
- ),
- dev=_parse_letter_version(
- match.group("dev_l"),
- match.group("dev_n"),
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
@@ -230,72 +287,113 @@ class Version(_BaseVersion):
self._version.local,
)
- def __repr__(self):
- return "<Version({0})>".format(repr(str(self)))
+ def __repr__(self) -> str:
+ return f"<Version('{self}')>"
- def __str__(self):
+ def __str__(self) -> str:
parts = []
# Epoch
- if self._version.epoch != 0:
- parts.append("{0}!".format(self._version.epoch))
+ if self.epoch != 0:
+ parts.append(f"{self.epoch}!")
# Release segment
- parts.append(".".join(str(x) for x in self._version.release))
+ parts.append(".".join(str(x) for x in self.release))
# Pre-release
- if self._version.pre is not None:
- parts.append("".join(str(x) for x in self._version.pre))
+ if self.pre is not None:
+ parts.append("".join(str(x) for x in self.pre))
# Post-release
- if self._version.post is not None:
- parts.append(".post{0}".format(self._version.post[1]))
+ if self.post is not None:
+ parts.append(f".post{self.post}")
# Development release
- if self._version.dev is not None:
- parts.append(".dev{0}".format(self._version.dev[1]))
+ if self.dev is not None:
+ parts.append(f".dev{self.dev}")
# Local version segment
- if self._version.local is not None:
- parts.append(
- "+{0}".format(".".join(str(x) for x in self._version.local))
- )
+ if self.local is not None:
+ parts.append(f"+{self.local}")
return "".join(parts)
@property
- def public(self):
+ def epoch(self) -> int:
+ _epoch: int = self._version.epoch
+ return _epoch
+
+ @property
+ def release(self) -> Tuple[int, ...]:
+ _release: Tuple[int, ...] = self._version.release
+ return _release
+
+ @property
+ def pre(self) -> Optional[Tuple[str, int]]:
+ _pre: Optional[Tuple[str, int]] = self._version.pre
+ return _pre
+
+ @property
+ def post(self) -> Optional[int]:
+ return self._version.post[1] if self._version.post else None
+
+ @property
+ def dev(self) -> Optional[int]:
+ return self._version.dev[1] if self._version.dev else None
+
+ @property
+ def local(self) -> Optional[str]:
+ if self._version.local:
+ return ".".join(str(x) for x in self._version.local)
+ else:
+ return None
+
+ @property
+ def public(self) -> str:
return str(self).split("+", 1)[0]
@property
- def base_version(self):
+ def base_version(self) -> str:
parts = []
# Epoch
- if self._version.epoch != 0:
- parts.append("{0}!".format(self._version.epoch))
+ if self.epoch != 0:
+ parts.append(f"{self.epoch}!")
# Release segment
- parts.append(".".join(str(x) for x in self._version.release))
+ parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
- def local(self):
- version_string = str(self)
- if "+" in version_string:
- return version_string.split("+", 1)[1]
+ def is_prerelease(self) -> bool:
+ return self.dev is not None or self.pre is not None
+
+ @property
+ def is_postrelease(self) -> bool:
+ return self.post is not None
+
+ @property
+ def is_devrelease(self) -> bool:
+ return self.dev is not None
+
+ @property
+ def major(self) -> int:
+ return self.release[0] if len(self.release) >= 1 else 0
@property
- def is_prerelease(self):
- return bool(self._version.dev or self._version.pre)
+ def minor(self) -> int:
+ return self.release[1] if len(self.release) >= 2 else 0
@property
- def is_postrelease(self):
- return bool(self._version.post)
+ def micro(self) -> int:
+ return self.release[2] if len(self.release) >= 3 else 0
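The convenience accessors default missing release components to 0 (sketch):

    >>> from packaging.version import Version
    >>> v = Version("1.2")
    >>> (v.major, v.minor, v.micro)
    (1, 2, 0)
    >>> (v.epoch, v.release)
    (0, (1, 2))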
-def _parse_letter_version(letter, number):
+def _parse_letter_version(
+ letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
@@ -325,34 +423,40 @@ def _parse_letter_version(letter, number):
return letter, int(number)
+ return None
+
-_local_version_seperators = re.compile(r"[\._-]")
+_local_version_separators = re.compile(r"[\._-]")
-def _parse_local_version(local):
+def _parse_local_version(local: str) -> Optional[LocalType]:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
- for part in _local_version_seperators.split(local)
+ for part in _local_version_separators.split(local)
)
+ return None
+
+def _cmpkey(
+ epoch: int,
+ release: Tuple[int, ...],
+ pre: Optional[Tuple[str, int]],
+ post: Optional[Tuple[str, int]],
+ dev: Optional[Tuple[str, int]],
+ local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
-def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll reverse the list, drop all the now-leading
# zeros until we come to something non-zero, then take the rest, re-reverse it
# back into the correct order, make it a tuple, and use that for our sorting key.
- release = tuple(
- reversed(list(
- itertools.dropwhile(
- lambda x: x == 0,
- reversed(release),
- )
- ))
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
@@ -360,23 +464,31 @@ def _cmpkey(epoch, release, pre, post, dev, local):
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
- pre = -Infinity
+ _pre: PrePostDevType = NegativeInfinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
- pre = Infinity
+ _pre = Infinity
+ else:
+ _pre = pre
# Versions without a post segment should sort before those with one.
if post is None:
- post = -Infinity
+ _post: PrePostDevType = NegativeInfinity
+
+ else:
+ _post = post
# Versions without a development segment should sort after those with one.
if dev is None:
- dev = Infinity
+ _dev: PrePostDevType = Infinity
+
+ else:
+ _dev = dev
if local is None:
# Versions without a local segment should sort before those with one.
- local = -Infinity
+ _local: LocalType = NegativeInfinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
@@ -385,9 +497,8 @@ def _cmpkey(epoch, release, pre, post, dev, local):
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
- local = tuple(
- (i, "") if isinstance(i, int) else (-Infinity, i)
- for i in local
+ _local = tuple(
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
- return epoch, release, pre, post, dev, local
+ return epoch, _release, _pre, _post, _dev, _local
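The net effect of the infinity substitutions above is the PEP 440 total ordering, e.g. (sketch):

    >>> from packaging.version import Version
    >>> Version("1.0.dev0") < Version("1.0a1") < Version("1.0") < Version("1.0.post1")
    True
    >>> Version("1.0") < Version("1.0+local")   # a local segment sorts after none
    True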
diff --git a/pkg_resources/_vendor/pyparsing-2.2.1.dist-info/LICENSE.txt b/pkg_resources/_vendor/pyparsing-2.2.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000..bbc959e
--- /dev/null
+++ b/pkg_resources/_vendor/pyparsing-2.2.1.dist-info/LICENSE.txt
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pkg_resources/_vendor/pyparsing-2.2.1.dist-info/top_level.txt b/pkg_resources/_vendor/pyparsing-2.2.1.dist-info/top_level.txt
new file mode 100644
index 0000000..210dfec
--- /dev/null
+++ b/pkg_resources/_vendor/pyparsing-2.2.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyparsing
diff --git a/pkg_resources/_vendor/pyparsing.py b/pkg_resources/_vendor/pyparsing.py
index a212243..cf75e1e 100644
--- a/pkg_resources/_vendor/pyparsing.py
+++ b/pkg_resources/_vendor/pyparsing.py
@@ -1,6 +1,6 @@
# module pyparsing.py
#
-# Copyright (c) 2003-2016 Paul T. McGuire
+# Copyright (c) 2003-2018 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
@@ -25,6 +25,7 @@
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
@@ -58,10 +59,23 @@ The pyparsing module handles some of the problems that are typically vexing when
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+ - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
+ - construct character word-group expressions using the L{Word} class
+ - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
+ - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
+ - associate names with your parsed results using L{ParserElement.setResultsName}
+ - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
+ - find more useful common expressions in the L{pyparsing_common} namespace class
"""
-__version__ = "2.1.10"
-__versionTime__ = "07 Oct 2016 01:31 UTC"
+__version__ = "2.2.1"
+__versionTime__ = "18 Sep 2018 00:49 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
@@ -83,6 +97,15 @@ except ImportError:
from threading import RLock
try:
+ # Python 3
+ from collections.abc import Iterable
+ from collections.abc import MutableMapping
+except ImportError:
+ # Python 2.7
+ from collections import Iterable
+ from collections import MutableMapping
+
+try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
@@ -144,7 +167,7 @@ else:
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
- xmlcharref = Regex('&#\d+;')
+ xmlcharref = Regex(r'&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
@@ -809,7 +832,7 @@ class ParseResults(object):
return None
def getName(self):
- """
+ r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
@@ -940,7 +963,7 @@ class ParseResults(object):
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
-collections.MutableMapping.register(ParseResults)
+MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
@@ -1025,11 +1048,11 @@ def _trim_arity(func, maxargs=2):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
- return [(frame_summary.filename, frame_summary.lineno)]
+ return [frame_summary[:2]]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
- return [(frame_summary.filename, frame_summary.lineno)]
+ return [frame_summary[:2]]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
@@ -1226,7 +1249,7 @@ class ParserElement(object):
def setParseAction( self, *fns, **kwargs ):
"""
- Define action to perform when successfully matching parse element definition.
+ Define one or more actions to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
@@ -1264,7 +1287,7 @@ class ParserElement(object):
def addParseAction( self, *fns, **kwargs ):
"""
- Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
+ Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
@@ -1374,7 +1397,7 @@ class ParserElement(object):
else:
preloc = loc
tokensStart = preloc
- if self.mayIndexError or loc >= len(instring):
+ if self.mayIndexError or preloc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
@@ -1408,7 +1431,6 @@ class ParserElement(object):
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
-
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
@@ -1443,10 +1465,14 @@ class ParserElement(object):
def clear(self):
cache.clear()
+
+ def cache_len(self):
+ return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
if _OrderedDict is not None:
class _FifoCache(object):
@@ -1460,15 +1486,22 @@ class ParserElement(object):
def set(self, key, value):
cache[key] = value
- if len(cache) > size:
- cache.popitem(False)
+ while len(cache) > size:
+ try:
+ cache.popitem(False)
+ except KeyError:
+ pass
def clear(self):
cache.clear()
+ def cache_len(self):
+ return len(cache)
+
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
else:
class _FifoCache(object):
@@ -1483,7 +1516,7 @@ class ParserElement(object):
def set(self, key, value):
cache[key] = value
- if len(cache) > size:
+ while len(key_fifo) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
@@ -1491,9 +1524,13 @@ class ParserElement(object):
cache.clear()
key_fifo.clear()
+ def cache_len(self):
+ return len(cache)
+
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
 packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
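
The cache_len additions above give each packrat cache a bound __len__. A sketch, not part of the diff, of reading the cache size; note that __len__ is bound on the instance, and the len() builtin looks special methods up on the type, so the method is called explicitly here:

    import pyparsing as pp

    pp.ParserElement.enablePackrat()
    pp.Word(pp.alphas).parseString('hello')
    # instance-bound method; len(packrat_cache) would raise TypeError
    print(pp.ParserElement.packrat_cache.__len__())
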
@@ -1743,8 +1780,12 @@ class ParserElement(object):
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+ # the sum() builtin can be used to merge results into a single ParseResults object
+ print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
- ['More', 'Iron', 'Lead', 'Gold', 'I']
+ [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+ ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
@@ -1819,7 +1860,7 @@ class ParserElement(object):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
- return And( [ self, And._ErrorStop(), other ] )
+ return self + And._ErrorStop() + other
def __rsub__(self, other ):
"""
@@ -2722,7 +2763,7 @@ class Word(Token):
class Regex(Token):
- """
+ r"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
@@ -2911,7 +2952,7 @@ class QuotedString(Token):
# replace escaped characters
if self.escChar:
- ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
+ ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
@@ -3223,7 +3264,7 @@ class ParseExpression(ParserElement):
if isinstance( exprs, basestring ):
self.exprs = [ ParserElement._literalStringClass( exprs ) ]
- elif isinstance( exprs, collections.Iterable ):
+ elif isinstance( exprs, Iterable ):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
@@ -4374,7 +4415,7 @@ def traceParseAction(f):
@traceParseAction
def remove_duplicate_chars(tokens):
- return ''.join(sorted(set(''.join(tokens)))
+ return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
@@ -4564,7 +4605,7 @@ def oneOf( strs, caseless=False, useRegex=True ):
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
- elif isinstance(strs, collections.Iterable):
+ elif isinstance(strs, Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
@@ -4715,7 +4756,7 @@ stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
-_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
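
A sketch, not part of the diff, of what the CharsNotIn change above affects: srange() expands a regex-like bracket expression, and _singleChar now admits any literal character other than '\' and ']' (illustrative, assuming pyparsing as pinned below):

    from pyparsing import srange

    # expand a bracket expression into its character set
    print(srange('[a-c!#$]'))  # prints: abc!#$
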
@@ -5020,7 +5061,9 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
- parse action tuple member may be omitted)
+ parse action tuple member may be omitted); if the parse action
+ is passed a tuple or list of functions, this is equivalent to
+ calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
@@ -5093,7 +5136,10 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
- matchExpr.setParseAction( pa )
+ if isinstance(pa, (tuple, list)):
+ matchExpr.setParseAction(*pa)
+ else:
+ matchExpr.setParseAction(pa)
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
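
A sketch, not part of the diff, of the tuple-of-parse-actions support added just above; the names below are illustrative, assuming pyparsing >= 2.2.1:

    import pyparsing as pp

    def note(s, loc, toks):
        print('matched:', toks)

    def evaluate(toks):
        t = toks[0]
        result = t[0]
        for op, operand in zip(t[1::2], t[2::2]):
            result = result + operand if op == '+' else result - operand
        return result

    arith = pp.infixNotation(pp.pyparsing_common.integer, [
        # the tuple is applied as matchExpr.setParseAction(note, evaluate)
        (pp.oneOf('+ -'), 2, pp.opAssoc.LEFT, (note, evaluate)),
    ])
    print(arith.parseString('1 + 2 - 3'))  # prints: [0]
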
diff --git a/pkg_resources/_vendor/six.py b/pkg_resources/_vendor/six.py
deleted file mode 100644
index 190c023..0000000
--- a/pkg_resources/_vendor/six.py
+++ /dev/null
@@ -1,868 +0,0 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2015 Benjamin Peterson
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-from __future__ import absolute_import
-
-import functools
-import itertools
-import operator
-import sys
-import types
-
-__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.10.0"
-
-
-# Useful for very coarse version differentiation.
-PY2 = sys.version_info[0] == 2
-PY3 = sys.version_info[0] == 3
-PY34 = sys.version_info[0:2] >= (3, 4)
-
-if PY3:
- string_types = str,
- integer_types = int,
- class_types = type,
- text_type = str
- binary_type = bytes
-
- MAXSIZE = sys.maxsize
-else:
- string_types = basestring,
- integer_types = (int, long)
- class_types = (type, types.ClassType)
- text_type = unicode
- binary_type = str
-
- if sys.platform.startswith("java"):
- # Jython always uses 32 bits.
- MAXSIZE = int((1 << 31) - 1)
- else:
- # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
- class X(object):
-
- def __len__(self):
- return 1 << 31
- try:
- len(X())
- except OverflowError:
- # 32-bit
- MAXSIZE = int((1 << 31) - 1)
- else:
- # 64-bit
- MAXSIZE = int((1 << 63) - 1)
- del X
-
-
-def _add_doc(func, doc):
- """Add documentation to a function."""
- func.__doc__ = doc
-
-
-def _import_module(name):
- """Import module, returning the module after the last dot."""
- __import__(name)
- return sys.modules[name]
-
-
-class _LazyDescr(object):
-
- def __init__(self, name):
- self.name = name
-
- def __get__(self, obj, tp):
- result = self._resolve()
- setattr(obj, self.name, result) # Invokes __set__.
- try:
- # This is a bit ugly, but it avoids running this again by
- # removing this descriptor.
- delattr(obj.__class__, self.name)
- except AttributeError:
- pass
- return result
-
-
-class MovedModule(_LazyDescr):
-
- def __init__(self, name, old, new=None):
- super(MovedModule, self).__init__(name)
- if PY3:
- if new is None:
- new = name
- self.mod = new
- else:
- self.mod = old
-
- def _resolve(self):
- return _import_module(self.mod)
-
- def __getattr__(self, attr):
- _module = self._resolve()
- value = getattr(_module, attr)
- setattr(self, attr, value)
- return value
-
-
-class _LazyModule(types.ModuleType):
-
- def __init__(self, name):
- super(_LazyModule, self).__init__(name)
- self.__doc__ = self.__class__.__doc__
-
- def __dir__(self):
- attrs = ["__doc__", "__name__"]
- attrs += [attr.name for attr in self._moved_attributes]
- return attrs
-
- # Subclasses should override this
- _moved_attributes = []
-
-
-class MovedAttribute(_LazyDescr):
-
- def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
- super(MovedAttribute, self).__init__(name)
- if PY3:
- if new_mod is None:
- new_mod = name
- self.mod = new_mod
- if new_attr is None:
- if old_attr is None:
- new_attr = name
- else:
- new_attr = old_attr
- self.attr = new_attr
- else:
- self.mod = old_mod
- if old_attr is None:
- old_attr = name
- self.attr = old_attr
-
- def _resolve(self):
- module = _import_module(self.mod)
- return getattr(module, self.attr)
-
-
-class _SixMetaPathImporter(object):
-
- """
- A meta path importer to import six.moves and its submodules.
-
- This class implements a PEP302 finder and loader. It should be compatible
- with Python 2.5 and all existing versions of Python3
- """
-
- def __init__(self, six_module_name):
- self.name = six_module_name
- self.known_modules = {}
-
- def _add_module(self, mod, *fullnames):
- for fullname in fullnames:
- self.known_modules[self.name + "." + fullname] = mod
-
- def _get_module(self, fullname):
- return self.known_modules[self.name + "." + fullname]
-
- def find_module(self, fullname, path=None):
- if fullname in self.known_modules:
- return self
- return None
-
- def __get_module(self, fullname):
- try:
- return self.known_modules[fullname]
- except KeyError:
- raise ImportError("This loader does not know module " + fullname)
-
- def load_module(self, fullname):
- try:
- # in case of a reload
- return sys.modules[fullname]
- except KeyError:
- pass
- mod = self.__get_module(fullname)
- if isinstance(mod, MovedModule):
- mod = mod._resolve()
- else:
- mod.__loader__ = self
- sys.modules[fullname] = mod
- return mod
-
- def is_package(self, fullname):
- """
- Return true, if the named module is a package.
-
- We need this method to get correct spec objects with
- Python 3.4 (see PEP451)
- """
- return hasattr(self.__get_module(fullname), "__path__")
-
- def get_code(self, fullname):
- """Return None
-
- Required, if is_package is implemented"""
- self.__get_module(fullname) # eventually raises ImportError
- return None
- get_source = get_code # same as get_code
-
-_importer = _SixMetaPathImporter(__name__)
-
-
-class _MovedItems(_LazyModule):
-
- """Lazy loading of moved objects"""
- __path__ = [] # mark as package
-
-
-_moved_attributes = [
- MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
- MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
- MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
- MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
- MovedAttribute("intern", "__builtin__", "sys"),
- MovedAttribute("map", "itertools", "builtins", "imap", "map"),
- MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
- MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
- MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
- MovedAttribute("reduce", "__builtin__", "functools"),
- MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
- MovedAttribute("StringIO", "StringIO", "io"),
- MovedAttribute("UserDict", "UserDict", "collections"),
- MovedAttribute("UserList", "UserList", "collections"),
- MovedAttribute("UserString", "UserString", "collections"),
- MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
- MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
- MovedModule("builtins", "__builtin__"),
- MovedModule("configparser", "ConfigParser"),
- MovedModule("copyreg", "copy_reg"),
- MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
- MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
- MovedModule("http_cookies", "Cookie", "http.cookies"),
- MovedModule("html_entities", "htmlentitydefs", "html.entities"),
- MovedModule("html_parser", "HTMLParser", "html.parser"),
- MovedModule("http_client", "httplib", "http.client"),
- MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
- MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
- MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
- MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
- MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
- MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
- MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
- MovedModule("cPickle", "cPickle", "pickle"),
- MovedModule("queue", "Queue"),
- MovedModule("reprlib", "repr"),
- MovedModule("socketserver", "SocketServer"),
- MovedModule("_thread", "thread", "_thread"),
- MovedModule("tkinter", "Tkinter"),
- MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
- MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
- MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
- MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
- MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
- MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
- MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
- MovedModule("tkinter_colorchooser", "tkColorChooser",
- "tkinter.colorchooser"),
- MovedModule("tkinter_commondialog", "tkCommonDialog",
- "tkinter.commondialog"),
- MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_font", "tkFont", "tkinter.font"),
- MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
- MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
- "tkinter.simpledialog"),
- MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
- MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
- MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
- MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
- MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
- MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
-]
-# Add windows specific modules.
-if sys.platform == "win32":
- _moved_attributes += [
- MovedModule("winreg", "_winreg"),
- ]
-
-for attr in _moved_attributes:
- setattr(_MovedItems, attr.name, attr)
- if isinstance(attr, MovedModule):
- _importer._add_module(attr, "moves." + attr.name)
-del attr
-
-_MovedItems._moved_attributes = _moved_attributes
-
-moves = _MovedItems(__name__ + ".moves")
-_importer._add_module(moves, "moves")
-
-
-class Module_six_moves_urllib_parse(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_parse"""
-
-
-_urllib_parse_moved_attributes = [
- MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
- MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
- MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
- MovedAttribute("urljoin", "urlparse", "urllib.parse"),
- MovedAttribute("urlparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
- MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
- MovedAttribute("quote", "urllib", "urllib.parse"),
- MovedAttribute("quote_plus", "urllib", "urllib.parse"),
- MovedAttribute("unquote", "urllib", "urllib.parse"),
- MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
- MovedAttribute("urlencode", "urllib", "urllib.parse"),
- MovedAttribute("splitquery", "urllib", "urllib.parse"),
- MovedAttribute("splittag", "urllib", "urllib.parse"),
- MovedAttribute("splituser", "urllib", "urllib.parse"),
- MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
- MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
- MovedAttribute("uses_params", "urlparse", "urllib.parse"),
- MovedAttribute("uses_query", "urlparse", "urllib.parse"),
- MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
-]
-for attr in _urllib_parse_moved_attributes:
- setattr(Module_six_moves_urllib_parse, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
- "moves.urllib_parse", "moves.urllib.parse")
-
-
-class Module_six_moves_urllib_error(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_error"""
-
-
-_urllib_error_moved_attributes = [
- MovedAttribute("URLError", "urllib2", "urllib.error"),
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
- MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
-]
-for attr in _urllib_error_moved_attributes:
- setattr(Module_six_moves_urllib_error, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
- "moves.urllib_error", "moves.urllib.error")
-
-
-class Module_six_moves_urllib_request(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_request"""
-
-
-_urllib_request_moved_attributes = [
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
- MovedAttribute("pathname2url", "urllib", "urllib.request"),
- MovedAttribute("url2pathname", "urllib", "urllib.request"),
- MovedAttribute("getproxies", "urllib", "urllib.request"),
- MovedAttribute("Request", "urllib2", "urllib.request"),
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
- MovedAttribute("urlretrieve", "urllib", "urllib.request"),
- MovedAttribute("urlcleanup", "urllib", "urllib.request"),
- MovedAttribute("URLopener", "urllib", "urllib.request"),
- MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
- MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
-]
-for attr in _urllib_request_moved_attributes:
- setattr(Module_six_moves_urllib_request, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
- "moves.urllib_request", "moves.urllib.request")
-
-
-class Module_six_moves_urllib_response(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_response"""
-
-
-_urllib_response_moved_attributes = [
- MovedAttribute("addbase", "urllib", "urllib.response"),
- MovedAttribute("addclosehook", "urllib", "urllib.response"),
- MovedAttribute("addinfo", "urllib", "urllib.response"),
- MovedAttribute("addinfourl", "urllib", "urllib.response"),
-]
-for attr in _urllib_response_moved_attributes:
- setattr(Module_six_moves_urllib_response, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
- "moves.urllib_response", "moves.urllib.response")
-
-
-class Module_six_moves_urllib_robotparser(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_robotparser"""
-
-
-_urllib_robotparser_moved_attributes = [
- MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
-]
-for attr in _urllib_robotparser_moved_attributes:
- setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
- "moves.urllib_robotparser", "moves.urllib.robotparser")
-
-
-class Module_six_moves_urllib(types.ModuleType):
-
- """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
- __path__ = [] # mark as package
- parse = _importer._get_module("moves.urllib_parse")
- error = _importer._get_module("moves.urllib_error")
- request = _importer._get_module("moves.urllib_request")
- response = _importer._get_module("moves.urllib_response")
- robotparser = _importer._get_module("moves.urllib_robotparser")
-
- def __dir__(self):
- return ['parse', 'error', 'request', 'response', 'robotparser']
-
-_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
- "moves.urllib")
-
-
-def add_move(move):
- """Add an item to six.moves."""
- setattr(_MovedItems, move.name, move)
-
-
-def remove_move(name):
- """Remove item from six.moves."""
- try:
- delattr(_MovedItems, name)
- except AttributeError:
- try:
- del moves.__dict__[name]
- except KeyError:
- raise AttributeError("no such move, %r" % (name,))
-
-
-if PY3:
- _meth_func = "__func__"
- _meth_self = "__self__"
-
- _func_closure = "__closure__"
- _func_code = "__code__"
- _func_defaults = "__defaults__"
- _func_globals = "__globals__"
-else:
- _meth_func = "im_func"
- _meth_self = "im_self"
-
- _func_closure = "func_closure"
- _func_code = "func_code"
- _func_defaults = "func_defaults"
- _func_globals = "func_globals"
-
-
-try:
- advance_iterator = next
-except NameError:
- def advance_iterator(it):
- return it.next()
-next = advance_iterator
-
-
-try:
- callable = callable
-except NameError:
- def callable(obj):
- return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
-
-
-if PY3:
- def get_unbound_function(unbound):
- return unbound
-
- create_bound_method = types.MethodType
-
- def create_unbound_method(func, cls):
- return func
-
- Iterator = object
-else:
- def get_unbound_function(unbound):
- return unbound.im_func
-
- def create_bound_method(func, obj):
- return types.MethodType(func, obj, obj.__class__)
-
- def create_unbound_method(func, cls):
- return types.MethodType(func, None, cls)
-
- class Iterator(object):
-
- def next(self):
- return type(self).__next__(self)
-
- callable = callable
-_add_doc(get_unbound_function,
- """Get the function out of a possibly unbound function""")
-
-
-get_method_function = operator.attrgetter(_meth_func)
-get_method_self = operator.attrgetter(_meth_self)
-get_function_closure = operator.attrgetter(_func_closure)
-get_function_code = operator.attrgetter(_func_code)
-get_function_defaults = operator.attrgetter(_func_defaults)
-get_function_globals = operator.attrgetter(_func_globals)
-
-
-if PY3:
- def iterkeys(d, **kw):
- return iter(d.keys(**kw))
-
- def itervalues(d, **kw):
- return iter(d.values(**kw))
-
- def iteritems(d, **kw):
- return iter(d.items(**kw))
-
- def iterlists(d, **kw):
- return iter(d.lists(**kw))
-
- viewkeys = operator.methodcaller("keys")
-
- viewvalues = operator.methodcaller("values")
-
- viewitems = operator.methodcaller("items")
-else:
- def iterkeys(d, **kw):
- return d.iterkeys(**kw)
-
- def itervalues(d, **kw):
- return d.itervalues(**kw)
-
- def iteritems(d, **kw):
- return d.iteritems(**kw)
-
- def iterlists(d, **kw):
- return d.iterlists(**kw)
-
- viewkeys = operator.methodcaller("viewkeys")
-
- viewvalues = operator.methodcaller("viewvalues")
-
- viewitems = operator.methodcaller("viewitems")
-
-_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
-_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
-_add_doc(iteritems,
- "Return an iterator over the (key, value) pairs of a dictionary.")
-_add_doc(iterlists,
- "Return an iterator over the (key, [values]) pairs of a dictionary.")
-
-
-if PY3:
- def b(s):
- return s.encode("latin-1")
-
- def u(s):
- return s
- unichr = chr
- import struct
- int2byte = struct.Struct(">B").pack
- del struct
- byte2int = operator.itemgetter(0)
- indexbytes = operator.getitem
- iterbytes = iter
- import io
- StringIO = io.StringIO
- BytesIO = io.BytesIO
- _assertCountEqual = "assertCountEqual"
- if sys.version_info[1] <= 1:
- _assertRaisesRegex = "assertRaisesRegexp"
- _assertRegex = "assertRegexpMatches"
- else:
- _assertRaisesRegex = "assertRaisesRegex"
- _assertRegex = "assertRegex"
-else:
- def b(s):
- return s
- # Workaround for standalone backslash
-
- def u(s):
- return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
- unichr = unichr
- int2byte = chr
-
- def byte2int(bs):
- return ord(bs[0])
-
- def indexbytes(buf, i):
- return ord(buf[i])
- iterbytes = functools.partial(itertools.imap, ord)
- import StringIO
- StringIO = BytesIO = StringIO.StringIO
- _assertCountEqual = "assertItemsEqual"
- _assertRaisesRegex = "assertRaisesRegexp"
- _assertRegex = "assertRegexpMatches"
-_add_doc(b, """Byte literal""")
-_add_doc(u, """Text literal""")
-
-
-def assertCountEqual(self, *args, **kwargs):
- return getattr(self, _assertCountEqual)(*args, **kwargs)
-
-
-def assertRaisesRegex(self, *args, **kwargs):
- return getattr(self, _assertRaisesRegex)(*args, **kwargs)
-
-
-def assertRegex(self, *args, **kwargs):
- return getattr(self, _assertRegex)(*args, **kwargs)
-
-
-if PY3:
- exec_ = getattr(moves.builtins, "exec")
-
- def reraise(tp, value, tb=None):
- if value is None:
- value = tp()
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
-else:
- def exec_(_code_, _globs_=None, _locs_=None):
- """Execute code in a namespace."""
- if _globs_ is None:
- frame = sys._getframe(1)
- _globs_ = frame.f_globals
- if _locs_ is None:
- _locs_ = frame.f_locals
- del frame
- elif _locs_ is None:
- _locs_ = _globs_
- exec("""exec _code_ in _globs_, _locs_""")
-
- exec_("""def reraise(tp, value, tb=None):
- raise tp, value, tb
-""")
-
-
-if sys.version_info[:2] == (3, 2):
- exec_("""def raise_from(value, from_value):
- if from_value is None:
- raise value
- raise value from from_value
-""")
-elif sys.version_info[:2] > (3, 2):
- exec_("""def raise_from(value, from_value):
- raise value from from_value
-""")
-else:
- def raise_from(value, from_value):
- raise value
-
-
-print_ = getattr(moves.builtins, "print", None)
-if print_ is None:
- def print_(*args, **kwargs):
- """The new-style print function for Python 2.4 and 2.5."""
- fp = kwargs.pop("file", sys.stdout)
- if fp is None:
- return
-
- def write(data):
- if not isinstance(data, basestring):
- data = str(data)
- # If the file has an encoding, encode unicode with it.
- if (isinstance(fp, file) and
- isinstance(data, unicode) and
- fp.encoding is not None):
- errors = getattr(fp, "errors", None)
- if errors is None:
- errors = "strict"
- data = data.encode(fp.encoding, errors)
- fp.write(data)
- want_unicode = False
- sep = kwargs.pop("sep", None)
- if sep is not None:
- if isinstance(sep, unicode):
- want_unicode = True
- elif not isinstance(sep, str):
- raise TypeError("sep must be None or a string")
- end = kwargs.pop("end", None)
- if end is not None:
- if isinstance(end, unicode):
- want_unicode = True
- elif not isinstance(end, str):
- raise TypeError("end must be None or a string")
- if kwargs:
- raise TypeError("invalid keyword arguments to print()")
- if not want_unicode:
- for arg in args:
- if isinstance(arg, unicode):
- want_unicode = True
- break
- if want_unicode:
- newline = unicode("\n")
- space = unicode(" ")
- else:
- newline = "\n"
- space = " "
- if sep is None:
- sep = space
- if end is None:
- end = newline
- for i, arg in enumerate(args):
- if i:
- write(sep)
- write(arg)
- write(end)
-if sys.version_info[:2] < (3, 3):
- _print = print_
-
- def print_(*args, **kwargs):
- fp = kwargs.get("file", sys.stdout)
- flush = kwargs.pop("flush", False)
- _print(*args, **kwargs)
- if flush and fp is not None:
- fp.flush()
-
-_add_doc(reraise, """Reraise an exception.""")
-
-if sys.version_info[0:2] < (3, 4):
- def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
- updated=functools.WRAPPER_UPDATES):
- def wrapper(f):
- f = functools.wraps(wrapped, assigned, updated)(f)
- f.__wrapped__ = wrapped
- return f
- return wrapper
-else:
- wraps = functools.wraps
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- # This requires a bit of explanation: the basic idea is to make a dummy
- # metaclass for one level of class instantiation that replaces itself with
- # the actual metaclass.
- class metaclass(meta):
-
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
-
-
-def add_metaclass(metaclass):
- """Class decorator for creating a class with a metaclass."""
- def wrapper(cls):
- orig_vars = cls.__dict__.copy()
- slots = orig_vars.get('__slots__')
- if slots is not None:
- if isinstance(slots, str):
- slots = [slots]
- for slots_var in slots:
- orig_vars.pop(slots_var)
- orig_vars.pop('__dict__', None)
- orig_vars.pop('__weakref__', None)
- return metaclass(cls.__name__, cls.__bases__, orig_vars)
- return wrapper
-
-
-def python_2_unicode_compatible(klass):
- """
- A decorator that defines __unicode__ and __str__ methods under Python 2.
- Under Python 3 it does nothing.
-
- To support Python 2 and 3 with a single code base, define a __str__ method
- returning text and apply this decorator to the class.
- """
- if PY2:
- if '__str__' not in klass.__dict__:
- raise ValueError("@python_2_unicode_compatible cannot be applied "
- "to %s because it doesn't define __str__()." %
- klass.__name__)
- klass.__unicode__ = klass.__str__
- klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
- return klass
-
-
-# Complete the moves implementation.
-# This code is at the end of this module to speed up module loading.
-# Turn this module into a package.
-__path__ = [] # required for PEP 302 and PEP 451
-__package__ = __name__ # see PEP 366 @ReservedAssignment
-if globals().get("__spec__") is not None:
- __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
-# Remove other six meta path importers, since they cause problems. This can
-# happen if six is removed from sys.modules and then reloaded. (Setuptools does
-# this for some reason.)
-if sys.meta_path:
- for i, importer in enumerate(sys.meta_path):
- # Here's some real nastiness: Another "instance" of the six module might
- # be floating around. Therefore, we can't use isinstance() to check for
- # the six meta path importer, since the other six instance will have
- # inserted an importer with different class.
- if (type(importer).__name__ == "_SixMetaPathImporter" and
- importer.name == __name__):
- del sys.meta_path[i]
- break
- del i, importer
-# Finally, add the importer to the meta path import hook.
-sys.meta_path.append(_importer)
diff --git a/pkg_resources/_vendor/vendored.txt b/pkg_resources/_vendor/vendored.txt
index 9a94c5b..d5dbe73 100644
--- a/pkg_resources/_vendor/vendored.txt
+++ b/pkg_resources/_vendor/vendored.txt
@@ -1,4 +1,8 @@
-packaging==16.8
-pyparsing==2.1.10
-six==1.10.0
-appdirs==1.4.0
+packaging==21.3
+pyparsing==2.2.1
+appdirs==1.4.3
+jaraco.text==3.7.0
+# required for jaraco.text on older Pythons
+importlib_resources==5.4.0
+# required for importlib_resources on older Pythons
+zipp==3.7.0
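
(Illustrative only; the project's actual re-vendoring tooling may differ. Pins like the ones above are typically consumed by installing them into the vendor directory, e.g. pip install -r pkg_resources/_vendor/vendored.txt --target pkg_resources/_vendor.)
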
diff --git a/pkg_resources/_vendor/zipp-3.7.0.dist-info/top_level.txt b/pkg_resources/_vendor/zipp-3.7.0.dist-info/top_level.txt
new file mode 100644
index 0000000..e82f676
--- /dev/null
+++ b/pkg_resources/_vendor/zipp-3.7.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+zipp
diff --git a/pkg_resources/_vendor/zipp.py b/pkg_resources/_vendor/zipp.py
new file mode 100644
index 0000000..26b723c
--- /dev/null
+++ b/pkg_resources/_vendor/zipp.py
@@ -0,0 +1,329 @@
+import io
+import posixpath
+import zipfile
+import itertools
+import contextlib
+import sys
+import pathlib
+
+if sys.version_info < (3, 7):
+ from collections import OrderedDict
+else:
+ OrderedDict = dict
+
+
+__all__ = ['Path']
+
+
+def _parents(path):
+ """
+ Given a path with elements separated by
+ posixpath.sep, generate all parents of that path.
+
+ >>> list(_parents('b/d'))
+ ['b']
+ >>> list(_parents('/b/d/'))
+ ['/b']
+ >>> list(_parents('b/d/f/'))
+ ['b/d', 'b']
+ >>> list(_parents('b'))
+ []
+ >>> list(_parents(''))
+ []
+ """
+ return itertools.islice(_ancestry(path), 1, None)
+
+
+def _ancestry(path):
+ """
+ Given a path with elements separated by
+ posixpath.sep, generate all elements of that path
+
+ >>> list(_ancestry('b/d'))
+ ['b/d', 'b']
+ >>> list(_ancestry('/b/d/'))
+ ['/b/d', '/b']
+ >>> list(_ancestry('b/d/f/'))
+ ['b/d/f', 'b/d', 'b']
+ >>> list(_ancestry('b'))
+ ['b']
+ >>> list(_ancestry(''))
+ []
+ """
+ path = path.rstrip(posixpath.sep)
+ while path and path != posixpath.sep:
+ yield path
+ path, tail = posixpath.split(path)
+
+
+_dedupe = OrderedDict.fromkeys
+"""Deduplicate an iterable in original order"""
+
+
+def _difference(minuend, subtrahend):
+ """
+ Return items in minuend not in subtrahend, retaining order
+ with O(1) lookup.
+ """
+ return itertools.filterfalse(set(subtrahend).__contains__, minuend)
+
+
+class CompleteDirs(zipfile.ZipFile):
+ """
+ A ZipFile subclass that ensures that implied directories
+ are always included in the namelist.
+ """
+
+ @staticmethod
+ def _implied_dirs(names):
+ parents = itertools.chain.from_iterable(map(_parents, names))
+ as_dirs = (p + posixpath.sep for p in parents)
+ return _dedupe(_difference(as_dirs, names))
+
+ def namelist(self):
+ names = super(CompleteDirs, self).namelist()
+ return names + list(self._implied_dirs(names))
+
+ def _name_set(self):
+ return set(self.namelist())
+
+ def resolve_dir(self, name):
+ """
+ If the name represents a directory, return that name
+ as a directory (with the trailing slash).
+ """
+ names = self._name_set()
+ dirname = name + '/'
+ dir_match = name not in names and dirname in names
+ return dirname if dir_match else name
+
+ @classmethod
+ def make(cls, source):
+ """
+ Given a source (filename or zipfile), return an
+ appropriate CompleteDirs subclass.
+ """
+ if isinstance(source, CompleteDirs):
+ return source
+
+ if not isinstance(source, zipfile.ZipFile):
+ return cls(_pathlib_compat(source))
+
+ # Only allow for FastLookup when supplied zipfile is read-only
+ if 'r' not in source.mode:
+ cls = CompleteDirs
+
+ source.__class__ = cls
+ return source
+
+
+class FastLookup(CompleteDirs):
+ """
+ ZipFile subclass to ensure implicit
+ dirs exist and are resolved rapidly.
+ """
+
+ def namelist(self):
+ with contextlib.suppress(AttributeError):
+ return self.__names
+ self.__names = super(FastLookup, self).namelist()
+ return self.__names
+
+ def _name_set(self):
+ with contextlib.suppress(AttributeError):
+ return self.__lookup
+ self.__lookup = super(FastLookup, self)._name_set()
+ return self.__lookup
+
+
+def _pathlib_compat(path):
+ """
+ For path-like objects, convert to a filename for compatibility
+ on Python 3.6.1 and earlier.
+ """
+ try:
+ return path.__fspath__()
+ except AttributeError:
+ return str(path)
+
+
+class Path:
+ """
+ A pathlib-compatible interface for zip files.
+
+ Consider a zip file with this structure::
+
+ .
+ ├── a.txt
+ └── b
+ ├── c.txt
+ └── d
+ └── e.txt
+
+ >>> data = io.BytesIO()
+ >>> zf = zipfile.ZipFile(data, 'w')
+ >>> zf.writestr('a.txt', 'content of a')
+ >>> zf.writestr('b/c.txt', 'content of c')
+ >>> zf.writestr('b/d/e.txt', 'content of e')
+ >>> zf.filename = 'mem/abcde.zip'
+
+ Path accepts the zipfile object itself or a filename
+
+ >>> root = Path(zf)
+
+ From there, several path operations are available.
+
+ Directory iteration (including the zip file itself):
+
+ >>> a, b = root.iterdir()
+ >>> a
+ Path('mem/abcde.zip', 'a.txt')
+ >>> b
+ Path('mem/abcde.zip', 'b/')
+
+ name property:
+
+ >>> b.name
+ 'b'
+
+ join with divide operator:
+
+ >>> c = b / 'c.txt'
+ >>> c
+ Path('mem/abcde.zip', 'b/c.txt')
+ >>> c.name
+ 'c.txt'
+
+ Read text:
+
+ >>> c.read_text()
+ 'content of c'
+
+ existence:
+
+ >>> c.exists()
+ True
+ >>> (b / 'missing.txt').exists()
+ False
+
+ Coercion to string:
+
+ >>> import os
+ >>> str(c).replace(os.sep, posixpath.sep)
+ 'mem/abcde.zip/b/c.txt'
+
+ At the root, ``name``, ``filename``, and ``parent``
+ resolve to the zipfile. Note these attributes are not
+ valid and will raise a ``ValueError`` if the zipfile
+ has no filename.
+
+ >>> root.name
+ 'abcde.zip'
+ >>> str(root.filename).replace(os.sep, posixpath.sep)
+ 'mem/abcde.zip'
+ >>> str(root.parent)
+ 'mem'
+ """
+
+ __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
+
+ def __init__(self, root, at=""):
+ """
+ Construct a Path from a ZipFile or filename.
+
+ Note: When the source is an existing ZipFile object,
+ its type (__class__) will be mutated to a
+ specialized type. If the caller wishes to retain the
+ original type, the caller should either create a
+ separate ZipFile object or pass a filename.
+ """
+ self.root = FastLookup.make(root)
+ self.at = at
+
+ def open(self, mode='r', *args, pwd=None, **kwargs):
+ """
+ Open this entry as text or binary following the semantics
+ of ``pathlib.Path.open()`` by passing arguments through
+ to io.TextIOWrapper().
+ """
+ if self.is_dir():
+ raise IsADirectoryError(self)
+ zip_mode = mode[0]
+ if not self.exists() and zip_mode == 'r':
+ raise FileNotFoundError(self)
+ stream = self.root.open(self.at, zip_mode, pwd=pwd)
+ if 'b' in mode:
+ if args or kwargs:
+ raise ValueError("encoding args invalid for binary operation")
+ return stream
+ return io.TextIOWrapper(stream, *args, **kwargs)
+
+ @property
+ def name(self):
+ return pathlib.Path(self.at).name or self.filename.name
+
+ @property
+ def suffix(self):
+ return pathlib.Path(self.at).suffix or self.filename.suffix
+
+ @property
+ def suffixes(self):
+ return pathlib.Path(self.at).suffixes or self.filename.suffixes
+
+ @property
+ def stem(self):
+ return pathlib.Path(self.at).stem or self.filename.stem
+
+ @property
+ def filename(self):
+ return pathlib.Path(self.root.filename).joinpath(self.at)
+
+ def read_text(self, *args, **kwargs):
+ with self.open('r', *args, **kwargs) as strm:
+ return strm.read()
+
+ def read_bytes(self):
+ with self.open('rb') as strm:
+ return strm.read()
+
+ def _is_child(self, path):
+ return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
+
+ def _next(self, at):
+ return self.__class__(self.root, at)
+
+ def is_dir(self):
+ return not self.at or self.at.endswith("/")
+
+ def is_file(self):
+ return self.exists() and not self.is_dir()
+
+ def exists(self):
+ return self.at in self.root._name_set()
+
+ def iterdir(self):
+ if not self.is_dir():
+ raise ValueError("Can't listdir a file")
+ subs = map(self._next, self.root.namelist())
+ return filter(self._is_child, subs)
+
+ def __str__(self):
+ return posixpath.join(self.root.filename, self.at)
+
+ def __repr__(self):
+ return self.__repr.format(self=self)
+
+ def joinpath(self, *other):
+ next = posixpath.join(self.at, *map(_pathlib_compat, other))
+ return self._next(self.root.resolve_dir(next))
+
+ __truediv__ = joinpath
+
+ @property
+ def parent(self):
+ if not self.at:
+ return self.filename.parent
+ parent_at = posixpath.dirname(self.at.rstrip('/'))
+ if parent_at:
+ parent_at += '/'
+ return self._next(parent_at)
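
A closing sketch, not part of the diff, of the Path behavior documented above, including the implied directories that CompleteDirs injects; zipp here stands in for the vendored copy:

    import io
    import zipfile

    import zipp

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('pkg/data/conf.txt', 'x = 1')

    root = zipp.Path(zipfile.ZipFile(buf))
    # 'pkg/' never appears in the archive, but CompleteDirs implies it
    print([p.at for p in root.iterdir()])                    # ['pkg/']
    print((root / 'pkg' / 'data' / 'conf.txt').read_text())  # x = 1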