author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/python/setuptools/pkg_resources
parent     Initial commit. (diff)
download   firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
           firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip
Adding upstream version 115.7.0esr. (upstream/115.7.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/python/setuptools/pkg_resources')
-rw-r--r--  third_party/python/setuptools/pkg_resources/__init__.py                        3285
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/__init__.py                   0
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/appdirs.py                  608
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/__about__.py       27
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/__init__.py        26
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/_compat.py         38
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/_structures.py     86
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/_typing.py         48
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/markers.py        328
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/requirements.py   145
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/specifiers.py     863
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/tags.py           751
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/utils.py           65
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/packaging/version.py        535
-rw-r--r--  third_party/python/setuptools/pkg_resources/_vendor/pyparsing.py               5742
-rw-r--r--  third_party/python/setuptools/pkg_resources/extern/__init__.py                   66
16 files changed, 12613 insertions, 0 deletions
diff --git a/third_party/python/setuptools/pkg_resources/__init__.py b/third_party/python/setuptools/pkg_resources/__init__.py
new file mode 100644
index 0000000000..99b7f68075
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/__init__.py
@@ -0,0 +1,3285 @@
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof. The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is. Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files. It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
+import errno
+import tempfile
+import textwrap
+import itertools
+import inspect
+import ntpath
+import posixpath
+import importlib
+from pkgutil import get_importer
+
+try:
+ import _imp
+except ImportError:
+ # Python 3.2 compatibility
+ import imp as _imp
+
+try:
+ FileExistsError
+except NameError:
+ FileExistsError = OSError
+
+# capture these to bypass sandboxing
+from os import utime
+try:
+ from os import mkdir, rename, unlink
+ WRITE_SUPPORT = True
+except ImportError:
+ # no write support, probably under GAE
+ WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+ import importlib.machinery as importlib_machinery
+ # access attribute to force import under delayed import mechanisms.
+ importlib_machinery.__name__
+except ImportError:
+ importlib_machinery = None
+
+from pkg_resources.extern import appdirs
+from pkg_resources.extern import packaging
+__import__('pkg_resources.extern.packaging.version')
+__import__('pkg_resources.extern.packaging.specifiers')
+__import__('pkg_resources.extern.packaging.requirements')
+__import__('pkg_resources.extern.packaging.markers')
+
+if sys.version_info < (3, 5):
+ raise RuntimeError("Python 3.5 or later is required")
+
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
+add_activation_listener = None
+resources_stream = None
+cleanup_resources = None
+resource_dir = None
+resource_stream = None
+set_extraction_path = None
+resource_isdir = None
+resource_string = None
+iter_entry_points = None
+resource_listdir = None
+resource_filename = None
+resource_exists = None
+_distribution_finders = None
+_namespace_handlers = None
+_namespace_packages = None
+
+
+class PEP440Warning(RuntimeWarning):
+ """
+ Used when there is an issue with a version or specifier not complying with
+ PEP 440.
+ """
+
+
+def parse_version(v):
+ try:
+ return packaging.version.Version(v)
+ except packaging.version.InvalidVersion:
+ return packaging.version.LegacyVersion(v)
+
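+# Editor's note: an illustrative sketch (not part of upstream pkg_resources)
+# showing how parse_version() orders strings and falls back for non-PEP 440
+# input.  The version strings are arbitrary examples.
+def _example_parse_version_ordering():
+    assert parse_version('1.0.1') < parse_version('1.1')
+    assert parse_version('1.1') < parse_version('1.1.post1')
+    # Strings that do not parse as PEP 440 become LegacyVersion objects
+    # instead of raising, so comparisons never blow up.
+    assert isinstance(parse_version('not-a-release'),
+                      packaging.version.LegacyVersion)
+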
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+ globals().update(kw)
+ _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+ state = {}
+ g = globals()
+ for k, v in _state_vars.items():
+ state[k] = g['_sget_' + v](g[k])
+ return state
+
+
+def __setstate__(state):
+ g = globals()
+ for k, v in state.items():
+ g['_sset_' + _state_vars[k]](k, g[k], v)
+ return state
+
+
+def _sget_dict(val):
+ return val.copy()
+
+
+def _sset_dict(key, ob, state):
+ ob.clear()
+ ob.update(state)
+
+
+def _sget_object(val):
+ return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+ ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
+def get_supported_platform():
+ """Return this platform's maximum compatible version.
+
+ distutils.util.get_platform() normally reports the minimum version
+ of macOS that would be required to *use* extensions produced by
+ distutils. But what we want when checking compatibility is to know the
+ version of macOS that we are *running*. To allow usage of packages that
+ explicitly require a newer version of macOS, we must also know the
+ current version of the OS.
+
+ If this condition occurs for any other platform with a version in its
+ platform strings, this function should be extended accordingly.
+ """
+ plat = get_build_platform()
+ m = macosVersionString.match(plat)
+ if m is not None and sys.platform == "darwin":
+ try:
+ plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
+ except ValueError:
+ # not macOS
+ pass
+ return plat
+
+
+__all__ = [
+ # Basic resource access and distribution/entry point discovery
+ 'require', 'run_script', 'get_provider', 'get_distribution',
+ 'load_entry_point', 'get_entry_map', 'get_entry_info',
+ 'iter_entry_points',
+ 'resource_string', 'resource_stream', 'resource_filename',
+ 'resource_listdir', 'resource_exists', 'resource_isdir',
+
+ # Environmental control
+ 'declare_namespace', 'working_set', 'add_activation_listener',
+ 'find_distributions', 'set_extraction_path', 'cleanup_resources',
+ 'get_default_cache',
+
+ # Primary implementation classes
+ 'Environment', 'WorkingSet', 'ResourceManager',
+ 'Distribution', 'Requirement', 'EntryPoint',
+
+ # Exceptions
+ 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+ 'UnknownExtra', 'ExtractionError',
+
+ # Warnings
+ 'PEP440Warning',
+
+ # Parsing functions and string utilities
+ 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+ 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+ 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+ # filesystem utilities
+ 'ensure_directory', 'normalize_path',
+
+ # Distribution "precedence" constants
+ 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+ # "Provider" interfaces, implementations, and registration/lookup APIs
+ 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+ 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+ 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+ 'register_finder', 'register_namespace_handler', 'register_loader_type',
+ 'fixup_namespace_packages', 'get_importer',
+
+ # Warnings
+ 'PkgResourcesDeprecationWarning',
+
+ # Deprecated/backward compatibility only
+ 'run_main', 'AvailableDistributions',
+]
+
+
+class ResolutionError(Exception):
+ """Abstract base for dependency resolution errors"""
+
+ def __repr__(self):
+ return self.__class__.__name__ + repr(self.args)
+
+
+class VersionConflict(ResolutionError):
+ """
+ An already-installed version conflicts with the requested version.
+
+ Should be initialized with the installed Distribution and the requested
+ Requirement.
+ """
+
+ _template = "{self.dist} is installed but {self.req} is required"
+
+ @property
+ def dist(self):
+ return self.args[0]
+
+ @property
+ def req(self):
+ return self.args[1]
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def with_context(self, required_by):
+ """
+ If required_by is non-empty, return a version of self that is a
+ ContextualVersionConflict.
+ """
+ if not required_by:
+ return self
+ args = self.args + (required_by,)
+ return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+ """
+ A VersionConflict that accepts a third parameter, the set of the
+ requirements that required the installed Distribution.
+ """
+
+ _template = VersionConflict._template + ' by {self.required_by}'
+
+ @property
+ def required_by(self):
+ return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+ """A requested distribution was not found"""
+
+ _template = ("The '{self.req}' distribution was not found "
+ "and is required by {self.requirers_str}")
+
+ @property
+ def req(self):
+ return self.args[0]
+
+ @property
+ def requirers(self):
+ return self.args[1]
+
+ @property
+ def requirers_str(self):
+ if not self.requirers:
+ return 'the application'
+ return ', '.join(self.requirers)
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def __str__(self):
+ return self.report()
+
+
+class UnknownExtra(ResolutionError):
+ """Distribution doesn't have an "extra feature" of the given name"""
+
+
+_provider_factories = {}
+
+PY_MAJOR = '{}.{}'.format(*sys.version_info)
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(loader_type, provider_factory):
+ """Register `provider_factory` to make providers for `loader_type`
+
+ `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+ and `provider_factory` is a function that, passed a *module* object,
+ returns an ``IResourceProvider`` for that module.
+ """
+ _provider_factories[loader_type] = provider_factory
+
+
+def get_provider(moduleOrReq):
+ """Return an IResourceProvider for the named module or requirement"""
+ if isinstance(moduleOrReq, Requirement):
+ return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+ try:
+ module = sys.modules[moduleOrReq]
+ except KeyError:
+ __import__(moduleOrReq)
+ module = sys.modules[moduleOrReq]
+ loader = getattr(module, '__loader__', None)
+ return _find_adapter(_provider_factories, loader)(module)
+
+
+def _macos_vers(_cache=[]):
+ if not _cache:
+ version = platform.mac_ver()[0]
+ # fallback for MacPorts
+ if version == '':
+ plist = '/System/Library/CoreServices/SystemVersion.plist'
+ if os.path.exists(plist):
+ if hasattr(plistlib, 'readPlist'):
+ plist_content = plistlib.readPlist(plist)
+ if 'ProductVersion' in plist_content:
+ version = plist_content['ProductVersion']
+
+ _cache.append(version.split('.'))
+ return _cache[0]
+
+
+def _macos_arch(machine):
+ return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
+def get_build_platform():
+ """Return this platform's string for platform-specific distributions
+
+ XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+ needs some hacks for Linux and macOS.
+ """
+ from sysconfig import get_platform
+
+ plat = get_platform()
+ if sys.platform == "darwin" and not plat.startswith('macosx-'):
+ try:
+ version = _macos_vers()
+ machine = os.uname()[4].replace(" ", "_")
+ return "macosx-%d.%d-%s" % (
+ int(version[0]), int(version[1]),
+ _macos_arch(machine),
+ )
+ except ValueError:
+ # if someone is running a non-Mac darwin system, this will fall
+ # through to the default implementation
+ pass
+ return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
+
+def compatible_platforms(provided, required):
+ """Can code for the `provided` platform run on the `required` platform?
+
+ Returns true if either platform is ``None``, or the platforms are equal.
+
+ XXX Needs compatibility checks for Linux and other unixy OSes.
+ """
+ if provided is None or required is None or provided == required:
+ # easy case
+ return True
+
+ # macOS special cases
+ reqMac = macosVersionString.match(required)
+ if reqMac:
+ provMac = macosVersionString.match(provided)
+
+ # is this a Mac package?
+ if not provMac:
+ # this is backwards compatibility for packages built before
+ # setuptools 0.6. All packages built after this point will
+ # use the new macOS designation.
+ provDarwin = darwinVersionString.match(provided)
+ if provDarwin:
+ dversion = int(provDarwin.group(1))
+ macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+ if dversion == 7 and macosversion >= "10.3" or \
+ dversion == 8 and macosversion >= "10.4":
+ return True
+ # egg isn't macOS or legacy darwin
+ return False
+
+ # are they the same major version and machine type?
+ if provMac.group(1) != reqMac.group(1) or \
+ provMac.group(3) != reqMac.group(3):
+ return False
+
+ # is the required OS major update >= the provided one?
+ if int(provMac.group(2)) > int(reqMac.group(2)):
+ return False
+
+ return True
+
+ # XXX Linux and other platforms' special cases should go here
+ return False
+
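+# Editor's note: a hedged example (not part of upstream pkg_resources) of the
+# macOS compatibility rules implemented above; the platform strings are
+# illustrative assumptions.
+def _example_compatible_platforms():
+    # Either side being None means "anything goes".
+    assert compatible_platforms(None, 'win32')
+    # A 10.3 binary may run on a 10.5 host (same major version and machine).
+    assert compatible_platforms('macosx-10.3-ppc', 'macosx-10.5-ppc')
+    # A 10.6 binary may not run on a 10.5 host.
+    assert not compatible_platforms('macosx-10.6-intel', 'macosx-10.5-intel')
+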
+
+def run_script(dist_spec, script_name):
+ """Locate distribution `dist_spec` and run its `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ require(dist_spec)[0].run_script(script_name, ns)
+
+
+# backward compatibility
+run_main = run_script
+
+
+def get_distribution(dist):
+ """Return a current distribution object for a Requirement or string"""
+ if isinstance(dist, str):
+ dist = Requirement.parse(dist)
+ if isinstance(dist, Requirement):
+ dist = get_provider(dist)
+ if not isinstance(dist, Distribution):
+ raise TypeError("Expected string, Requirement, or Distribution", dist)
+ return dist
+
+
+def load_entry_point(dist, group, name):
+ """Return `name` entry point of `group` for `dist` or raise ImportError"""
+ return get_distribution(dist).load_entry_point(group, name)
+
+
+def get_entry_map(dist, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return get_distribution(dist).get_entry_info(group, name)
+
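+# Editor's note: an illustrative sketch only (not upstream code); 'examplepkg'
+# and 'example-cli' are hypothetical names used to show the helpers above.
+def _example_entry_point_lookup():
+    dist = get_distribution('examplepkg')  # Distribution for the project
+    info = get_entry_info(dist, 'console_scripts', 'example-cli')  # EntryPoint or None
+    main = load_entry_point(dist, 'console_scripts', 'example-cli')  # imported callable
+    return dist.version, info, main
+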
+
+class IMetadataProvider:
+ def has_metadata(name):
+ """Does the package's distribution contain the named metadata?"""
+
+ def get_metadata(name):
+ """The named metadata resource as a string"""
+
+ def get_metadata_lines(name):
+ """Yield named metadata resource as list of non-blank non-comment lines
+
+ Leading and trailing whitespace is stripped from each line, and lines
+ with ``#`` as the first non-blank character are omitted."""
+
+ def metadata_isdir(name):
+ """Is the named metadata a directory? (like ``os.path.isdir()``)"""
+
+ def metadata_listdir(name):
+ """List of metadata names in the directory (like ``os.listdir()``)"""
+
+ def run_script(script_name, namespace):
+ """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+ """An object that provides access to package resources"""
+
+ def get_resource_filename(manager, resource_name):
+ """Return a true filesystem path for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_stream(manager, resource_name):
+ """Return a readable file-like object for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_string(manager, resource_name):
+ """Return a string containing the contents of `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def has_resource(resource_name):
+ """Does the package contain the named resource?"""
+
+ def resource_isdir(resource_name):
+ """Is the named resource a directory? (like ``os.path.isdir()``)"""
+
+ def resource_listdir(resource_name):
+ """List of resource names in the directory (like ``os.listdir()``)"""
+
+
+class WorkingSet:
+ """A collection of active distributions on sys.path (or a similar list)"""
+
+ def __init__(self, entries=None):
+ """Create working set from list of path entries (default=sys.path)"""
+ self.entries = []
+ self.entry_keys = {}
+ self.by_key = {}
+ self.callbacks = []
+
+ if entries is None:
+ entries = sys.path
+
+ for entry in entries:
+ self.add_entry(entry)
+
+ @classmethod
+ def _build_master(cls):
+ """
+ Prepare the master working set.
+ """
+ ws = cls()
+ try:
+ from __main__ import __requires__
+ except ImportError:
+ # The main program does not list any requirements
+ return ws
+
+ # ensure the requirements are met
+ try:
+ ws.require(__requires__)
+ except VersionConflict:
+ return cls._build_from_requirements(__requires__)
+
+ return ws
+
+ @classmethod
+ def _build_from_requirements(cls, req_spec):
+ """
+ Build a working set from a requirement spec. Rewrites sys.path.
+ """
+ # try it without defaults already on sys.path
+ # by starting with an empty path
+ ws = cls([])
+ reqs = parse_requirements(req_spec)
+ dists = ws.resolve(reqs, Environment())
+ for dist in dists:
+ ws.add(dist)
+
+ # add any missing entries from sys.path
+ for entry in sys.path:
+ if entry not in ws.entries:
+ ws.add_entry(entry)
+
+ # then copy back to sys.path
+ sys.path[:] = ws.entries
+ return ws
+
+ def add_entry(self, entry):
+ """Add a path item to ``.entries``, finding any distributions on it
+
+ ``find_distributions(entry, True)`` is used to find distributions
+ corresponding to the path entry, and they are added. `entry` is
+ always appended to ``.entries``, even if it is already present.
+ (This is because ``sys.path`` can contain the same value more than
+ once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+ equal ``sys.path``.)
+ """
+ self.entry_keys.setdefault(entry, [])
+ self.entries.append(entry)
+ for dist in find_distributions(entry, True):
+ self.add(dist, entry, False)
+
+ def __contains__(self, dist):
+ """True if `dist` is the active distribution for its project"""
+ return self.by_key.get(dist.key) == dist
+
+ def find(self, req):
+ """Find a distribution matching requirement `req`
+
+ If there is an active distribution for the requested project, this
+ returns it as long as it meets the version requirement specified by
+ `req`. But, if there is an active distribution for the project and it
+ does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+ If there is no active distribution for the requested project, ``None``
+ is returned.
+ """
+ dist = self.by_key.get(req.key)
+ if dist is not None and dist not in req:
+ # XXX add more info
+ raise VersionConflict(dist, req)
+ return dist
+
+ def iter_entry_points(self, group, name=None):
+ """Yield entry point objects from `group` matching `name`
+
+ If `name` is None, yields all entry points in `group` from all
+ distributions in the working set, otherwise only ones matching
+ both `group` and `name` are yielded (in distribution order).
+ """
+ return (
+ entry
+ for dist in self
+ for entry in dist.get_entry_map(group).values()
+ if name is None or name == entry.name
+ )
+
+ def run_script(self, requires, script_name):
+ """Locate distribution for `requires` and run `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ self.require(requires)[0].run_script(script_name, ns)
+
+ def __iter__(self):
+ """Yield distributions for non-duplicate projects in the working set
+
+ The yield order is the order in which the items' path entries were
+ added to the working set.
+ """
+ seen = {}
+ for item in self.entries:
+ if item not in self.entry_keys:
+ # workaround a cache issue
+ continue
+
+ for key in self.entry_keys[item]:
+ if key not in seen:
+ seen[key] = 1
+ yield self.by_key[key]
+
+ def add(self, dist, entry=None, insert=True, replace=False):
+ """Add `dist` to working set, associated with `entry`
+
+ If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+ On exit from this routine, `entry` is added to the end of the working
+ set's ``.entries`` (if it wasn't already present).
+
+ `dist` is only added to the working set if it's for a project that
+ doesn't already have a distribution in the set, unless `replace=True`.
+ If it's added, any callbacks registered with the ``subscribe()`` method
+ will be called.
+ """
+ if insert:
+ dist.insert_on(self.entries, entry, replace=replace)
+
+ if entry is None:
+ entry = dist.location
+ keys = self.entry_keys.setdefault(entry, [])
+ keys2 = self.entry_keys.setdefault(dist.location, [])
+ if not replace and dist.key in self.by_key:
+ # ignore hidden distros
+ return
+
+ self.by_key[dist.key] = dist
+ if dist.key not in keys:
+ keys.append(dist.key)
+ if dist.key not in keys2:
+ keys2.append(dist.key)
+ self._added_new(dist)
+
+ def resolve(self, requirements, env=None, installer=None,
+ replace_conflicting=False, extras=None):
+ """List all distributions needed to (recursively) meet `requirements`
+
+ `requirements` must be a sequence of ``Requirement`` objects. `env`,
+ if supplied, should be an ``Environment`` instance. If
+ not supplied, it defaults to all distributions available within any
+ entry or distribution in the working set. `installer`, if supplied,
+ will be invoked with each requirement that cannot be met by an
+ already-installed distribution; it should return a ``Distribution`` or
+ ``None``.
+
+ Unless `replace_conflicting=True`, raises a VersionConflict exception
+ if any requirements are found on the path that have the correct name
+ but the wrong version. Otherwise, if an `installer` is supplied it
+ will be invoked to obtain the correct version of the requirement and
+ activate it.
+
+ `extras` is a list of the extras to be used with these requirements.
+ This is important because extra requirements may look like `my_req;
+ extra = "my_extra"`, which would otherwise be interpreted as a purely
+ optional requirement. Instead, we want to be able to assert that these
+ requirements are truly required.
+ """
+
+ # set up the stack
+ requirements = list(requirements)[::-1]
+ # set of processed requirements
+ processed = {}
+ # key -> dist
+ best = {}
+ to_activate = []
+
+ req_extras = _ReqExtras()
+
+ # Mapping of requirement to set of distributions that required it;
+ # useful for reporting info about conflicts.
+ required_by = collections.defaultdict(set)
+
+ while requirements:
+ # process dependencies breadth-first
+ req = requirements.pop(0)
+ if req in processed:
+ # Ignore cyclic or redundant dependencies
+ continue
+
+ if not req_extras.markers_pass(req, extras):
+ continue
+
+ dist = best.get(req.key)
+ if dist is None:
+ # Find the best distribution and add it to the map
+ dist = self.by_key.get(req.key)
+ if dist is None or (dist not in req and replace_conflicting):
+ ws = self
+ if env is None:
+ if dist is None:
+ env = Environment(self.entries)
+ else:
+ # Use an empty environment and workingset to avoid
+ # any further conflicts with the conflicting
+ # distribution
+ env = Environment([])
+ ws = WorkingSet([])
+ dist = best[req.key] = env.best_match(
+ req, ws, installer,
+ replace_conflicting=replace_conflicting
+ )
+ if dist is None:
+ requirers = required_by.get(req, None)
+ raise DistributionNotFound(req, requirers)
+ to_activate.append(dist)
+ if dist not in req:
+ # Oops, the "best" so far conflicts with a dependency
+ dependent_req = required_by[req]
+ raise VersionConflict(dist, req).with_context(dependent_req)
+
+ # push the new requirements onto the stack
+ new_requirements = dist.requires(req.extras)[::-1]
+ requirements.extend(new_requirements)
+
+ # Register the new requirements needed by req
+ for new_requirement in new_requirements:
+ required_by[new_requirement].add(req.project_name)
+ req_extras[new_requirement] = req.extras
+
+ processed[req] = True
+
+ # return list of distros to activate
+ return to_activate
+
+ def find_plugins(
+ self, plugin_env, full_env=None, installer=None, fallback=True):
+ """Find all activatable distributions in `plugin_env`
+
+ Example usage::
+
+ distributions, errors = working_set.find_plugins(
+ Environment(plugin_dirlist)
+ )
+ # add plugins+libs to sys.path
+ map(working_set.add, distributions)
+ # display errors
+ print('Could not load', errors)
+
+ The `plugin_env` should be an ``Environment`` instance that contains
+ only distributions that are in the project's "plugin directory" or
+ directories. The `full_env`, if supplied, should be an ``Environment``
+ that contains all currently-available distributions. If `full_env` is not
+ supplied, one is created automatically from the ``WorkingSet`` this
+ method is called on, which will typically mean that every directory on
+ ``sys.path`` will be scanned for distributions.
+
+ `installer` is a standard installer callback as used by the
+ ``resolve()`` method. The `fallback` flag indicates whether we should
+ attempt to resolve older versions of a plugin if the newest version
+ cannot be resolved.
+
+ This method returns a 2-tuple: (`distributions`, `error_info`), where
+ `distributions` is a list of the distributions found in `plugin_env`
+ that were loadable, along with any other distributions that are needed
+ to resolve their dependencies. `error_info` is a dictionary mapping
+ unloadable plugin distributions to an exception instance describing the
+ error that occurred. Usually this will be a ``DistributionNotFound`` or
+ ``VersionConflict`` instance.
+ """
+
+ plugin_projects = list(plugin_env)
+ # scan project names in alphabetic order
+ plugin_projects.sort()
+
+ error_info = {}
+ distributions = {}
+
+ if full_env is None:
+ env = Environment(self.entries)
+ env += plugin_env
+ else:
+ env = full_env + plugin_env
+
+ shadow_set = self.__class__([])
+ # put all our entries in shadow_set
+ list(map(shadow_set.add, self))
+
+ for project_name in plugin_projects:
+
+ for dist in plugin_env[project_name]:
+
+ req = [dist.as_requirement()]
+
+ try:
+ resolvees = shadow_set.resolve(req, env, installer)
+
+ except ResolutionError as v:
+ # save error info
+ error_info[dist] = v
+ if fallback:
+ # try the next older version of project
+ continue
+ else:
+ # give up on this project, keep going
+ break
+
+ else:
+ list(map(shadow_set.add, resolvees))
+ distributions.update(dict.fromkeys(resolvees))
+
+ # success, no need to try any more versions of this project
+ break
+
+ distributions = list(distributions)
+ distributions.sort()
+
+ return distributions, error_info
+
+ def require(self, *requirements):
+ """Ensure that distributions matching `requirements` are activated
+
+ `requirements` must be a string or a (possibly-nested) sequence
+ thereof, specifying the distributions and versions required. The
+ return value is a sequence of the distributions that needed to be
+ activated to fulfill the requirements; all relevant distributions are
+ included, even if they were already activated in this working set.
+ """
+ needed = self.resolve(parse_requirements(requirements))
+
+ for dist in needed:
+ self.add(dist)
+
+ return needed
+
+ def subscribe(self, callback, existing=True):
+ """Invoke `callback` for all distributions
+
+ If `existing=True` (default), call on all existing ones, as well.
+ """
+ if callback in self.callbacks:
+ return
+ self.callbacks.append(callback)
+ if not existing:
+ return
+ for dist in self:
+ callback(dist)
+
+ def _added_new(self, dist):
+ for callback in self.callbacks:
+ callback(dist)
+
+ def __getstate__(self):
+ return (
+ self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
+ self.callbacks[:]
+ )
+
+ def __setstate__(self, e_k_b_c):
+ entries, keys, by_key, callbacks = e_k_b_c
+ self.entries = entries[:]
+ self.entry_keys = keys.copy()
+ self.by_key = by_key.copy()
+ self.callbacks = callbacks[:]
+
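+# Editor's note: a hedged usage sketch (not part of upstream pkg_resources);
+# 'examplepkg>=1.0' is a hypothetical requirement string.
+def _example_working_set_usage():
+    ws = WorkingSet()                            # snapshot of sys.path
+    activated = ws.require('examplepkg>=1.0')    # resolve + activate, may raise
+    names = [ep.name for ep in ws.iter_entry_points('console_scripts')]
+    return activated, names
+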
+
+class _ReqExtras(dict):
+ """
+ Map each requirement to the extras that demanded it.
+ """
+
+ def markers_pass(self, req, extras=None):
+ """
+ Evaluate markers for req against each extra that
+ demanded it.
+
+ Return False if the req has a marker and fails
+ evaluation. Otherwise, return True.
+ """
+ extra_evals = (
+ req.marker.evaluate({'extra': extra})
+ for extra in self.get(req, ()) + (extras or (None,))
+ )
+ return not req.marker or any(extra_evals)
+
+
+class Environment:
+ """Searchable snapshot of distributions on a search path"""
+
+ def __init__(
+ self, search_path=None, platform=get_supported_platform(),
+ python=PY_MAJOR):
+ """Snapshot distributions available on a search path
+
+ Any distributions found on `search_path` are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used.
+
+ `platform` is an optional string specifying the name of the platform
+ that platform-specific distributions must be compatible with. If
+ unspecified, it defaults to the current platform. `python` is an
+ optional string naming the desired version of Python (e.g. ``'3.6'``);
+ it defaults to the current version.
+
+ You may explicitly set `platform` (and/or `python`) to ``None`` if you
+ wish to map *all* distributions, not just those compatible with the
+ running platform or Python version.
+ """
+ self._distmap = {}
+ self.platform = platform
+ self.python = python
+ self.scan(search_path)
+
+ def can_add(self, dist):
+ """Is distribution `dist` acceptable for this environment?
+
+ The distribution must match the platform and python version
+ requirements specified when this environment was created, or False
+ is returned.
+ """
+ py_compat = (
+ self.python is None
+ or dist.py_version is None
+ or dist.py_version == self.python
+ )
+ return py_compat and compatible_platforms(dist.platform, self.platform)
+
+ def remove(self, dist):
+ """Remove `dist` from the environment"""
+ self._distmap[dist.key].remove(dist)
+
+ def scan(self, search_path=None):
+ """Scan `search_path` for distributions usable in this environment
+
+ Any distributions found are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used. Only distributions conforming to
+ the platform/python version defined at initialization are added.
+ """
+ if search_path is None:
+ search_path = sys.path
+
+ for item in search_path:
+ for dist in find_distributions(item):
+ self.add(dist)
+
+ def __getitem__(self, project_name):
+ """Return a newest-to-oldest list of distributions for `project_name`
+
+ Uses case-insensitive `project_name` comparison, assuming all the
+ project's distributions use their project's name converted to all
+ lowercase as their key.
+
+ """
+ distribution_key = project_name.lower()
+ return self._distmap.get(distribution_key, [])
+
+ def add(self, dist):
+ """Add `dist` if we ``can_add()`` it and it has not already been added
+ """
+ if self.can_add(dist) and dist.has_version():
+ dists = self._distmap.setdefault(dist.key, [])
+ if dist not in dists:
+ dists.append(dist)
+ dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
+ def best_match(
+ self, req, working_set, installer=None, replace_conflicting=False):
+ """Find distribution best matching `req` and usable on `working_set`
+
+ This calls the ``find(req)`` method of the `working_set` to see if a
+ suitable distribution is already active. (This may raise
+ ``VersionConflict`` if an unsuitable version of the project is already
+ active in the specified `working_set`.) If a suitable distribution
+ isn't active, this method returns the newest distribution in the
+ environment that meets the ``Requirement`` in `req`. If no suitable
+ distribution is found, and `installer` is supplied, then the result of
+ calling the environment's ``obtain(req, installer)`` method will be
+ returned.
+ """
+ try:
+ dist = working_set.find(req)
+ except VersionConflict:
+ if not replace_conflicting:
+ raise
+ dist = None
+ if dist is not None:
+ return dist
+ for dist in self[req.key]:
+ if dist in req:
+ return dist
+ # try to download/install
+ return self.obtain(req, installer)
+
+ def obtain(self, requirement, installer=None):
+ """Obtain a distribution matching `requirement` (e.g. via download)
+
+ Obtain a distro that matches requirement (e.g. via download). In the
+ base ``Environment`` class, this routine just returns
+ ``installer(requirement)``, unless `installer` is None, in which case
+ None is returned instead. This method is a hook that allows subclasses
+ to attempt other ways of obtaining a distribution before falling back
+ to the `installer` argument."""
+ if installer is not None:
+ return installer(requirement)
+
+ def __iter__(self):
+ """Yield the unique project names of the available distributions"""
+ for key in self._distmap.keys():
+ if self[key]:
+ yield key
+
+ def __iadd__(self, other):
+ """In-place addition of a distribution or environment"""
+ if isinstance(other, Distribution):
+ self.add(other)
+ elif isinstance(other, Environment):
+ for project in other:
+ for dist in other[project]:
+ self.add(dist)
+ else:
+ raise TypeError("Can't add %r to environment" % (other,))
+ return self
+
+ def __add__(self, other):
+ """Add an environment or distribution to an environment"""
+ new = self.__class__([], platform=None, python=None)
+ for env in self, other:
+ new += env
+ return new
+
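+# Editor's note: an assumption-laden sketch (not upstream code) of how an
+# Environment is queried; 'examplepkg' is a hypothetical project name.
+def _example_environment_best_match():
+    env = Environment()                          # scan sys.path for distributions
+    req = Requirement.parse('examplepkg>=1.0')
+    candidates = env[req.key]                    # newest-to-oldest matches
+    best = env.best_match(req, WorkingSet([]))   # best usable distribution or None
+    return candidates, best
+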
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
+
+class ExtractionError(RuntimeError):
+ """An error occurred extracting a resource
+
+ The following attributes are available from instances of this exception:
+
+ manager
+ The resource manager that raised this exception
+
+ cache_path
+ The base directory for resource extraction
+
+ original_error
+ The exception instance that caused extraction to fail
+ """
+
+
+class ResourceManager:
+ """Manage resource extraction and packages"""
+ extraction_path = None
+
+ def __init__(self):
+ self.cached_files = {}
+
+ def resource_exists(self, package_or_requirement, resource_name):
+ """Does the named resource exist?"""
+ return get_provider(package_or_requirement).has_resource(resource_name)
+
+ def resource_isdir(self, package_or_requirement, resource_name):
+ """Is the named resource an existing directory?"""
+ return get_provider(package_or_requirement).resource_isdir(
+ resource_name
+ )
+
+ def resource_filename(self, package_or_requirement, resource_name):
+ """Return a true filesystem path for specified resource"""
+ return get_provider(package_or_requirement).get_resource_filename(
+ self, resource_name
+ )
+
+ def resource_stream(self, package_or_requirement, resource_name):
+ """Return a readable file-like object for specified resource"""
+ return get_provider(package_or_requirement).get_resource_stream(
+ self, resource_name
+ )
+
+ def resource_string(self, package_or_requirement, resource_name):
+ """Return specified resource as a string"""
+ return get_provider(package_or_requirement).get_resource_string(
+ self, resource_name
+ )
+
+ def resource_listdir(self, package_or_requirement, resource_name):
+ """List the contents of the named resource directory"""
+ return get_provider(package_or_requirement).resource_listdir(
+ resource_name
+ )
+
+ def extraction_error(self):
+ """Give an error message for problems extracting file(s)"""
+
+ old_exc = sys.exc_info()[1]
+ cache_path = self.extraction_path or get_default_cache()
+
+ tmpl = textwrap.dedent("""
+ Can't extract file(s) to egg cache
+
+ The following error occurred while trying to extract file(s)
+ to the Python egg cache:
+
+ {old_exc}
+
+ The Python egg cache directory is currently set to:
+
+ {cache_path}
+
+ Perhaps your account does not have write access to this directory?
+ You can change the cache directory by setting the PYTHON_EGG_CACHE
+ environment variable to point to an accessible directory.
+ """).lstrip()
+ err = ExtractionError(tmpl.format(**locals()))
+ err.manager = self
+ err.cache_path = cache_path
+ err.original_error = old_exc
+ raise err
+
+ def get_cache_path(self, archive_name, names=()):
+ """Return absolute location in cache for `archive_name` and `names`
+
+ The parent directory of the resulting path will be created if it does
+ not already exist. `archive_name` should be the base filename of the
+ enclosing egg (which may not be the name of the enclosing zipfile!),
+ including its ".egg" extension. `names`, if provided, should be a
+ sequence of path name parts "under" the egg's extraction location.
+
+ This method should only be called by resource providers that need to
+ obtain an extraction location, and only for names they intend to
+ extract, as it tracks the generated names for possible cleanup later.
+ """
+ extract_path = self.extraction_path or get_default_cache()
+ target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
+ try:
+ _bypass_ensure_directory(target_path)
+ except Exception:
+ self.extraction_error()
+
+ self._warn_unsafe_extraction_path(extract_path)
+
+ self.cached_files[target_path] = 1
+ return target_path
+
+ @staticmethod
+ def _warn_unsafe_extraction_path(path):
+ """
+ If the default extraction path is overridden and set to an insecure
+ location, such as /tmp, it opens up an opportunity for an attacker to
+ replace an extracted file with an unauthorized payload. Warn the user
+ if a known insecure location is used.
+
+ See Distribute #375 for more details.
+ """
+ if os.name == 'nt' and not path.startswith(os.environ['windir']):
+ # On Windows, permissions are generally restrictive by default
+ # and temp directories are not writable by other users, so
+ # bypass the warning.
+ return
+ mode = os.stat(path).st_mode
+ if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+ msg = (
+ "Extraction path is writable by group/others "
+ "and vulnerable to attack when "
+ "used with get_resource_filename ({path}). "
+ "Consider a more secure "
+ "location (set with .set_extraction_path or the "
+ "PYTHON_EGG_CACHE environment variable)."
+ ).format(**locals())
+ warnings.warn(msg, UserWarning)
+
+ def postprocess(self, tempname, filename):
+ """Perform any platform-specific postprocessing of `tempname`
+
+ This is where Mac header rewrites should be done; other platforms don't
+ have anything special they should do.
+
+ Resource providers should call this method ONLY after successfully
+ extracting a compressed resource. They must NOT call it on resources
+ that are already in the filesystem.
+
+ `tempname` is the current (temporary) name of the file, and `filename`
+ is the name it will be renamed to by the caller after this routine
+ returns.
+ """
+
+ if os.name == 'posix':
+ # Make the resource executable
+ mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+ os.chmod(tempname, mode)
+
+ def set_extraction_path(self, path):
+ """Set the base path where resources will be extracted to, if needed.
+
+ If you do not call this routine before any extractions take place, the
+ path defaults to the return value of ``get_default_cache()``. (Which
+ is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+ platform-specific fallbacks. See that routine's documentation for more
+ details.)
+
+ Resources are extracted to subdirectories of this path based upon
+ information given by the ``IResourceProvider``. You may set this to a
+ temporary directory, but then you must call ``cleanup_resources()`` to
+ delete the extracted files when done. There is no guarantee that
+ ``cleanup_resources()`` will be able to remove all extracted files.
+
+ (Note: you may not change the extraction path for a given resource
+ manager once resources have been extracted, unless you first call
+ ``cleanup_resources()``.)
+ """
+ if self.cached_files:
+ raise ValueError(
+ "Can't change extraction path, files already extracted"
+ )
+
+ self.extraction_path = path
+
+ def cleanup_resources(self, force=False):
+ """
+ Delete all extracted resource files and directories, returning a list
+ of the file and directory names that could not be successfully removed.
+ This function does not have any concurrency protection, so it should
+ generally only be called when the extraction path is a temporary
+ directory exclusive to a single process. This method is not
+ automatically called; you must call it explicitly or register it as an
+ ``atexit`` function if you wish to ensure cleanup of a temporary
+ directory used for extractions.
+ """
+ # XXX
+
+
+def get_default_cache():
+ """
+ Return the ``PYTHON_EGG_CACHE`` environment variable
+ or a platform-relevant user cache dir for an app
+ named "Python-Eggs".
+ """
+ return (
+ os.environ.get('PYTHON_EGG_CACHE')
+ or appdirs.user_cache_dir(appname='Python-Eggs')
+ )
+
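+# Editor's note: a minimal sketch (not upstream code) of how the egg cache
+# location is chosen; '/tmp/egg-cache' is an arbitrary example path.
+def _example_default_cache():
+    os.environ['PYTHON_EGG_CACHE'] = '/tmp/egg-cache'   # explicit override wins
+    assert get_default_cache() == '/tmp/egg-cache'
+    del os.environ['PYTHON_EGG_CACHE']
+    return get_default_cache()   # falls back to an appdirs per-user cache dir
+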
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """
+ Convert an arbitrary string to a standard version string
+ """
+ try:
+ # normalize the version
+ return str(packaging.version.Version(version))
+ except packaging.version.InvalidVersion:
+ version = version.replace(' ', '.')
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+ """Convert an arbitrary string to a standard 'extra' name
+
+ Any runs of non-alphanumeric characters are replaced with a single '_',
+ and the result is always lowercased.
+ """
+ return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
+ return name.replace('-', '_')
+
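+# Editor's note: worked examples (editor's sketch, not upstream code) for the
+# normalization helpers above; the inputs are arbitrary.
+def _example_name_normalization():
+    assert safe_name('my_package') == 'my-package'      # '_' is not allowed in names
+    assert safe_version('2.4 beta 1') == '2.4.beta.1'   # non-PEP 440 fallback path
+    assert safe_extra('My Extra') == 'my_extra'
+    assert to_filename('my-package') == 'my_package'
+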
+
+def invalid_marker(text):
+ """
+ Validate text as a PEP 508 environment marker; return an exception
+ instance if it is invalid, or False otherwise.
+ """
+ try:
+ evaluate_marker(text)
+ except SyntaxError as e:
+ e.filename = None
+ e.lineno = None
+ return e
+ return False
+
+
+def evaluate_marker(text, extra=None):
+ """
+ Evaluate a PEP 508 environment marker.
+ Return a boolean indicating the marker result in this environment.
+ Raise SyntaxError if marker is invalid.
+
+ This implementation uses the 'pyparsing' module.
+ """
+ try:
+ marker = packaging.markers.Marker(text)
+ return marker.evaluate()
+ except packaging.markers.InvalidMarker as e:
+ raise SyntaxError(e) from e
+
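+# Editor's note: a hedged example (not upstream code) of the marker helpers.
+def _example_markers():
+    # A valid PEP 508 marker evaluates to a boolean for this interpreter.
+    assert evaluate_marker('python_version >= "3.5"') is True
+    # invalid_marker() returns the SyntaxError instance instead of raising.
+    err = invalid_marker('this is not a marker')
+    assert isinstance(err, SyntaxError)
+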
+
+class NullProvider:
+ """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+ egg_name = None
+ egg_info = None
+ loader = None
+
+ def __init__(self, module):
+ self.loader = getattr(module, '__loader__', None)
+ self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+ def get_resource_filename(self, manager, resource_name):
+ return self._fn(self.module_path, resource_name)
+
+ def get_resource_stream(self, manager, resource_name):
+ return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+ def get_resource_string(self, manager, resource_name):
+ return self._get(self._fn(self.module_path, resource_name))
+
+ def has_resource(self, resource_name):
+ return self._has(self._fn(self.module_path, resource_name))
+
+ def _get_metadata_path(self, name):
+ return self._fn(self.egg_info, name)
+
+ def has_metadata(self, name):
+ if not self.egg_info:
+ return self.egg_info
+
+ path = self._get_metadata_path(name)
+ return self._has(path)
+
+ def get_metadata(self, name):
+ if not self.egg_info:
+ return ""
+ path = self._get_metadata_path(name)
+ value = self._get(path)
+ try:
+ return value.decode('utf-8')
+ except UnicodeDecodeError as exc:
+ # Include the path in the error message to simplify
+ # troubleshooting, and without changing the exception type.
+ exc.reason += ' in {} file at path: {}'.format(name, path)
+ raise
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+ def resource_isdir(self, resource_name):
+ return self._isdir(self._fn(self.module_path, resource_name))
+
+ def metadata_isdir(self, name):
+ return self.egg_info and self._isdir(self._fn(self.egg_info, name))
+
+ def resource_listdir(self, resource_name):
+ return self._listdir(self._fn(self.module_path, resource_name))
+
+ def metadata_listdir(self, name):
+ if self.egg_info:
+ return self._listdir(self._fn(self.egg_info, name))
+ return []
+
+ def run_script(self, script_name, namespace):
+ script = 'scripts/' + script_name
+ if not self.has_metadata(script):
+ raise ResolutionError(
+ "Script {script!r} not found in metadata at {self.egg_info!r}"
+ .format(**locals()),
+ )
+ script_text = self.get_metadata(script).replace('\r\n', '\n')
+ script_text = script_text.replace('\r', '\n')
+ script_filename = self._fn(self.egg_info, script)
+ namespace['__file__'] = script_filename
+ if os.path.exists(script_filename):
+ with open(script_filename) as fid:
+ source = fid.read()
+ code = compile(source, script_filename, 'exec')
+ exec(code, namespace, namespace)
+ else:
+ from linecache import cache
+ cache[script_filename] = (
+ len(script_text), 0, script_text.split('\n'), script_filename
+ )
+ script_code = compile(script_text, script_filename, 'exec')
+ exec(script_code, namespace, namespace)
+
+ def _has(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _isdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _listdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _fn(self, base, resource_name):
+ self._validate_resource_path(resource_name)
+ if resource_name:
+ return os.path.join(base, *resource_name.split('/'))
+ return base
+
+ @staticmethod
+ def _validate_resource_path(path):
+ """
+ Validate the resource paths according to the docs.
+ https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
+
+ >>> warned = getfixture('recwarn')
+ >>> warnings.simplefilter('always')
+ >>> vrp = NullProvider._validate_resource_path
+ >>> vrp('foo/bar.txt')
+ >>> bool(warned)
+ False
+ >>> vrp('../foo/bar.txt')
+ >>> bool(warned)
+ True
+ >>> warned.clear()
+ >>> vrp('/foo/bar.txt')
+ >>> bool(warned)
+ True
+ >>> vrp('foo/../../bar.txt')
+ >>> bool(warned)
+ True
+ >>> warned.clear()
+ >>> vrp('foo/f../bar.txt')
+ >>> bool(warned)
+ False
+
+ Windows path separators are straight-up disallowed.
+ >>> vrp(r'\\foo/bar.txt')
+ Traceback (most recent call last):
+ ...
+ ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+ >>> vrp(r'C:\\foo/bar.txt')
+ Traceback (most recent call last):
+ ...
+ ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+ Blank values are allowed
+
+ >>> vrp('')
+ >>> bool(warned)
+ False
+
+ Non-string values are not.
+
+ >>> vrp(None)
+ Traceback (most recent call last):
+ ...
+ AttributeError: ...
+ """
+ invalid = (
+ os.path.pardir in path.split(posixpath.sep) or
+ posixpath.isabs(path) or
+ ntpath.isabs(path)
+ )
+ if not invalid:
+ return
+
+ msg = "Use of .. or absolute path in a resource path is not allowed."
+
+ # Aggressively disallow Windows absolute paths
+ if ntpath.isabs(path) and not posixpath.isabs(path):
+ raise ValueError(msg)
+
+ # for compatibility, warn; in future
+ # raise ValueError(msg)
+ warnings.warn(
+ msg[:-1] + " and will raise exceptions in a future release.",
+ DeprecationWarning,
+ stacklevel=4,
+ )
+
+ def _get(self, path):
+ if hasattr(self.loader, 'get_data'):
+ return self.loader.get_data(path)
+ raise NotImplementedError(
+ "Can't perform this operation for loaders without 'get_data()'"
+ )
+
+
+register_loader_type(object, NullProvider)
+
+
+def _parents(path):
+ """
+ yield all parents of path including path
+ """
+ last = None
+ while path != last:
+ yield path
+ last = path
+ path, _ = os.path.split(path)
+
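+# Editor's note: quick illustration (editor's sketch) of _parents(); the
+# result shown assumes a POSIX-style os.path.  EggProvider below uses this to
+# find an enclosing .egg directory.
+def _example_parents():
+    assert list(_parents('/a/b/c')) == ['/a/b/c', '/a/b', '/a', '/']
+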
+
+class EggProvider(NullProvider):
+ """Provider based on a virtual filesystem"""
+
+ def __init__(self, module):
+ NullProvider.__init__(self, module)
+ self._setup_prefix()
+
+ def _setup_prefix(self):
+ # Assume that metadata may be nested inside a "basket"
+ # of multiple eggs and use module_path instead of .archive.
+ eggs = filter(_is_egg_path, _parents(self.module_path))
+ egg = next(eggs, None)
+ egg and self._set_egg(egg)
+
+ def _set_egg(self, path):
+ self.egg_name = os.path.basename(path)
+ self.egg_info = os.path.join(path, 'EGG-INFO')
+ self.egg_root = path
+
+
+class DefaultProvider(EggProvider):
+ """Provides access to package resources in the filesystem"""
+
+ def _has(self, path):
+ return os.path.exists(path)
+
+ def _isdir(self, path):
+ return os.path.isdir(path)
+
+ def _listdir(self, path):
+ return os.listdir(path)
+
+ def get_resource_stream(self, manager, resource_name):
+ return open(self._fn(self.module_path, resource_name), 'rb')
+
+ def _get(self, path):
+ with open(path, 'rb') as stream:
+ return stream.read()
+
+ @classmethod
+ def _register(cls):
+ loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
+ for name in loader_names:
+ loader_cls = getattr(importlib_machinery, name, type(None))
+ register_loader_type(loader_cls, cls)
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+ """Provider that returns nothing for all requests"""
+
+ module_path = None
+
+ _isdir = _has = lambda self, path: False
+
+ def _get(self, path):
+ return ''
+
+ def _listdir(self, path):
+ return []
+
+ def __init__(self):
+ pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(dict):
+ """
+ zip manifest builder
+ """
+
+ @classmethod
+ def build(cls, path):
+ """
+ Build a dictionary similar to the zipimport directory
+ caches, except instead of tuples, store ZipInfo objects.
+
+ Use a platform-specific path separator (os.sep) for the path keys
+ for compatibility with pypy on Windows.
+ """
+ with zipfile.ZipFile(path) as zfile:
+ items = (
+ (
+ name.replace('/', os.sep),
+ zfile.getinfo(name),
+ )
+ for name in zfile.namelist()
+ )
+ return dict(items)
+
+ load = build
+
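+# Editor's note: a hedged sketch (not upstream code) of building a zip
+# manifest; 'example.egg' is a hypothetical archive path.
+def _example_zip_manifest(path='example.egg'):
+    manifest = ZipManifests.build(path)   # name -> zipfile.ZipInfo
+    return sorted(manifest)               # keys use os.sep separators
+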
+
+class MemoizedZipManifests(ZipManifests):
+ """
+ Memoized zipfile manifests.
+ """
+ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
+
+ def load(self, path):
+ """
+ Load a manifest at path or return a suitable manifest already loaded.
+ """
+ path = os.path.normpath(path)
+ mtime = os.stat(path).st_mtime
+
+ if path not in self or self[path].mtime != mtime:
+ manifest = self.build(path)
+ self[path] = self.manifest_mod(manifest, mtime)
+
+ return self[path].manifest
+
+
+class ZipProvider(EggProvider):
+ """Resource support for zips and eggs"""
+
+ eagers = None
+ _zip_manifests = MemoizedZipManifests()
+
+ def __init__(self, module):
+ EggProvider.__init__(self, module)
+ self.zip_pre = self.loader.archive + os.sep
+
+ def _zipinfo_name(self, fspath):
+ # Convert a virtual filename (full path to file) into a zipfile subpath
+ # usable with the zipimport directory cache for our target archive
+ fspath = fspath.rstrip(os.sep)
+ if fspath == self.loader.archive:
+ return ''
+ if fspath.startswith(self.zip_pre):
+ return fspath[len(self.zip_pre):]
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.zip_pre)
+ )
+
+ def _parts(self, zip_path):
+ # Convert a zipfile subpath into an egg-relative path part list.
+ # pseudo-fs path
+ fspath = self.zip_pre + zip_path
+ if fspath.startswith(self.egg_root + os.sep):
+ return fspath[len(self.egg_root) + 1:].split(os.sep)
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.egg_root)
+ )
+
+ @property
+ def zipinfo(self):
+ return self._zip_manifests.load(self.loader.archive)
+
+ def get_resource_filename(self, manager, resource_name):
+ if not self.egg_name:
+ raise NotImplementedError(
+ "resource_filename() only supported for .egg, not .zip"
+ )
+ # no need to lock for extraction, since we use temp names
+ zip_path = self._resource_to_zip(resource_name)
+ eagers = self._get_eager_resources()
+ if '/'.join(self._parts(zip_path)) in eagers:
+ for name in eagers:
+ self._extract_resource(manager, self._eager_to_zip(name))
+ return self._extract_resource(manager, zip_path)
+
+ @staticmethod
+ def _get_date_and_size(zip_stat):
+ size = zip_stat.file_size
+ # ymdhms+wday, yday, dst
+ date_time = zip_stat.date_time + (0, 0, -1)
+ # 1980 offset already done
+ timestamp = time.mktime(date_time)
+ return timestamp, size
+
+ def _extract_resource(self, manager, zip_path):
+
+ if zip_path in self._index():
+ for name in self._index()[zip_path]:
+ last = self._extract_resource(
+ manager, os.path.join(zip_path, name)
+ )
+ # return the extracted directory name
+ return os.path.dirname(last)
+
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+ if not WRITE_SUPPORT:
+ raise IOError('"os.rename" and "os.unlink" are not supported '
+ 'on this platform')
+ try:
+
+ real_path = manager.get_cache_path(
+ self.egg_name, self._parts(zip_path)
+ )
+
+ if self._is_current(real_path, zip_path):
+ return real_path
+
+ outf, tmpnam = _mkstemp(
+ ".$extract",
+ dir=os.path.dirname(real_path),
+ )
+ os.write(outf, self.loader.get_data(zip_path))
+ os.close(outf)
+ utime(tmpnam, (timestamp, timestamp))
+ manager.postprocess(tmpnam, real_path)
+
+ try:
+ rename(tmpnam, real_path)
+
+ except os.error:
+ if os.path.isfile(real_path):
+ if self._is_current(real_path, zip_path):
+ # the file became current since it was checked above,
+ # so proceed.
+ return real_path
+ # Windows, del old file and retry
+ elif os.name == 'nt':
+ unlink(real_path)
+ rename(tmpnam, real_path)
+ return real_path
+ raise
+
+ except os.error:
+ # report a user-friendly error
+ manager.extraction_error()
+
+ return real_path
+
+ def _is_current(self, file_path, zip_path):
+ """
+ Return True if the file_path is current for this zip_path
+ """
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+ if not os.path.isfile(file_path):
+ return False
+ stat = os.stat(file_path)
+ if stat.st_size != size or stat.st_mtime != timestamp:
+ return False
+ # check that the contents match
+ zip_contents = self.loader.get_data(zip_path)
+ with open(file_path, 'rb') as f:
+ file_contents = f.read()
+ return zip_contents == file_contents
+
+ def _get_eager_resources(self):
+ if self.eagers is None:
+ eagers = []
+ for name in ('native_libs.txt', 'eager_resources.txt'):
+ if self.has_metadata(name):
+ eagers.extend(self.get_metadata_lines(name))
+ self.eagers = eagers
+ return self.eagers
+
+ def _index(self):
+ try:
+ return self._dirindex
+ except AttributeError:
+ ind = {}
+ for path in self.zipinfo:
+ parts = path.split(os.sep)
+ while parts:
+ parent = os.sep.join(parts[:-1])
+ if parent in ind:
+ ind[parent].append(parts[-1])
+ break
+ else:
+ ind[parent] = [parts.pop()]
+ self._dirindex = ind
+ return ind
+
+ def _has(self, fspath):
+ zip_path = self._zipinfo_name(fspath)
+ return zip_path in self.zipinfo or zip_path in self._index()
+
+ def _isdir(self, fspath):
+ return self._zipinfo_name(fspath) in self._index()
+
+ def _listdir(self, fspath):
+ return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+ def _eager_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+ def _resource_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+ """Metadata handler for standalone PKG-INFO files
+
+ Usage::
+
+ metadata = FileMetadata("/path/to/PKG-INFO")
+
+    This provider rejects all data and metadata requests except for PKG-INFO,
+    which is treated as existing; its content is read from the file at
+    the provided location.
+ """
+
+ def __init__(self, path):
+ self.path = path
+
+ def _get_metadata_path(self, name):
+ return self.path
+
+ def has_metadata(self, name):
+ return name == 'PKG-INFO' and os.path.isfile(self.path)
+
+ def get_metadata(self, name):
+ if name != 'PKG-INFO':
+ raise KeyError("No metadata except PKG-INFO is available")
+
+ with io.open(self.path, encoding='utf-8', errors="replace") as f:
+ metadata = f.read()
+ self._warn_on_replacement(metadata)
+ return metadata
+
+ def _warn_on_replacement(self, metadata):
+ replacement_char = '�'
+ if replacement_char in metadata:
+ tmpl = "{self.path} could not be properly decoded in UTF-8"
+ msg = tmpl.format(**locals())
+ warnings.warn(msg)
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+ """Metadata provider for egg directories
+
+ Usage::
+
+ # Development eggs:
+
+ egg_info = "/path/to/PackageName.egg-info"
+ base_dir = os.path.dirname(egg_info)
+ metadata = PathMetadata(base_dir, egg_info)
+ dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+ dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
+
+ # Unpacked egg directories:
+
+ egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
+ """
+
+ def __init__(self, path, egg_info):
+ self.module_path = path
+ self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+ """Metadata provider for .egg files"""
+
+ def __init__(self, importer):
+ """Create a metadata provider from a zipimporter"""
+
+ self.zip_pre = importer.archive + os.sep
+ self.loader = importer
+ if importer.prefix:
+ self.module_path = os.path.join(importer.archive, importer.prefix)
+ else:
+ self.module_path = importer.archive
+ self._setup_prefix()
+
+
+_declare_state('dict', _distribution_finders={})
+
+
+def register_finder(importer_type, distribution_finder):
+ """Register `distribution_finder` to find distributions in sys.path items
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `distribution_finder` is a callable that, passed a path
+ item and the importer instance, yields ``Distribution`` instances found on
+ that path item. See ``pkg_resources.find_on_path`` for an example."""
+ _distribution_finders[importer_type] = distribution_finder
+
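+# A minimal illustrative sketch (the importer type and finder below are
+# hypothetical, not part of this module):
+#
+#     def find_widget_dists(importer, path_item, only=False):
+#         return iter(())  # yield Distribution instances found here
+#
+#     register_finder(WidgetImporter, find_widget_dists)
+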
+
+def find_distributions(path_item, only=False):
+ """Yield distributions accessible via `path_item`"""
+ importer = get_importer(path_item)
+ finder = _find_adapter(_distribution_finders, importer)
+ return finder(importer, path_item, only)
+
+
+def find_eggs_in_zip(importer, path_item, only=False):
+ """
+ Find eggs in zip files; possibly multiple nested eggs.
+ """
+ if importer.archive.endswith('.whl'):
+ # wheels are not supported with this finder
+ # they don't have PKG-INFO metadata, and won't ever contain eggs
+ return
+ metadata = EggMetadata(importer)
+ if metadata.has_metadata('PKG-INFO'):
+ yield Distribution.from_filename(path_item, metadata=metadata)
+ if only:
+ # don't yield nested distros
+ return
+ for subitem in metadata.resource_listdir(''):
+ if _is_egg_path(subitem):
+ subpath = os.path.join(path_item, subitem)
+ dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
+ for dist in dists:
+ yield dist
+ elif subitem.lower().endswith('.dist-info'):
+ subpath = os.path.join(path_item, subitem)
+ submeta = EggMetadata(zipimport.zipimporter(subpath))
+ submeta.egg_info = subpath
+ yield Distribution.from_location(path_item, subitem, submeta)
+
+
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
+def find_nothing(importer, path_item, only=False):
+ return ()
+
+
+register_finder(object, find_nothing)
+
+
+def _by_version_descending(names):
+ """
+ Given a list of filenames, return them in descending order
+ by version number.
+
+ >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
+ >>> _by_version_descending(names)
+ ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
+ >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
+ >>> _by_version_descending(names)
+ ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
+ >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
+ >>> _by_version_descending(names)
+ ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
+ """
+ def _by_version(name):
+ """
+ Parse each component of the filename
+ """
+ name, ext = os.path.splitext(name)
+ parts = itertools.chain(name.split('-'), [ext])
+ return [packaging.version.parse(part) for part in parts]
+
+ return sorted(names, key=_by_version, reverse=True)
+
+
+def find_on_path(importer, path_item, only=False):
+ """Yield distributions accessible on a sys.path directory"""
+ path_item = _normalize_cached(path_item)
+
+ if _is_unpacked_egg(path_item):
+ yield Distribution.from_filename(
+ path_item, metadata=PathMetadata(
+ path_item, os.path.join(path_item, 'EGG-INFO')
+ )
+ )
+ return
+
+ entries = (
+ os.path.join(path_item, child)
+ for child in safe_listdir(path_item)
+ )
+
+ # for performance, before sorting by version,
+ # screen entries for only those that will yield
+ # distributions
+ filtered = (
+ entry
+ for entry in entries
+ if dist_factory(path_item, entry, only)
+ )
+
+ # scan for .egg and .egg-info in directory
+ path_item_entries = _by_version_descending(filtered)
+ for entry in path_item_entries:
+ fullpath = os.path.join(path_item, entry)
+ factory = dist_factory(path_item, entry, only)
+ for dist in factory(fullpath):
+ yield dist
+
+
+def dist_factory(path_item, entry, only):
+ """Return a dist_factory for the given entry."""
+ lower = entry.lower()
+ is_egg_info = lower.endswith('.egg-info')
+ is_dist_info = (
+ lower.endswith('.dist-info') and
+ os.path.isdir(os.path.join(path_item, entry))
+ )
+ is_meta = is_egg_info or is_dist_info
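+    # Dispatch on the entry type: metadata files/directories yield a
+    # single distribution, egg paths recurse via find_distributions,
+    # .egg-link files are dereferenced, and anything else yields
+    # nothing (NoDists).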
+ return (
+ distributions_from_metadata
+ if is_meta else
+ find_distributions
+ if not only and _is_egg_path(entry) else
+ resolve_egg_link
+ if not only and lower.endswith('.egg-link') else
+ NoDists()
+ )
+
+
+class NoDists:
+ """
+ >>> bool(NoDists())
+ False
+
+ >>> list(NoDists()('anything'))
+ []
+ """
+ def __bool__(self):
+ return False
+
+ def __call__(self, fullpath):
+ return iter(())
+
+
+def safe_listdir(path):
+ """
+ Attempt to list contents of path, but suppress some exceptions.
+ """
+ try:
+ return os.listdir(path)
+ except (PermissionError, NotADirectoryError):
+ pass
+ except OSError as e:
+        # Ignore the directory if it does not exist, is not a directory,
+        # or if permission is denied
+ if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
+ raise
+ return ()
+
+
+def distributions_from_metadata(path):
+ root = os.path.dirname(path)
+ if os.path.isdir(path):
+ if len(os.listdir(path)) == 0:
+ # empty metadata dir; skip
+ return
+ metadata = PathMetadata(root, path)
+ else:
+ metadata = FileMetadata(path)
+ entry = os.path.basename(path)
+ yield Distribution.from_location(
+ root, entry, metadata, precedence=DEVELOP_DIST,
+ )
+
+
+def non_empty_lines(path):
+ """
+ Yield non-empty lines from file at path
+ """
+ with open(path) as f:
+ for line in f:
+ line = line.strip()
+ if line:
+ yield line
+
+
+def resolve_egg_link(path):
+ """
+ Given a path to an .egg-link, resolve distributions
+ present in the referenced path.
+ """
+ referenced_paths = non_empty_lines(path)
+ resolved_paths = (
+ os.path.join(os.path.dirname(path), ref)
+ for ref in referenced_paths
+ )
+ dist_groups = map(find_distributions, resolved_paths)
+ return next(dist_groups, ())
+
+
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_finder(importlib_machinery.FileFinder, find_on_path)
+
+_declare_state('dict', _namespace_handlers={})
+_declare_state('dict', _namespace_packages={})
+
+
+def register_namespace_handler(importer_type, namespace_handler):
+ """Register `namespace_handler` to declare namespace packages
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `namespace_handler` is a callable like this::
+
+ def namespace_handler(importer, path_entry, moduleName, module):
+ # return a path_entry to use for child packages
+
+ Namespace handlers are only called if the importer object has already
+ agreed that it can handle the relevant path item, and they should only
+ return a subpath if the module __path__ does not already contain an
+ equivalent subpath. For an example namespace handler, see
+ ``pkg_resources.file_ns_handler``.
+ """
+ _namespace_handlers[importer_type] = namespace_handler
+
+
+def _handle_ns(packageName, path_item):
+ """Ensure that named package includes a subpath of path_item (if needed)"""
+
+ importer = get_importer(path_item)
+ if importer is None:
+ return None
+
+ # use find_spec (PEP 451) and fall-back to find_module (PEP 302)
+ try:
+ loader = importer.find_spec(packageName).loader
+ except AttributeError:
+ # capture warnings due to #1111
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ loader = importer.find_module(packageName)
+
+ if loader is None:
+ return None
+ module = sys.modules.get(packageName)
+ if module is None:
+ module = sys.modules[packageName] = types.ModuleType(packageName)
+ module.__path__ = []
+ _set_parent_ns(packageName)
+ elif not hasattr(module, '__path__'):
+ raise TypeError("Not a package:", packageName)
+ handler = _find_adapter(_namespace_handlers, importer)
+ subpath = handler(importer, path_item, packageName, module)
+ if subpath is not None:
+ path = module.__path__
+ path.append(subpath)
+ importlib.import_module(packageName)
+ _rebuild_mod_path(path, packageName, module)
+ return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module):
+ """
+ Rebuild module.__path__ ensuring that all entries are ordered
+ corresponding to their sys.path order
+ """
+ sys_path = [_normalize_cached(p) for p in sys.path]
+
+ def safe_sys_path_index(entry):
+ """
+ Workaround for #520 and #513.
+ """
+ try:
+ return sys_path.index(entry)
+ except ValueError:
+ return float('inf')
+
+ def position_in_sys_path(path):
+ """
+ Return the ordinal of the path based on its position in sys.path
+ """
+ path_parts = path.split(os.sep)
+ module_parts = package_name.count('.') + 1
+ parts = path_parts[:-module_parts]
+ return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
+
+ new_path = sorted(orig_path, key=position_in_sys_path)
+ new_path = [_normalize_cached(p) for p in new_path]
+
+ if isinstance(module.__path__, list):
+ module.__path__[:] = new_path
+ else:
+ module.__path__ = new_path
+
+
+def declare_namespace(packageName):
+ """Declare that package 'packageName' is a namespace package"""
+
+ _imp.acquire_lock()
+ try:
+ if packageName in _namespace_packages:
+ return
+
+ path = sys.path
+ parent, _, _ = packageName.rpartition('.')
+
+ if parent:
+ declare_namespace(parent)
+ if parent not in _namespace_packages:
+ __import__(parent)
+ try:
+ path = sys.modules[parent].__path__
+ except AttributeError as e:
+ raise TypeError("Not a package:", parent) from e
+
+ # Track what packages are namespaces, so when new path items are added,
+ # they can be updated
+ _namespace_packages.setdefault(parent or None, []).append(packageName)
+ _namespace_packages.setdefault(packageName, [])
+
+ for path_item in path:
+ # Ensure all the parent's path items are reflected in the child,
+ # if they apply
+ _handle_ns(packageName, path_item)
+
+ finally:
+ _imp.release_lock()
+
+
+def fixup_namespace_packages(path_item, parent=None):
+ """Ensure that previously-declared namespace packages include path_item"""
+ _imp.acquire_lock()
+ try:
+ for package in _namespace_packages.get(parent, ()):
+ subpath = _handle_ns(package, path_item)
+ if subpath:
+ fixup_namespace_packages(subpath, package)
+ finally:
+ _imp.release_lock()
+
+
+def file_ns_handler(importer, path_item, packageName, module):
+ """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+ subpath = os.path.join(path_item, packageName.split('.')[-1])
+ normalized = _normalize_cached(subpath)
+ for item in module.__path__:
+ if _normalize_cached(item) == normalized:
+ break
+ else:
+ # Only return the path if it's not already there
+ return subpath
+
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+ return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
+def normalize_path(filename):
+ """Normalize a file/dir name for comparison purposes"""
+ return os.path.normcase(os.path.realpath(os.path.normpath(
+ _cygwin_patch(filename))))
+
+
+def _cygwin_patch(filename): # pragma: nocover
+ """
+    Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
+    symlink components. Using os.path.abspath() works around
+    this limitation. A fix in os.getcwd() would probably be
+    better, in Cygwin even more so, except that this seems to
+    be by design...
+ """
+ return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
+
+
+def _normalize_cached(filename, _cache={}):
+ try:
+ return _cache[filename]
+ except KeyError:
+ _cache[filename] = result = normalize_path(filename)
+ return result
+
+
+def _is_egg_path(path):
+ """
+ Determine if given path appears to be an egg.
+ """
+ return _is_zip_egg(path) or _is_unpacked_egg(path)
+
+
+def _is_zip_egg(path):
+ return (
+ path.lower().endswith('.egg') and
+ os.path.isfile(path) and
+ zipfile.is_zipfile(path)
+ )
+
+
+def _is_unpacked_egg(path):
+ """
+ Determine if given path appears to be an unpacked egg.
+ """
+ return (
+ path.lower().endswith('.egg') and
+ os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
+ )
+
+
+def _set_parent_ns(packageName):
+ parts = packageName.split('.')
+ name = parts.pop()
+ if parts:
+ parent = '.'.join(parts)
+ setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+ """Yield non-empty/non-comment lines of a string or sequence"""
+ if isinstance(strs, str):
+ for s in strs.splitlines():
+ s = s.strip()
+ # skip blank lines/comments
+ if s and not s.startswith('#'):
+ yield s
+ else:
+ for ss in strs:
+ for s in yield_lines(ss):
+ yield s
+
+
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+ r"""
+ (?P<name>[^-]+) (
+ -(?P<ver>[^-]+) (
+ -py(?P<pyver>[^-]+) (
+ -(?P<plat>.+)
+ )?
+ )?
+ )?
+ """,
+ re.VERBOSE | re.IGNORECASE,
+).match
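+# EGG_NAME parses egg basenames of the form
+# "Name-Version-pyX.Y-platform", with each trailing part optional;
+# for an illustrative "FooPkg-1.2-py3.8", the groups are
+# name='FooPkg', ver='1.2', pyver='3.8', plat=None.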
+
+
+class EntryPoint:
+ """Object representing an advertised importable object"""
+
+ def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+ if not MODULE(module_name):
+ raise ValueError("Invalid module name", module_name)
+ self.name = name
+ self.module_name = module_name
+ self.attrs = tuple(attrs)
+ self.extras = tuple(extras)
+ self.dist = dist
+
+ def __str__(self):
+ s = "%s = %s" % (self.name, self.module_name)
+ if self.attrs:
+ s += ':' + '.'.join(self.attrs)
+ if self.extras:
+ s += ' [%s]' % ','.join(self.extras)
+ return s
+
+ def __repr__(self):
+ return "EntryPoint.parse(%r)" % str(self)
+
+ def load(self, require=True, *args, **kwargs):
+ """
+ Require packages for this EntryPoint, then resolve it.
+ """
+ if not require or args or kwargs:
+ warnings.warn(
+ "Parameters to load are deprecated. Call .resolve and "
+ ".require separately.",
+ PkgResourcesDeprecationWarning,
+ stacklevel=2,
+ )
+ if require:
+ self.require(*args, **kwargs)
+ return self.resolve()
+
+ def resolve(self):
+ """
+ Resolve the entry point from its module and attrs.
+ """
+ module = __import__(self.module_name, fromlist=['__name__'], level=0)
+ try:
+ return functools.reduce(getattr, self.attrs, module)
+ except AttributeError as exc:
+ raise ImportError(str(exc)) from exc
+
+ def require(self, env=None, installer=None):
+ if self.extras and not self.dist:
+ raise UnknownExtra("Can't require() without a distribution", self)
+
+ # Get the requirements for this entry point with all its extras and
+ # then resolve them. We have to pass `extras` along when resolving so
+ # that the working set knows what extras we want. Otherwise, for
+ # dist-info distributions, the working set will assume that the
+ # requirements for that extra are purely optional and skip over them.
+ reqs = self.dist.requires(self.extras)
+ items = working_set.resolve(reqs, env, installer, extras=self.extras)
+ list(map(working_set.add, items))
+
+ pattern = re.compile(
+ r'\s*'
+ r'(?P<name>.+?)\s*'
+ r'=\s*'
+ r'(?P<module>[\w.]+)\s*'
+ r'(:\s*(?P<attr>[\w.]+))?\s*'
+ r'(?P<extras>\[.*\])?\s*$'
+ )
+
+ @classmethod
+ def parse(cls, src, dist=None):
+ """Parse a single entry point from string `src`
+
+ Entry point syntax follows the form::
+
+ name = some.module:some.attr [extra1, extra2]
+
+ The entry name and module name are required, but the ``:attrs`` and
+        ``[extras]`` parts are optional.
+ """
+ m = cls.pattern.match(src)
+ if not m:
+ msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+ raise ValueError(msg, src)
+ res = m.groupdict()
+ extras = cls._parse_extras(res['extras'])
+ attrs = res['attr'].split('.') if res['attr'] else ()
+ return cls(res['name'], res['module'], attrs, extras, dist)
+
+ @classmethod
+ def _parse_extras(cls, extras_spec):
+ if not extras_spec:
+ return ()
+ req = Requirement.parse('x' + extras_spec)
+ if req.specs:
+ raise ValueError()
+ return req.extras
+
+ @classmethod
+ def parse_group(cls, group, lines, dist=None):
+ """Parse an entry point group"""
+ if not MODULE(group):
+ raise ValueError("Invalid group name", group)
+ this = {}
+ for line in yield_lines(lines):
+ ep = cls.parse(line, dist)
+ if ep.name in this:
+ raise ValueError("Duplicate entry point", group, ep.name)
+ this[ep.name] = ep
+ return this
+
+ @classmethod
+ def parse_map(cls, data, dist=None):
+ """Parse a map of entry point groups"""
+ if isinstance(data, dict):
+ data = data.items()
+ else:
+ data = split_sections(data)
+ maps = {}
+ for group, lines in data:
+ if group is None:
+ if not lines:
+ continue
+ raise ValueError("Entry points must be listed in groups")
+ group = group.strip()
+ if group in maps:
+ raise ValueError("Duplicate group name", group)
+ maps[group] = cls.parse_group(group, lines, dist)
+ return maps
+
+
+def _version_from_file(lines):
+ """
+ Given an iterable of lines from a Metadata file, return
+ the value of the Version field, if present, or None otherwise.
+ """
+ def is_version_line(line):
+ return line.lower().startswith('version:')
+ version_lines = filter(is_version_line, lines)
+ line = next(iter(version_lines), '')
+ _, _, value = line.partition(':')
+ return safe_version(value.strip()) or None
+
+
+class Distribution:
+ """Wrap an actual or potential sys.path entry w/metadata"""
+ PKG_INFO = 'PKG-INFO'
+
+ def __init__(
+ self, location=None, metadata=None, project_name=None,
+ version=None, py_version=PY_MAJOR, platform=None,
+ precedence=EGG_DIST):
+ self.project_name = safe_name(project_name or 'Unknown')
+ if version is not None:
+ self._version = safe_version(version)
+ self.py_version = py_version
+ self.platform = platform
+ self.location = location
+ self.precedence = precedence
+ self._provider = metadata or empty_provider
+
+ @classmethod
+ def from_location(cls, location, basename, metadata=None, **kw):
+ project_name, version, py_version, platform = [None] * 4
+ basename, ext = os.path.splitext(basename)
+ if ext.lower() in _distributionImpl:
+ cls = _distributionImpl[ext.lower()]
+
+ match = EGG_NAME(basename)
+ if match:
+ project_name, version, py_version, platform = match.group(
+ 'name', 'ver', 'pyver', 'plat'
+ )
+ return cls(
+ location, metadata, project_name=project_name, version=version,
+ py_version=py_version, platform=platform, **kw
+ )._reload_version()
+
+ def _reload_version(self):
+ return self
+
+ @property
+ def hashcmp(self):
+ return (
+ self.parsed_version,
+ self.precedence,
+ self.key,
+ self.location,
+ self.py_version or '',
+ self.platform or '',
+ )
+
+ def __hash__(self):
+ return hash(self.hashcmp)
+
+ def __lt__(self, other):
+ return self.hashcmp < other.hashcmp
+
+ def __le__(self, other):
+ return self.hashcmp <= other.hashcmp
+
+ def __gt__(self, other):
+ return self.hashcmp > other.hashcmp
+
+ def __ge__(self, other):
+ return self.hashcmp >= other.hashcmp
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ # It's not a Distribution, so they are not equal
+ return False
+ return self.hashcmp == other.hashcmp
+
+ def __ne__(self, other):
+ return not self == other
+
+ # These properties have to be lazy so that we don't have to load any
+ # metadata until/unless it's actually needed. (i.e., some distributions
+ # may not know their name or version without loading PKG-INFO)
+
+ @property
+ def key(self):
+ try:
+ return self._key
+ except AttributeError:
+ self._key = key = self.project_name.lower()
+ return key
+
+ @property
+ def parsed_version(self):
+ if not hasattr(self, "_parsed_version"):
+ self._parsed_version = parse_version(self.version)
+
+ return self._parsed_version
+
+ def _warn_legacy_version(self):
+ LV = packaging.version.LegacyVersion
+ is_legacy = isinstance(self._parsed_version, LV)
+ if not is_legacy:
+ return
+
+        # While an empty version is technically a legacy version and
+        # is not a valid PEP 440 version, it is unlikely to have been
+        # supplied by a user; more likely it comes from setuptools
+        # attempting to parse a filename and including it in the list.
+        # For that reason, only warn when the version is non-empty.
+ if not self.version:
+ return
+
+ tmpl = textwrap.dedent("""
+ '{project_name} ({version})' is being parsed as a legacy,
+ non PEP 440,
+ version. You may find odd behavior and sort order.
+ In particular it will be sorted as less than 0.0. It
+ is recommended to migrate to PEP 440 compatible
+ versions.
+ """).strip().replace('\n', ' ')
+
+ warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
+
+ @property
+ def version(self):
+ try:
+ return self._version
+ except AttributeError as e:
+ version = self._get_version()
+ if version is None:
+ path = self._get_metadata_path_for_display(self.PKG_INFO)
+ msg = (
+ "Missing 'Version:' header and/or {} file at path: {}"
+ ).format(self.PKG_INFO, path)
+ raise ValueError(msg, self) from e
+
+ return version
+
+ @property
+ def _dep_map(self):
+ """
+ A map of extra to its list of (direct) requirements
+ for this distribution, including the null extra.
+ """
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._filter_extras(self._build_dep_map())
+ return self.__dep_map
+
+ @staticmethod
+ def _filter_extras(dm):
+ """
+ Given a mapping of extras to dependencies, strip off
+ environment markers and filter out any dependencies
+ not matching the markers.
+ """
+ for extra in list(filter(None, dm)):
+ new_extra = extra
+ reqs = dm.pop(extra)
+ new_extra, _, marker = extra.partition(':')
+ fails_marker = marker and (
+ invalid_marker(marker)
+ or not evaluate_marker(marker)
+ )
+ if fails_marker:
+ reqs = []
+ new_extra = safe_extra(new_extra) or None
+
+ dm.setdefault(new_extra, []).extend(reqs)
+ return dm
+
+ def _build_dep_map(self):
+ dm = {}
+ for name in 'requires.txt', 'depends.txt':
+ for extra, reqs in split_sections(self._get_metadata(name)):
+ dm.setdefault(extra, []).extend(parse_requirements(reqs))
+ return dm
+
+ def requires(self, extras=()):
+ """List of Requirements needed for this distro if `extras` are used"""
+ dm = self._dep_map
+ deps = []
+ deps.extend(dm.get(None, ()))
+ for ext in extras:
+ try:
+ deps.extend(dm[safe_extra(ext)])
+ except KeyError as e:
+ raise UnknownExtra(
+ "%s has no such extra feature %r" % (self, ext)
+ ) from e
+ return deps
+
+ def _get_metadata_path_for_display(self, name):
+ """
+ Return the path to the given metadata file, if available.
+ """
+ try:
+ # We need to access _get_metadata_path() on the provider object
+ # directly rather than through this class's __getattr__()
+ # since _get_metadata_path() is marked private.
+ path = self._provider._get_metadata_path(name)
+
+ # Handle exceptions e.g. in case the distribution's metadata
+ # provider doesn't support _get_metadata_path().
+ except Exception:
+ return '[could not detect]'
+
+ return path
+
+ def _get_metadata(self, name):
+ if self.has_metadata(name):
+ for line in self.get_metadata_lines(name):
+ yield line
+
+ def _get_version(self):
+ lines = self._get_metadata(self.PKG_INFO)
+ version = _version_from_file(lines)
+
+ return version
+
+ def activate(self, path=None, replace=False):
+ """Ensure distribution is importable on `path` (default=sys.path)"""
+ if path is None:
+ path = sys.path
+ self.insert_on(path, replace=replace)
+ if path is sys.path:
+ fixup_namespace_packages(self.location)
+ for pkg in self._get_metadata('namespace_packages.txt'):
+ if pkg in sys.modules:
+ declare_namespace(pkg)
+
+ def egg_name(self):
+ """Return what this distribution's standard .egg filename should be"""
+ filename = "%s-%s-py%s" % (
+ to_filename(self.project_name), to_filename(self.version),
+ self.py_version or PY_MAJOR
+ )
+
+ if self.platform:
+ filename += '-' + self.platform
+ return filename
+
+ def __repr__(self):
+ if self.location:
+ return "%s (%s)" % (self, self.location)
+ else:
+ return str(self)
+
+ def __str__(self):
+ try:
+ version = getattr(self, 'version', None)
+ except ValueError:
+ version = None
+ version = version or "[unknown version]"
+ return "%s %s" % (self.project_name, version)
+
+ def __getattr__(self, attr):
+ """Delegate all unrecognized public attributes to .metadata provider"""
+ if attr.startswith('_'):
+ raise AttributeError(attr)
+ return getattr(self._provider, attr)
+
+ def __dir__(self):
+ return list(
+ set(super(Distribution, self).__dir__())
+ | set(
+ attr for attr in self._provider.__dir__()
+ if not attr.startswith('_')
+ )
+ )
+
+ @classmethod
+ def from_filename(cls, filename, metadata=None, **kw):
+ return cls.from_location(
+ _normalize_cached(filename), os.path.basename(filename), metadata,
+ **kw
+ )
+
+ def as_requirement(self):
+ """Return a ``Requirement`` that matches this distribution exactly"""
+ if isinstance(self.parsed_version, packaging.version.Version):
+ spec = "%s==%s" % (self.project_name, self.parsed_version)
+ else:
+ spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+ return Requirement.parse(spec)
+
+ def load_entry_point(self, group, name):
+ """Return the `name` entry point of `group` or raise ImportError"""
+ ep = self.get_entry_info(group, name)
+ if ep is None:
+ raise ImportError("Entry point %r not found" % ((group, name),))
+ return ep.load()
+
+ def get_entry_map(self, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ try:
+ ep_map = self._ep_map
+ except AttributeError:
+ ep_map = self._ep_map = EntryPoint.parse_map(
+ self._get_metadata('entry_points.txt'), self
+ )
+ if group is not None:
+ return ep_map.get(group, {})
+ return ep_map
+
+ def get_entry_info(self, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return self.get_entry_map(group).get(name)
+
+ def insert_on(self, path, loc=None, replace=False):
+ """Ensure self.location is on path
+
+ If replace=False (default):
+ - If location is already in path anywhere, do nothing.
+ - Else:
+ - If it's an egg and its parent directory is on path,
+ insert just ahead of the parent.
+ - Else: add to the end of path.
+ If replace=True:
+ - If location is already on path anywhere (not eggs)
+ or higher priority than its parent (eggs)
+ do nothing.
+ - Else:
+ - If it's an egg and its parent directory is on path,
+ insert just ahead of the parent,
+ removing any lower-priority entries.
+ - Else: add it to the front of path.
+ """
+
+ loc = loc or self.location
+ if not loc:
+ return
+
+ nloc = _normalize_cached(loc)
+ bdir = os.path.dirname(nloc)
+ npath = [(p and _normalize_cached(p) or p) for p in path]
+
+ for p, item in enumerate(npath):
+ if item == nloc:
+ if replace:
+ break
+ else:
+ # don't modify path (even removing duplicates) if
+ # found and not replace
+ return
+ elif item == bdir and self.precedence == EGG_DIST:
+ # if it's an .egg, give it precedence over its directory
+ # UNLESS it's already been added to sys.path and replace=False
+ if (not replace) and nloc in npath[p:]:
+ return
+ if path is sys.path:
+ self.check_version_conflict()
+ path.insert(p, loc)
+ npath.insert(p, nloc)
+ break
+ else:
+ if path is sys.path:
+ self.check_version_conflict()
+ if replace:
+ path.insert(0, loc)
+ else:
+ path.append(loc)
+ return
+
+ # p is the spot where we found or inserted loc; now remove duplicates
+ while True:
+ try:
+ np = npath.index(nloc, p + 1)
+ except ValueError:
+ break
+ else:
+ del npath[np], path[np]
+ # ha!
+ p = np
+
+ return
+
+ def check_version_conflict(self):
+ if self.key == 'setuptools':
+ # ignore the inevitable setuptools self-conflicts :(
+ return
+
+ nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+ loc = normalize_path(self.location)
+ for modname in self._get_metadata('top_level.txt'):
+ if (modname not in sys.modules or modname in nsp
+ or modname in _namespace_packages):
+ continue
+ if modname in ('pkg_resources', 'setuptools', 'site'):
+ continue
+ fn = getattr(sys.modules[modname], '__file__', None)
+ if fn and (normalize_path(fn).startswith(loc) or
+ fn.startswith(self.location)):
+ continue
+ issue_warning(
+ "Module %s was already imported from %s, but %s is being added"
+ " to sys.path" % (modname, fn, self.location),
+ )
+
+ def has_version(self):
+ try:
+ self.version
+ except ValueError:
+ issue_warning("Unbuilt egg for " + repr(self))
+ return False
+ return True
+
+ def clone(self, **kw):
+ """Copy this distribution, substituting in any changed keyword args"""
+ names = 'project_name version py_version platform location precedence'
+ for attr in names.split():
+ kw.setdefault(attr, getattr(self, attr, None))
+ kw.setdefault('metadata', self._provider)
+ return self.__class__(**kw)
+
+ @property
+ def extras(self):
+ return [dep for dep in self._dep_map if dep]
+
+
+class EggInfoDistribution(Distribution):
+ def _reload_version(self):
+ """
+        Packages installed by distutils (e.g. numpy or scipy) use an
+        old safe_version, so their version numbers can get mangled
+        when converted to filenames (e.g., 1.11.0.dev0+2329eae to
+        1.11.0.dev0_2329eae). Such distributions will not be parsed
+        properly downstream by Distribution and safe_version, so take
+        an extra step and try to get the version number from the
+        metadata file itself instead of the filename.
+ """
+ md_version = self._get_version()
+ if md_version:
+ self._version = md_version
+ return self
+
+
+class DistInfoDistribution(Distribution):
+ """
+ Wrap an actual or potential sys.path entry
+ w/metadata, .dist-info style.
+ """
+ PKG_INFO = 'METADATA'
+ EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+ @property
+ def _parsed_pkg_info(self):
+ """Parse and cache metadata"""
+ try:
+ return self._pkg_info
+ except AttributeError:
+ metadata = self.get_metadata(self.PKG_INFO)
+ self._pkg_info = email.parser.Parser().parsestr(metadata)
+ return self._pkg_info
+
+ @property
+ def _dep_map(self):
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._compute_dependencies()
+ return self.__dep_map
+
+ def _compute_dependencies(self):
+ """Recompute this distribution's dependencies."""
+ dm = self.__dep_map = {None: []}
+
+ reqs = []
+ # Including any condition expressions
+ for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+ reqs.extend(parse_requirements(req))
+
+ def reqs_for_extra(extra):
+ for req in reqs:
+ if not req.marker or req.marker.evaluate({'extra': extra}):
+ yield req
+
+ common = frozenset(reqs_for_extra(None))
+ dm[None].extend(common)
+
+ for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+ s_extra = safe_extra(extra.strip())
+ dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
+
+ return dm
+
+
+_distributionImpl = {
+ '.egg': Distribution,
+ '.egg-info': EggInfoDistribution,
+ '.dist-info': DistInfoDistribution,
+}
+
+
+def issue_warning(*args, **kw):
+ level = 1
+ g = globals()
+ try:
+ # find the first stack frame that is *not* code in
+ # the pkg_resources module, to use for the warning
+ while sys._getframe(level).f_globals is g:
+ level += 1
+ except ValueError:
+ pass
+ warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+def parse_requirements(strs):
+ """Yield ``Requirement`` objects for each specification in `strs`
+
+ `strs` must be a string, or a (possibly-nested) iterable thereof.
+ """
+ # create a steppable iterator, so we can handle \-continuations
+ lines = iter(yield_lines(strs))
+
+ for line in lines:
+ # Drop comments -- a hash without a space may be in a URL.
+ if ' #' in line:
+ line = line[:line.find(' #')]
+ # If there is a line continuation, drop it, and append the next line.
+ if line.endswith('\\'):
+ line = line[:-2].strip()
+ try:
+ line += next(lines)
+ except StopIteration:
+ return
+ yield Requirement(line)
+
+
+class RequirementParseError(packaging.requirements.InvalidRequirement):
+ "Compatibility wrapper for InvalidRequirement"
+
+
+class Requirement(packaging.requirements.Requirement):
+ def __init__(self, requirement_string):
+ """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+ super(Requirement, self).__init__(requirement_string)
+ self.unsafe_name = self.name
+ project_name = safe_name(self.name)
+ self.project_name, self.key = project_name, project_name.lower()
+ self.specs = [
+ (spec.operator, spec.version) for spec in self.specifier]
+ self.extras = tuple(map(safe_extra, self.extras))
+ self.hashCmp = (
+ self.key,
+ self.url,
+ self.specifier,
+ frozenset(self.extras),
+ str(self.marker) if self.marker else None,
+ )
+ self.__hash = hash(self.hashCmp)
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, Requirement) and
+ self.hashCmp == other.hashCmp
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __contains__(self, item):
+ if isinstance(item, Distribution):
+ if item.key != self.key:
+ return False
+
+ item = item.version
+
+ # Allow prereleases always in order to match the previous behavior of
+ # this method. In the future this should be smarter and follow PEP 440
+ # more accurately.
+ return self.specifier.contains(item, prereleases=True)
+
+ def __hash__(self):
+ return self.__hash
+
+ def __repr__(self):
+ return "Requirement.parse(%r)" % str(self)
+
+ @staticmethod
+ def parse(s):
+ req, = parse_requirements(s)
+ return req
+
+
+def _always_object(classes):
+ """
+ Ensure object appears in the mro even
+ for old-style classes.
+ """
+ if object not in classes:
+ return classes + (object,)
+ return classes
+
+
+def _find_adapter(registry, ob):
+ """Return an adapter factory for `ob` from `registry`"""
+ types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
+ for t in types:
+ if t in registry:
+ return registry[t]
+
+
+def ensure_directory(path):
+ """Ensure that the parent directory of `path` exists"""
+ dirname = os.path.dirname(path)
+ os.makedirs(dirname, exist_ok=True)
+
+
+def _bypass_ensure_directory(path):
+ """Sandbox-bypassing version of ensure_directory()"""
+ if not WRITE_SUPPORT:
+ raise IOError('"os.mkdir" not supported on this platform.')
+ dirname, filename = split(path)
+ if dirname and filename and not isdir(dirname):
+ _bypass_ensure_directory(dirname)
+ try:
+ mkdir(dirname, 0o755)
+ except FileExistsError:
+ pass
+
+
+def split_sections(s):
+ """Split a string or iterable thereof into (section, content) pairs
+
+ Each ``section`` is a stripped version of the section header ("[section]")
+ and each ``content`` is a list of stripped lines excluding blank lines and
+ comment-only lines. If there are any such lines before the first section
+ header, they're returned in a first ``section`` of ``None``.
+ """
+ section = None
+ content = []
+ for line in yield_lines(s):
+ if line.startswith("["):
+ if line.endswith("]"):
+ if section or content:
+ yield section, content
+ section = line[1:-1].strip()
+ content = []
+ else:
+ raise ValueError("Invalid section heading", line)
+ else:
+ content.append(line)
+
+ # wrap up last segment
+ yield section, content
+
+
+def _mkstemp(*args, **kw):
+ old_open = os.open
+ try:
+ # temporarily bypass sandboxing
+ os.open = os_open
+ return tempfile.mkstemp(*args, **kw)
+ finally:
+ # and then put it back
+ os.open = old_open
+
+
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+ f(*args, **kwargs)
+ return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+ "Set up global resource manager (deliberately not state-saved)"
+ manager = ResourceManager()
+ g['_manager'] = manager
+ g.update(
+ (name, getattr(manager, name))
+ for name in dir(manager)
+ if not name.startswith('_')
+ )
+
+
+@_call_aside
+def _initialize_master_working_set():
+ """
+ Prepare the master working set and make the ``require()``
+ API available.
+
+ This function has explicit effects on the global state
+ of pkg_resources. It is intended to be invoked once at
+ the initialization of this module.
+
+ Invocation by other packages is unsupported and done
+ at their own risk.
+ """
+ working_set = WorkingSet._build_master()
+ _declare_state('object', working_set=working_set)
+
+ require = working_set.require
+ iter_entry_points = working_set.iter_entry_points
+ add_activation_listener = working_set.subscribe
+ run_script = working_set.run_script
+ # backward compatibility
+ run_main = run_script
+ # Activate all distributions already on sys.path with replace=False and
+ # ensure that all distributions added to the working set in the future
+ # (e.g. by calling ``require()``) will get activated as well,
+ # with higher priority (replace=True).
+ tuple(
+ dist.activate(replace=False)
+ for dist in working_set
+ )
+ add_activation_listener(
+ lambda dist: dist.activate(replace=True),
+ existing=False,
+ )
+ working_set.entries = []
+ # match order
+ list(map(working_set.add_entry, sys.path))
+ globals().update(locals())
+
+
+class PkgResourcesDeprecationWarning(Warning):
+ """
+ Base class for warning about deprecations in ``pkg_resources``
+
+ This class is not derived from ``DeprecationWarning``, and as such is
+ visible by default.
+ """
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/__init__.py b/third_party/python/setuptools/pkg_resources/_vendor/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/__init__.py
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/appdirs.py b/third_party/python/setuptools/pkg_resources/_vendor/appdirs.py
new file mode 100644
index 0000000000..ae67001af8
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/appdirs.py
@@ -0,0 +1,608 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+"""Utilities for determining application-specific dirs.
+
+See <http://github.com/ActiveState/appdirs> for details and usage.
+"""
+# Dev Notes:
+# - MSDN on where to store app data files:
+# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version_info__ = (1, 4, 3)
+__version__ = '.'.join(map(str, __version_info__))
+
+
+import sys
+import os
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ unicode = str
+
+if sys.platform.startswith('java'):
+ import platform
+ os_name = platform.java_ver()[3][0]
+ if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
+ system = 'win32'
+ elif os_name.startswith('Mac'): # "Mac OS X", etc.
+ system = 'darwin'
+ else: # "Linux", "SunOS", "FreeBSD", etc.
+ # Setting this to "linux2" is not ideal, but only Windows or Mac
+ # are actually checked for and the rest of the module expects
+ # *sys.platform* style strings.
+ system = 'linux2'
+else:
+ system = sys.platform
+
+
+
+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user data directories are:
+ Mac OS X: ~/Library/Application Support/<AppName>
+ Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
+ Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
+ Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
+ Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
+ Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
+
+ For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+ That means, by default "~/.local/share/<AppName>".
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
+ path = os.path.normpath(_get_win_folder(const))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ elif system == 'darwin':
+ path = os.path.expanduser('~/Library/Application Support/')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
+ r"""Return full path to the user-shared data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "multipath" is an optional parameter only applicable to *nix
+ which indicates that the entire list of data dirs should be
+ returned. By default, the first item from XDG_DATA_DIRS is
+ returned, or '/usr/local/share/<AppName>',
+ if XDG_DATA_DIRS is not set
+
+ Typical site data directories are:
+ Mac OS X: /Library/Application Support/<AppName>
+ Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
+ Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+ Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
+
+ For Unix, this is using the $XDG_DATA_DIRS[0] default.
+
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ elif system == 'darwin':
+ path = os.path.expanduser('/Library/Application Support')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ # XDG default for $XDG_DATA_DIRS
+ # only first, if multipath is False
+ path = os.getenv('XDG_DATA_DIRS',
+ os.pathsep.join(['/usr/local/share', '/usr/share']))
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+ if appname:
+ if version:
+ appname = os.path.join(appname, version)
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+ if multipath:
+ path = os.pathsep.join(pathlist)
+ else:
+ path = pathlist[0]
+ return path
+
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific config dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user config directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+ That means, by default "~/.config/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
+ r"""Return full path to the user-shared data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "multipath" is an optional parameter only applicable to *nix
+ which indicates that the entire list of config dirs should be
+ returned. By default, the first item from XDG_CONFIG_DIRS is
+ returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
+
+ Typical site config directories are:
+ Mac OS X: same as site_data_dir
+ Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
+ $XDG_CONFIG_DIRS
+ Win *: same as site_data_dir
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+
+ For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
+
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+ """
+ if system in ["win32", "darwin"]:
+ path = site_data_dir(appname, appauthor)
+ if appname and version:
+ path = os.path.join(path, version)
+ else:
+ # XDG default for $XDG_CONFIG_DIRS
+ # only first, if multipath is False
+ path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+ if appname:
+ if version:
+ appname = os.path.join(appname, version)
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+ if multipath:
+ path = os.pathsep.join(pathlist)
+ else:
+ path = pathlist[0]
+ return path
+
+
+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
+ r"""Return full path to the user-specific cache dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "opinion" (boolean) can be False to disable the appending of
+ "Cache" to the base app data dir for Windows. See
+ discussion below.
+
+ Typical user cache directories are:
+ Mac OS X: ~/Library/Caches/<AppName>
+ Unix: ~/.cache/<AppName> (XDG default)
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
+
+ On Windows the only suggestion in the MSDN docs is that local settings go in
+ the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
+ app data dir (the default returned by `user_data_dir` above). Apps typically
+ put cache data somewhere *under* the given dir here. Some examples:
+ ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+ ...\Acme\SuperApp\Cache\1.0
+ OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+ This can be disabled with the `opinion=False` option.
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ if opinion:
+ path = os.path.join(path, "Cache")
+ elif system == 'darwin':
+ path = os.path.expanduser('~/Library/Caches')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific state dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user state directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
+ to extend the XDG spec and support $XDG_STATE_HOME.
+
+ That means, by default "~/.local/state/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
+ r"""Return full path to the user-specific log dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "opinion" (boolean) can be False to disable the appending of
+ "Logs" to the base app data dir for Windows, and "log" to the
+ base cache dir for Unix. See discussion below.
+
+ Typical user log directories are:
+ Mac OS X: ~/Library/Logs/<AppName>
+ Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
+
+ On Windows the only suggestion in the MSDN docs is that local settings
+ go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
+ examples of what some windows apps use for a logs dir.)
+
+ OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
+ value for Windows and appends "log" to the user cache dir for Unix.
+ This can be disabled with the `opinion=False` option.
+ """
+ if system == "darwin":
+ path = os.path.join(
+ os.path.expanduser('~/Library/Logs'),
+ appname)
+ elif system == "win32":
+ path = user_data_dir(appname, appauthor, version)
+ version = False
+ if opinion:
+ path = os.path.join(path, "Logs")
+ else:
+ path = user_cache_dir(appname, appauthor, version)
+ version = False
+ if opinion:
+ path = os.path.join(path, "log")
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+class AppDirs(object):
+ """Convenience wrapper for getting application dirs."""
+ def __init__(self, appname=None, appauthor=None, version=None,
+ roaming=False, multipath=False):
+ self.appname = appname
+ self.appauthor = appauthor
+ self.version = version
+ self.roaming = roaming
+ self.multipath = multipath
+
+ @property
+ def user_data_dir(self):
+ return user_data_dir(self.appname, self.appauthor,
+ version=self.version, roaming=self.roaming)
+
+ @property
+ def site_data_dir(self):
+ return site_data_dir(self.appname, self.appauthor,
+ version=self.version, multipath=self.multipath)
+
+ @property
+ def user_config_dir(self):
+ return user_config_dir(self.appname, self.appauthor,
+ version=self.version, roaming=self.roaming)
+
+ @property
+ def site_config_dir(self):
+ return site_config_dir(self.appname, self.appauthor,
+ version=self.version, multipath=self.multipath)
+
+ @property
+ def user_cache_dir(self):
+ return user_cache_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
+ def user_state_dir(self):
+ return user_state_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
+ def user_log_dir(self):
+ return user_log_dir(self.appname, self.appauthor,
+ version=self.version)
+
+
+#---- internal support stuff
+
+def _get_win_folder_from_registry(csidl_name):
+ """This is a fallback technique at best. I'm not sure if using the
+ registry for this guarantees us the correct answer for all CSIDL_*
+ names.
+ """
+ if PY3:
+ import winreg as _winreg
+ else:
+ import _winreg
+
+ shell_folder_name = {
+ "CSIDL_APPDATA": "AppData",
+ "CSIDL_COMMON_APPDATA": "Common AppData",
+ "CSIDL_LOCAL_APPDATA": "Local AppData",
+ }[csidl_name]
+
+ key = _winreg.OpenKey(
+ _winreg.HKEY_CURRENT_USER,
+ r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+ )
+ dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+ return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+ from win32com.shell import shellcon, shell
+ dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+ # Try to make this a unicode path because SHGetFolderPath does
+ # not return unicode strings when there is unicode data in the
+ # path.
+ try:
+ dir = unicode(dir)
+
+ # Downgrade to short path name if have highbit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in dir:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ try:
+ import win32api
+ dir = win32api.GetShortPathName(dir)
+ except ImportError:
+ pass
+ except UnicodeError:
+ pass
+ return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+ import ctypes
+
+ csidl_const = {
+ "CSIDL_APPDATA": 26,
+ "CSIDL_COMMON_APPDATA": 35,
+ "CSIDL_LOCAL_APPDATA": 28,
+ }[csidl_name]
+
+ buf = ctypes.create_unicode_buffer(1024)
+ ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+ # Downgrade to short path name if have highbit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in buf:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ buf2 = ctypes.create_unicode_buffer(1024)
+ if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+ buf = buf2
+
+ return buf.value
+
+def _get_win_folder_with_jna(csidl_name):
+ import array
+ from com.sun import jna
+ from com.sun.jna.platform import win32
+
+ buf_size = win32.WinDef.MAX_PATH * 2
+ buf = array.zeros('c', buf_size)
+ shell = win32.Shell32.INSTANCE
+ shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+ # Downgrade to short path name if have highbit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in dir:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ buf = array.zeros('c', buf_size)
+ kernel = win32.Kernel32.INSTANCE
+ if kernel.GetShortPathName(dir, buf, buf_size):
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+ return dir
+
+if system == "win32":
+ try:
+ import win32com.shell
+ _get_win_folder = _get_win_folder_with_pywin32
+ except ImportError:
+ try:
+ from ctypes import windll
+ _get_win_folder = _get_win_folder_with_ctypes
+ except ImportError:
+ try:
+ import com.sun.jna
+ _get_win_folder = _get_win_folder_with_jna
+ except ImportError:
+ _get_win_folder = _get_win_folder_from_registry
+
+
+#---- self test code
+
+if __name__ == "__main__":
+ appname = "MyApp"
+ appauthor = "MyCompany"
+
+ props = ("user_data_dir",
+ "user_config_dir",
+ "user_cache_dir",
+ "user_state_dir",
+ "user_log_dir",
+ "site_data_dir",
+ "site_config_dir")
+
+ print("-- app dirs %s --" % __version__)
+
+ print("-- app dirs (with optional 'version')")
+ dirs = AppDirs(appname, appauthor, version="1.0")
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (without optional 'version')")
+ dirs = AppDirs(appname, appauthor)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (without optional 'appauthor')")
+ dirs = AppDirs(appname)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (with disabled 'appauthor')")
+ dirs = AppDirs(appname, appauthor=False)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/__about__.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/__about__.py
new file mode 100644
index 0000000000..4d998578d7
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/__about__.py
@@ -0,0 +1,27 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "20.4"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "Copyright 2014-2019 %s" % __author__
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/__init__.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/__init__.py
new file mode 100644
index 0000000000..a0cf67df52
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/__init__.py
@@ -0,0 +1,26 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+ __author__,
+ __copyright__,
+ __email__,
+ __license__,
+ __summary__,
+ __title__,
+ __uri__,
+ __version__,
+)
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/_compat.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_compat.py
new file mode 100644
index 0000000000..e54bd4ede8
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_compat.py
@@ -0,0 +1,38 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from ._typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Any, Dict, Tuple, Type
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+if PY3:
+ string_types = (str,)
+else:
+ string_types = (basestring,)
+
+
+def with_metaclass(meta, *bases):
+ # type: (Type[Any], Tuple[Type[Any], ...]) -> Any
+ """
+ Create a base class with a metaclass.
+ """
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta): # type: ignore
+ def __new__(cls, name, this_bases, d):
+ # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any
+ return meta(name, bases, d)
+
+ return type.__new__(metaclass, "temporary_class", (), {})
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/_structures.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_structures.py
new file mode 100644
index 0000000000..800d5c5588
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_structures.py
@@ -0,0 +1,86 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
+class InfinityType(object):
+ def __repr__(self):
+ # type: () -> str
+ return "Infinity"
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __le__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __ge__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __neg__(self):
+ # type: (object) -> NegativeInfinityType
+ return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType(object):
+ def __repr__(self):
+ # type: () -> str
+ return "-Infinity"
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __le__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __ge__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __neg__(self):
+ # type: (object) -> InfinityType
+ return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
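+
+
+# Illustrative note (usage sketch, not part of the upstream module): these
+# singletons are sort sentinels that compare below/above every other value,
+# which is how the vendored packaging.version module pads comparison keys, e.g.
+#     sorted([3, Infinity, NegativeInfinity, 7])
+#     # -> [NegativeInfinity, 3, 7, Infinity]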
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/_typing.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_typing.py
new file mode 100644
index 0000000000..77a8b9185a
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_typing.py
@@ -0,0 +1,48 @@
+"""For neatly implementing static typing in packaging.
+
+`mypy` - the static type analysis tool we use - uses the `typing` module, which
+provides core functionality fundamental to mypy's functioning.
+
+Generally, `typing` would be imported at runtime and used in that fashion -
+it acts as a no-op at runtime and does not have any run-time overhead by
+design.
+
+As it turns out, `typing` is not vendorable - it uses separate sources for
+Python 2/Python 3. Thus, this codebase can not expect it to be present.
+To work around this, mypy allows the typing import to be behind a False-y
+optional to prevent it from running at runtime and type-comments can be used
+to remove the need for the types to be accessible directly during runtime.
+
+This module provides the False-y guard in a nicely named fashion so that a
+curious maintainer can reach here to read this.
+
+In packaging, all static-typing related imports should be guarded as follows:
+
+ from packaging._typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from typing import ...
+
+Ref: https://github.com/python/mypy/issues/3216
+"""
+
+__all__ = ["TYPE_CHECKING", "cast"]
+
+# The TYPE_CHECKING constant defined by the typing module is False at runtime
+# but True while type checking.
+if False: # pragma: no cover
+ from typing import TYPE_CHECKING
+else:
+ TYPE_CHECKING = False
+
+# typing's cast syntax requires calling typing.cast at runtime, but we don't
+# want to import typing at runtime. Here, we inform the type checkers that
+# we're importing `typing.cast` as `cast` and re-implement typing.cast's
+# runtime behavior in a block that is ignored by type checkers.
+if TYPE_CHECKING: # pragma: no cover
+ # not executed at runtime
+ from typing import cast
+else:
+ # executed at runtime
+ def cast(type_, value): # noqa
+ return value
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/markers.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/markers.py
new file mode 100644
index 0000000000..fd1559c10e
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/markers.py
@@ -0,0 +1,328 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from pkg_resources.extern.pyparsing import Literal as L # noqa
+
+from ._compat import string_types
+from ._typing import TYPE_CHECKING
+from .specifiers import Specifier, InvalidSpecifier
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ Operator = Callable[[str, str], bool]
+
+
+__all__ = [
+ "InvalidMarker",
+ "UndefinedComparison",
+ "UndefinedEnvironmentName",
+ "Marker",
+ "default_environment",
+]
+
+
+class InvalidMarker(ValueError):
+ """
+ An invalid marker was found, users should refer to PEP 508.
+ """
+
+
+class UndefinedComparison(ValueError):
+ """
+ An invalid operation was attempted on a value that doesn't support it.
+ """
+
+
+class UndefinedEnvironmentName(ValueError):
+ """
+ A name was used that does not exist inside of the evaluation
+ environment.
+ """
+
+
+class Node(object):
+ def __init__(self, value):
+ # type: (Any) -> None
+ self.value = value
+
+ def __str__(self):
+ # type: () -> str
+ return str(self.value)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+
+ def serialize(self):
+ # type: () -> str
+ raise NotImplementedError
+
+
+class Variable(Node):
+ def serialize(self):
+ # type: () -> str
+ return str(self)
+
+
+class Value(Node):
+ def serialize(self):
+ # type: () -> str
+ return '"{0}"'.format(self)
+
+
+class Op(Node):
+ def serialize(self):
+ # type: () -> str
+ return str(self)
+
+
+VARIABLE = (
+ L("implementation_version")
+ | L("platform_python_implementation")
+ | L("implementation_name")
+ | L("python_full_version")
+ | L("platform_release")
+ | L("platform_version")
+ | L("platform_machine")
+ | L("platform_system")
+ | L("python_version")
+ | L("sys_platform")
+ | L("os_name")
+ | L("os.name") # PEP-345
+ | L("sys.platform") # PEP-345
+ | L("platform.version") # PEP-345
+ | L("platform.machine") # PEP-345
+ | L("platform.python_implementation") # PEP-345
+ | L("python_implementation") # undocumented setuptools legacy
+ | L("extra") # PEP-508
+)
+ALIASES = {
+ "os.name": "os_name",
+ "sys.platform": "sys_platform",
+ "platform.version": "platform_version",
+ "platform.machine": "platform_machine",
+ "platform.python_implementation": "platform_python_implementation",
+ "python_implementation": "platform_python_implementation",
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+ L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+def _coerce_parse_result(results):
+ # type: (Union[ParseResults, List[Any]]) -> List[Any]
+ if isinstance(results, ParseResults):
+ return [_coerce_parse_result(i) for i in results]
+ else:
+ return results
+
+
+def _format_marker(marker, first=True):
+ # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
+
+ assert isinstance(marker, (list, tuple, string_types))
+
+ # Sometimes we have a structure like [[...]] which is a single item list
+ # where the single item is itself its own list. In that case we want to skip
+ # the rest of this function so that we don't get extraneous () on the
+ # outside.
+ if (
+ isinstance(marker, list)
+ and len(marker) == 1
+ and isinstance(marker[0], (list, tuple))
+ ):
+ return _format_marker(marker[0])
+
+ if isinstance(marker, list):
+ inner = (_format_marker(m, first=False) for m in marker)
+ if first:
+ return " ".join(inner)
+ else:
+ return "(" + " ".join(inner) + ")"
+ elif isinstance(marker, tuple):
+ return " ".join([m.serialize() for m in marker])
+ else:
+ return marker
+
+
+_operators = {
+ "in": lambda lhs, rhs: lhs in rhs,
+ "not in": lambda lhs, rhs: lhs not in rhs,
+ "<": operator.lt,
+ "<=": operator.le,
+ "==": operator.eq,
+ "!=": operator.ne,
+ ">=": operator.ge,
+ ">": operator.gt,
+} # type: Dict[str, Operator]
+
+
+def _eval_op(lhs, op, rhs):
+ # type: (str, Op, str) -> bool
+ try:
+ spec = Specifier("".join([op.serialize(), rhs]))
+ except InvalidSpecifier:
+ pass
+ else:
+ return spec.contains(lhs)
+
+ oper = _operators.get(op.serialize()) # type: Optional[Operator]
+ if oper is None:
+ raise UndefinedComparison(
+ "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
+ )
+
+ return oper(lhs, rhs)
+
+
+class Undefined(object):
+ pass
+
+
+_undefined = Undefined()
+
+
+def _get_env(environment, name):
+ # type: (Dict[str, str], str) -> str
+ value = environment.get(name, _undefined) # type: Union[str, Undefined]
+
+ if isinstance(value, Undefined):
+ raise UndefinedEnvironmentName(
+ "{0!r} does not exist in evaluation environment.".format(name)
+ )
+
+ return value
+
+
+def _evaluate_markers(markers, environment):
+ # type: (List[Any], Dict[str, str]) -> bool
+ groups = [[]] # type: List[List[bool]]
+
+ for marker in markers:
+ assert isinstance(marker, (list, tuple, string_types))
+
+ if isinstance(marker, list):
+ groups[-1].append(_evaluate_markers(marker, environment))
+ elif isinstance(marker, tuple):
+ lhs, op, rhs = marker
+
+ if isinstance(lhs, Variable):
+ lhs_value = _get_env(environment, lhs.value)
+ rhs_value = rhs.value
+ else:
+ lhs_value = lhs.value
+ rhs_value = _get_env(environment, rhs.value)
+
+ groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+ else:
+ assert marker in ["and", "or"]
+ if marker == "or":
+ groups.append([])
+
+ return any(all(item) for item in groups)
+
+
+def format_full_version(info):
+ # type: (sys._version_info) -> str
+ version = "{0.major}.{0.minor}.{0.micro}".format(info)
+ kind = info.releaselevel
+ if kind != "final":
+ version += kind[0] + str(info.serial)
+ return version
+
+
+def default_environment():
+ # type: () -> Dict[str, str]
+ if hasattr(sys, "implementation"):
+ # Ignoring the `sys.implementation` reference for type checking due to
+ # mypy not liking that the attribute doesn't exist in Python 2.7 when
+ # run with the `--py27` flag.
+ iver = format_full_version(sys.implementation.version) # type: ignore
+ implementation_name = sys.implementation.name # type: ignore
+ else:
+ iver = "0"
+ implementation_name = ""
+
+ return {
+ "implementation_name": implementation_name,
+ "implementation_version": iver,
+ "os_name": os.name,
+ "platform_machine": platform.machine(),
+ "platform_release": platform.release(),
+ "platform_system": platform.system(),
+ "platform_version": platform.version(),
+ "python_full_version": platform.python_version(),
+ "platform_python_implementation": platform.python_implementation(),
+ "python_version": ".".join(platform.python_version_tuple()[:2]),
+ "sys_platform": sys.platform,
+ }
+
+
+class Marker(object):
+ def __init__(self, marker):
+ # type: (str) -> None
+ try:
+ self._markers = _coerce_parse_result(MARKER.parseString(marker))
+ except ParseException as e:
+ err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
+ marker, marker[e.loc : e.loc + 8]
+ )
+ raise InvalidMarker(err_str)
+
+ def __str__(self):
+ # type: () -> str
+ return _format_marker(self._markers)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Marker({0!r})>".format(str(self))
+
+ def evaluate(self, environment=None):
+ # type: (Optional[Dict[str, str]]) -> bool
+ """Evaluate a marker.
+
+ Return the boolean from evaluating the given marker against the
+ environment. environment is an optional argument to override all or
+ part of the determined environment.
+
+ The environment is determined from the current Python process.
+ """
+ current_environment = default_environment()
+ if environment is not None:
+ current_environment.update(environment)
+
+ return _evaluate_markers(self._markers, current_environment)
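+
+
+if __name__ == "__main__":
+ # Illustrative self-test (a minimal usage sketch, not part of upstream packaging):
+ # evaluate a PEP 508 marker against the running interpreter and against a
+ # partially overridden environment.
+ m = Marker("python_version >= '2.7' and os_name != 'java'")
+ print(m) # canonical string form of the parsed marker
+ print(m.evaluate()) # evaluated against default_environment()
+ print(m.evaluate({"os_name": "java"})) # the override makes the second clause False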
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/requirements.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/requirements.py
new file mode 100644
index 0000000000..9495a1df1e
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/requirements.py
@@ -0,0 +1,145 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from pkg_resources.extern.pyparsing import Literal as L # noqa
+from urllib import parse as urlparse
+
+from ._typing import TYPE_CHECKING
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import List
+
+
+class InvalidRequirement(ValueError):
+ """
+ An invalid requirement was found; users should refer to PEP 508.
+ """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r"[^ ]+")("url")
+URL = AT + URI
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(
+ VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+)("_raw_spec")
+_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+ lambda s, l, t: Marker(s[t._original_start : t._original_end])
+)
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
+# issue #104
+REQUIREMENT.parseString("x[]")
+
+
+class Requirement(object):
+ """Parse a requirement.
+
+ Parse a given requirement string into its parts, such as name, specifier,
+ URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+ string.
+ """
+
+ # TODO: Can we test whether something is contained within a requirement?
+ # If so how do we do that? Do we need to test against the _name_ of
+ # the thing as well as the version? What about the markers?
+ # TODO: Can we normalize the name and extra name?
+
+ def __init__(self, requirement_string):
+ # type: (str) -> None
+ try:
+ req = REQUIREMENT.parseString(requirement_string)
+ except ParseException as e:
+ raise InvalidRequirement(
+ 'Parse error at "{0!r}": {1}'.format(
+ requirement_string[e.loc : e.loc + 8], e.msg
+ )
+ )
+
+ self.name = req.name
+ if req.url:
+ parsed_url = urlparse.urlparse(req.url)
+ if parsed_url.scheme == "file":
+ if urlparse.urlunparse(parsed_url) != req.url:
+ raise InvalidRequirement("Invalid URL given")
+ elif not (parsed_url.scheme and parsed_url.netloc) or (
+ not parsed_url.scheme and not parsed_url.netloc
+ ):
+ raise InvalidRequirement("Invalid URL: {0}".format(req.url))
+ self.url = req.url
+ else:
+ self.url = None
+ self.extras = set(req.extras.asList() if req.extras else [])
+ self.specifier = SpecifierSet(req.specifier)
+ self.marker = req.marker if req.marker else None
+
+ def __str__(self):
+ # type: () -> str
+ parts = [self.name] # type: List[str]
+
+ if self.extras:
+ parts.append("[{0}]".format(",".join(sorted(self.extras))))
+
+ if self.specifier:
+ parts.append(str(self.specifier))
+
+ if self.url:
+ parts.append("@ {0}".format(self.url))
+ if self.marker:
+ parts.append(" ")
+
+ if self.marker:
+ parts.append("; {0}".format(self.marker))
+
+ return "".join(parts)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Requirement({0!r})>".format(str(self))
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/specifiers.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000000..fe09bb1dbb
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/specifiers.py
@@ -0,0 +1,863 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from ._typing import TYPE_CHECKING
+from .utils import canonicalize_version
+from .version import Version, LegacyVersion, parse
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ List,
+ Dict,
+ Union,
+ Iterable,
+ Iterator,
+ Optional,
+ Callable,
+ Tuple,
+ FrozenSet,
+ )
+
+ ParsedVersion = Union[Version, LegacyVersion]
+ UnparsedVersion = Union[Version, LegacyVersion, str]
+ CallableOperator = Callable[[ParsedVersion, str], bool]
+
+
+class InvalidSpecifier(ValueError):
+ """
+ An invalid specifier was found; users should refer to PEP 440.
+ """
+
+
+class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore
+ @abc.abstractmethod
+ def __str__(self):
+ # type: () -> str
+ """
+ Returns the str representation of this Specifier like object. This
+ should be representative of the Specifier itself.
+ """
+
+ @abc.abstractmethod
+ def __hash__(self):
+ # type: () -> int
+ """
+ Returns a hash value for this Specifier like object.
+ """
+
+ @abc.abstractmethod
+ def __eq__(self, other):
+ # type: (object) -> bool
+ """
+ Returns a boolean representing whether or not the two Specifier like
+ objects are equal.
+ """
+
+ @abc.abstractmethod
+ def __ne__(self, other):
+ # type: (object) -> bool
+ """
+ Returns a boolean representing whether or not the two Specifier like
+ objects are not equal.
+ """
+
+ @abc.abstractproperty
+ def prereleases(self):
+ # type: () -> Optional[bool]
+ """
+ Returns whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ """
+ Sets whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @abc.abstractmethod
+ def contains(self, item, prereleases=None):
+ # type: (str, Optional[bool]) -> bool
+ """
+ Determines if the given item is contained within this specifier.
+ """
+
+ @abc.abstractmethod
+ def filter(self, iterable, prereleases=None):
+ # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
+ """
+ Takes an iterable of items and filters them so that only items which
+ are contained within this specifier are allowed in it.
+ """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+ _operators = {} # type: Dict[str, str]
+
+ def __init__(self, spec="", prereleases=None):
+ # type: (str, Optional[bool]) -> None
+ match = self._regex.search(spec)
+ if not match:
+ raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
+
+ self._spec = (
+ match.group("operator").strip(),
+ match.group("version").strip(),
+ ) # type: Tuple[str, str]
+
+ # Store whether or not this Specifier should accept prereleases
+ self._prereleases = prereleases
+
+ def __repr__(self):
+ # type: () -> str
+ pre = (
+ ", prereleases={0!r}".format(self.prereleases)
+ if self._prereleases is not None
+ else ""
+ )
+
+ return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)
+
+ def __str__(self):
+ # type: () -> str
+ return "{0}{1}".format(*self._spec)
+
+ @property
+ def _canonical_spec(self):
+ # type: () -> Tuple[str, Union[Version, str]]
+ return self._spec[0], canonicalize_version(self._spec[1])
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._canonical_spec)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, string_types):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._canonical_spec == other._canonical_spec
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, string_types):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._spec != other._spec
+
+ def _get_operator(self, op):
+ # type: (str) -> CallableOperator
+ operator_callable = getattr(
+ self, "_compare_{0}".format(self._operators[op])
+ ) # type: CallableOperator
+ return operator_callable
+
+ def _coerce_version(self, version):
+ # type: (UnparsedVersion) -> ParsedVersion
+ if not isinstance(version, (LegacyVersion, Version)):
+ version = parse(version)
+ return version
+
+ @property
+ def operator(self):
+ # type: () -> str
+ return self._spec[0]
+
+ @property
+ def version(self):
+ # type: () -> str
+ return self._spec[1]
+
+ @property
+ def prereleases(self):
+ # type: () -> Optional[bool]
+ return self._prereleases
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+ def __contains__(self, item):
+ # type: (str) -> bool
+ return self.contains(item)
+
+ def contains(self, item, prereleases=None):
+ # type: (UnparsedVersion, Optional[bool]) -> bool
+
+ # Determine if prereleases are to be allowed or not.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # Normalize item to a Version or LegacyVersion; this allows us to have
+ # a shortcut for ``"2.0" in Specifier(">=2")``.
+ normalized_item = self._coerce_version(item)
+
+ # Determine if we should be supporting prereleases in this specifier
+ # or not; if we do not support prereleases, then we can short-circuit
+ # the logic if this version is a prerelease.
+ if normalized_item.is_prerelease and not prereleases:
+ return False
+
+ # Actually do the comparison to determine if this item is contained
+ # within this Specifier or not.
+ operator_callable = self._get_operator(self.operator) # type: CallableOperator
+ return operator_callable(normalized_item, self.version)
+
+ def filter(self, iterable, prereleases=None):
+ # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
+
+ yielded = False
+ found_prereleases = []
+
+ kw = {"prereleases": prereleases if prereleases is not None else True}
+
+ # Attempt to iterate over all the values in the iterable and if any of
+ # them match, yield them.
+ for version in iterable:
+ parsed_version = self._coerce_version(version)
+
+ if self.contains(parsed_version, **kw):
+ # If our version is a prerelease, and we were not set to allow
+ # prereleases, then we'll store it for later in case nothing
+ # else matches this specifier.
+ if parsed_version.is_prerelease and not (
+ prereleases or self.prereleases
+ ):
+ found_prereleases.append(version)
+ # Either this is not a prerelease, or we should have been
+ # accepting prereleases from the beginning.
+ else:
+ yielded = True
+ yield version
+
+ # Now that we've iterated over everything, determine if we've yielded
+ # any values, and if we have not and we have any prereleases stored up
+ # then we will go ahead and yield the prereleases.
+ if not yielded and found_prereleases:
+ for version in found_prereleases:
+ yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(==|!=|<=|>=|<|>))
+ \s*
+ (?P<version>
+ [^,;\s)]* # Since this is a "legacy" specifier, and the version
+ # string can be just about anything, we match everything
+ # except for whitespace, a semi-colon for marker support,
+ # a closing paren since versions can be enclosed in
+ # them, and a comma since it's a version separator.
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ }
+
+ def _coerce_version(self, version):
+ # type: (Union[ParsedVersion, str]) -> LegacyVersion
+ if not isinstance(version, LegacyVersion):
+ version = LegacyVersion(str(version))
+ return version
+
+ def _compare_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective == self._coerce_version(spec)
+
+ def _compare_not_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective != self._coerce_version(spec)
+
+ def _compare_less_than_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective <= self._coerce_version(spec)
+
+ def _compare_greater_than_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective >= self._coerce_version(spec)
+
+ def _compare_less_than(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective < self._coerce_version(spec)
+
+ def _compare_greater_than(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(
+ fn # type: (Callable[[Specifier, ParsedVersion, str], bool])
+):
+ # type: (...) -> Callable[[Specifier, ParsedVersion, str], bool]
+ @functools.wraps(fn)
+ def wrapped(self, prospective, spec):
+ # type: (Specifier, ParsedVersion, str) -> bool
+ if not isinstance(prospective, Version):
+ return False
+ return fn(self, prospective, spec)
+
+ return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+ (?P<version>
+ (?:
+ # The identity operators allow for an escape hatch that will
+ # do an exact string match of the version you wish to install.
+ # This will not be parsed by PEP 440 and we cannot determine
+ # any semantic meaning from it. This operator is discouraged
+ # but included entirely as an escape hatch.
+ (?<====) # Only match for the identity operator
+ \s*
+ [^\s]* # We just match everything, except for whitespace
+ # since we are only testing for strict identity.
+ )
+ |
+ (?:
+ # The (non)equality operators allow for wild card and local
+ # versions to be specified so we have to define these two
+ # operators separately to enable that.
+ (?<===|!=) # Only match for equals and not equals
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+
+ # You cannot use a wild card and a dev or local version
+ # together so group them with a | and make them optional.
+ (?:
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+ |
+ \.\* # Wild card syntax of .*
+ )?
+ )
+ |
+ (?:
+ # The compatible operator requires at least two digits in the
+ # release segment.
+ (?<=~=) # Only match for the compatible operator
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ |
+ (?:
+ # All other operators only allow a subset of what the
+ # (non)equality operators do. Specifically they do not allow
+ # local versions to be specified nor do they allow the prefix
+ # matching wild cards.
+ (?<!==|!=|~=) # We have special cases for these
+ # operators so we want to make sure they
+ # don't match here.
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "~=": "compatible",
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ "===": "arbitrary",
+ }
+
+ @_require_version_compare
+ def _compare_compatible(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # Compatible releases have an equivalent combination of >= and ==. That
+ # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+ # implement this in terms of the other specifiers instead of
+ # implementing it ourselves. The only thing we need to do is construct
+ # the other specifiers.
+
+ # We want everything but the last item in the version, but we want to
+ # ignore post and dev releases and we want to treat the pre-release as
+ # its own separate segment.
+ prefix = ".".join(
+ list(
+ itertools.takewhile(
+ lambda x: (not x.startswith("post") and not x.startswith("dev")),
+ _version_split(spec),
+ )
+ )[:-1]
+ )
+
+ # Add the prefix notation to the end of our string
+ prefix += ".*"
+
+ return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+ prospective, prefix
+ )
+
+ @_require_version_compare
+ def _compare_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # We need special logic to handle prefix matching
+ if spec.endswith(".*"):
+ # In the case of prefix matching we want to ignore local segment.
+ prospective = Version(prospective.public)
+ # Split the spec out by dots, and pretend that there is an implicit
+ # dot in between a release segment and a pre-release segment.
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .*
+
+ # Split the prospective version out by dots, and pretend that there
+ # is an implicit dot in between a release segment and a pre-release
+ # segment.
+ split_prospective = _version_split(str(prospective))
+
+ # Shorten the prospective version to be the same length as the spec
+ # so that we can determine if the specifier is a prefix of the
+ # prospective version or not.
+ shortened_prospective = split_prospective[: len(split_spec)]
+
+ # Pad out our two sides with zeros so that they both equal the same
+ # length.
+ padded_spec, padded_prospective = _pad_version(
+ split_spec, shortened_prospective
+ )
+
+ return padded_prospective == padded_spec
+ else:
+ # Convert our spec string into a Version
+ spec_version = Version(spec)
+
+ # If the specifier does not have a local segment, then we want to
+ # act as if the prospective version also does not have a local
+ # segment.
+ if not spec_version.local:
+ prospective = Version(prospective.public)
+
+ return prospective == spec_version
+
+ @_require_version_compare
+ def _compare_not_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+ return not self._compare_equal(prospective, spec)
+
+ @_require_version_compare
+ def _compare_less_than_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) <= Version(spec)
+
+ @_require_version_compare
+ def _compare_greater_than_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) >= Version(spec)
+
+ @_require_version_compare
+ def _compare_less_than(self, prospective, spec_str):
+ # type: (ParsedVersion, str) -> bool
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is less than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective < spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+ # includes a pre-release version, we do not accept pre-release
+ # versions for the version mentioned in the specifier (e.g. <3.1 should
+ # not match 3.1.dev0, but should match 3.0.dev0).
+ if not spec.is_prerelease and prospective.is_prerelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that the prospective version is both
+ # less than the spec version *and* it's not a pre-release of the same
+ # version in the spec.
+ return True
+
+ @_require_version_compare
+ def _compare_greater_than(self, prospective, spec_str):
+ # type: (ParsedVersion, str) -> bool
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is greater than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective > spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+ # includes a post-release version, we do not accept
+ # post-release versions for the version mentioned in the specifier
+ # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+ if not spec.is_postrelease and prospective.is_postrelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # Ensure that we do not allow a local version of the version mentioned
+ # in the specifier, which is technically greater than, to match.
+ if prospective.local is not None:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that the prospective version is both
+ # greater than the spec version *and* it's not a pre-release of the
+ # same version in the spec.
+ return True
+
+ def _compare_arbitrary(self, prospective, spec):
+ # type: (Version, str) -> bool
+ return str(prospective).lower() == str(spec).lower()
+
+ @property
+ def prereleases(self):
+ # type: () -> bool
+
+ # If there is an explicit prereleases set for this, then we'll just
+ # blindly use that.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # Look at all of our specifiers and determine if they are inclusive
+ # operators, and if they are if they are including an explicit
+ # prerelease.
+ operator, version = self._spec
+ if operator in ["==", ">=", "<=", "~=", "==="]:
+ # The == specifier can include a trailing .*; if it does, we
+ # want to remove it before parsing.
+ if operator == "==" and version.endswith(".*"):
+ version = version[:-2]
+
+ # Parse the version, and if it is a pre-release then this
+ # specifier allows pre-releases.
+ if parse(version).is_prerelease:
+ return True
+
+ return False
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+ # type: (str) -> List[str]
+ result = [] # type: List[str]
+ for item in version.split("."):
+ match = _prefix_regex.search(item)
+ if match:
+ result.extend(match.groups())
+ else:
+ result.append(item)
+ return result
+
+
+def _pad_version(left, right):
+ # type: (List[str], List[str]) -> Tuple[List[str], List[str]]
+ left_split, right_split = [], []
+
+ # Get the release segment of our versions
+ left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+ right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+ # Get the rest of our versions
+ left_split.append(left[len(left_split[0]) :])
+ right_split.append(right[len(right_split[0]) :])
+
+ # Insert our padding
+ left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+ right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+ return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
+
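+# Illustrative note (usage sketch, not part of the upstream module): these
+# helpers support the prefix matching used by ``==1.2.*`` style specifiers, e.g.
+#     _version_split("1.0rc1") -> ['1', '0', 'rc1']
+#     _pad_version(['1', '2'], ['1', '2', '3']) -> (['1', '2', '0'], ['1', '2', '3'])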
+
+class SpecifierSet(BaseSpecifier):
+ def __init__(self, specifiers="", prereleases=None):
+ # type: (str, Optional[bool]) -> None
+
+ # Split on , to break each individual specifier into its own item, and
+ # strip each item to remove leading/trailing whitespace.
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+ # Parse each individual specifier, attempting first to make it a
+ # Specifier and falling back to a LegacySpecifier.
+ parsed = set()
+ for specifier in split_specifiers:
+ try:
+ parsed.add(Specifier(specifier))
+ except InvalidSpecifier:
+ parsed.add(LegacySpecifier(specifier))
+
+ # Turn our parsed specifiers into a frozen set and save them for later.
+ self._specs = frozenset(parsed)
+
+ # Store our prereleases value so we can use it later to determine if
+ # we accept prereleases or not.
+ self._prereleases = prereleases
+
+ def __repr__(self):
+ # type: () -> str
+ pre = (
+ ", prereleases={0!r}".format(self.prereleases)
+ if self._prereleases is not None
+ else ""
+ )
+
+ return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+ def __str__(self):
+ # type: () -> str
+ return ",".join(sorted(str(s) for s in self._specs))
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._specs)
+
+ def __and__(self, other):
+ # type: (Union[SpecifierSet, str]) -> SpecifierSet
+ if isinstance(other, string_types):
+ other = SpecifierSet(other)
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ specifier = SpecifierSet()
+ specifier._specs = frozenset(self._specs | other._specs)
+
+ if self._prereleases is None and other._prereleases is not None:
+ specifier._prereleases = other._prereleases
+ elif self._prereleases is not None and other._prereleases is None:
+ specifier._prereleases = self._prereleases
+ elif self._prereleases == other._prereleases:
+ specifier._prereleases = self._prereleases
+ else:
+ raise ValueError(
+ "Cannot combine SpecifierSets with True and False prerelease "
+ "overrides."
+ )
+
+ return specifier
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, (string_types, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs == other._specs
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, (string_types, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs != other._specs
+
+ def __len__(self):
+ # type: () -> int
+ return len(self._specs)
+
+ def __iter__(self):
+ # type: () -> Iterator[FrozenSet[_IndividualSpecifier]]
+ return iter(self._specs)
+
+ @property
+ def prereleases(self):
+ # type: () -> Optional[bool]
+
+ # If we have been given an explicit prerelease modifier, then we'll
+ # pass that through here.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # If we don't have any specifiers, and we don't have a forced value,
+ # then we'll just return None since we don't know if this should have
+ # pre-releases or not.
+ if not self._specs:
+ return None
+
+ # Otherwise we'll see if any of the given specifiers accept
+ # prereleases, if any of them do we'll return True, otherwise False.
+ return any(s.prereleases for s in self._specs)
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+ def __contains__(self, item):
+ # type: (Union[ParsedVersion, str]) -> bool
+ return self.contains(item)
+
+ def contains(self, item, prereleases=None):
+ # type: (Union[ParsedVersion, str], Optional[bool]) -> bool
+
+ # Ensure that our item is a Version or LegacyVersion instance.
+ if not isinstance(item, (LegacyVersion, Version)):
+ item = parse(item)
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # We can determine if we're going to allow pre-releases by looking to
+ # see if any of the underlying items supports them. If none of them do
+ # and this item is a pre-release then we do not allow it and we can
+ # short circuit that here.
+ # Note: This means that 1.0.dev1 would not be contained in something
+ # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
+ if not prereleases and item.is_prerelease:
+ return False
+
+ # We simply dispatch to the underlying specs here to make sure that the
+ # given version is contained within all of them.
+ # Note: This use of all() here means that an empty set of specifiers
+ # will always return True, this is an explicit design decision.
+ return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+ def filter(
+ self,
+ iterable, # type: Iterable[Union[ParsedVersion, str]]
+ prereleases=None, # type: Optional[bool]
+ ):
+ # type: (...) -> Iterable[Union[ParsedVersion, str]]
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # If we have any specifiers, then we want to wrap our iterable in the
+ # filter method for each one, this will act as a logical AND amongst
+ # each specifier.
+ if self._specs:
+ for spec in self._specs:
+ iterable = spec.filter(iterable, prereleases=bool(prereleases))
+ return iterable
+ # If we do not have any specifiers, then we need to have a rough filter
+ # which will filter out any pre-releases, unless there are no final
+ # releases, and which will filter out LegacyVersion in general.
+ else:
+ filtered = [] # type: List[Union[ParsedVersion, str]]
+ found_prereleases = [] # type: List[Union[ParsedVersion, str]]
+
+ for item in iterable:
+ # Ensure that we have some kind of Version class for this item.
+ if not isinstance(item, (LegacyVersion, Version)):
+ parsed_version = parse(item)
+ else:
+ parsed_version = item
+
+ # Filter out any item which is parsed as a LegacyVersion
+ if isinstance(parsed_version, LegacyVersion):
+ continue
+
+ # Store any item which is a pre-release for later unless we've
+ # already found a final version or we are accepting prereleases
+ if parsed_version.is_prerelease and not prereleases:
+ if not filtered:
+ found_prereleases.append(item)
+ else:
+ filtered.append(item)
+
+ # If we've found no items except for pre-releases, then we'll go
+ # ahead and use the pre-releases
+ if not filtered and found_prereleases and prereleases is None:
+ return found_prereleases
+
+ return filtered
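+
+
+if __name__ == "__main__":
+ # Illustrative self-test (a minimal usage sketch, not part of upstream packaging):
+ # combine several PEP 440 specifiers and test candidate versions against them.
+ spec = SpecifierSet(">=1.0,!=1.3.4,<2.0")
+ print("1.4" in spec) # True
+ print(spec.contains("1.3.4")) # False
+ print(list(spec.filter(["1.0", "1.3.4", "2.1", "1.5.1"]))) # ['1.0', '1.5.1']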
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/tags.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/tags.py
new file mode 100644
index 0000000000..9064910b8b
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/tags.py
@@ -0,0 +1,751 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import
+
+import distutils.util
+
+try:
+ from importlib.machinery import EXTENSION_SUFFIXES
+except ImportError: # pragma: no cover
+ import imp
+
+ EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
+ del imp
+import logging
+import os
+import platform
+import re
+import struct
+import sys
+import sysconfig
+import warnings
+
+from ._typing import TYPE_CHECKING, cast
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ Dict,
+ FrozenSet,
+ IO,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ )
+
+ PythonVersion = Sequence[int]
+ MacVersion = Tuple[int, int]
+ GlibcVersion = Tuple[int, int]
+
+
+logger = logging.getLogger(__name__)
+
+INTERPRETER_SHORT_NAMES = {
+ "python": "py", # Generic.
+ "cpython": "cp",
+ "pypy": "pp",
+ "ironpython": "ip",
+ "jython": "jy",
+} # type: Dict[str, str]
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+class Tag(object):
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
+
+ __slots__ = ["_interpreter", "_abi", "_platform"]
+
+ def __init__(self, interpreter, abi, platform):
+ # type: (str, str, str) -> None
+ self._interpreter = interpreter.lower()
+ self._abi = abi.lower()
+ self._platform = platform.lower()
+
+ @property
+ def interpreter(self):
+ # type: () -> str
+ return self._interpreter
+
+ @property
+ def abi(self):
+ # type: () -> str
+ return self._abi
+
+ @property
+ def platform(self):
+ # type: () -> str
+ return self._platform
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if not isinstance(other, Tag):
+ return NotImplemented
+
+ return (
+ (self.platform == other.platform)
+ and (self.abi == other.abi)
+ and (self.interpreter == other.interpreter)
+ )
+
+ def __hash__(self):
+ # type: () -> int
+ return hash((self._interpreter, self._abi, self._platform))
+
+ def __str__(self):
+ # type: () -> str
+ return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
+
+
+def parse_tag(tag):
+ # type: (str) -> FrozenSet[Tag]
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
+ tags = set()
+ interpreters, abis, platforms = tag.split("-")
+ for interpreter in interpreters.split("."):
+ for abi in abis.split("."):
+ for platform_ in platforms.split("."):
+ tags.add(Tag(interpreter, abi, platform_))
+ return frozenset(tags)
+
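+# Illustrative note (usage sketch, not part of the upstream module):
+# a compressed tag set expands into multiple Tag instances, e.g.
+#     parse_tag("cp37-cp37m.abi3-manylinux1_x86_64")
+#     # -> {Tag("cp37", "cp37m", "manylinux1_x86_64"),
+#     #     Tag("cp37", "abi3", "manylinux1_x86_64")}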
+
+def _warn_keyword_parameter(func_name, kwargs):
+ # type: (str, Dict[str, bool]) -> bool
+ """
+ Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
+ """
+ if not kwargs:
+ return False
+ elif len(kwargs) > 1 or "warn" not in kwargs:
+ kwargs.pop("warn", None)
+ arg = next(iter(kwargs.keys()))
+ raise TypeError(
+ "{}() got an unexpected keyword argument {!r}".format(func_name, arg)
+ )
+ return kwargs["warn"]
+
+
+def _get_config_var(name, warn=False):
+ # type: (str, bool) -> Union[int, str, None]
+ value = sysconfig.get_config_var(name)
+ if value is None and warn:
+ logger.debug(
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+ )
+ return value
+
+
+def _normalize_string(string):
+ # type: (str) -> str
+ return string.replace(".", "_").replace("-", "_")
+
+
+def _abi3_applies(python_version):
+ # type: (PythonVersion) -> bool
+ """
+ Determine if the Python version supports abi3.
+
+ PEP 384 was first implemented in Python 3.2.
+ """
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version, warn=False):
+ # type: (PythonVersion, bool) -> List[str]
+ py_version = tuple(py_version) # To allow for version comparison.
+ abis = []
+ version = _version_nodot(py_version[:2])
+ debug = pymalloc = ucs4 = ""
+ with_debug = _get_config_var("Py_DEBUG", warn)
+ has_refcount = hasattr(sys, "gettotalrefcount")
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+ # extension modules is the best option.
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
+ debug = "d"
+ if py_version < (3, 8):
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+ if with_pymalloc or with_pymalloc is None:
+ pymalloc = "m"
+ if py_version < (3, 3):
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+ if unicode_size == 4 or (
+ unicode_size is None and sys.maxunicode == 0x10FFFF
+ ):
+ ucs4 = "u"
+ elif debug:
+ # Debug builds can also load "normal" extension modules.
+ # We can also assume no UCS-4 or pymalloc requirement.
+ abis.append("cp{version}".format(version=version))
+ abis.insert(
+ 0,
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+ ),
+ )
+ return abis
+
+
+def cpython_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a CPython interpreter.
+
+ The tags consist of:
+ - cp<python_version>-<abi>-<platform>
+ - cp<python_version>-abi3-<platform>
+ - cp<python_version>-none-<platform>
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
+
+ If python_version only specifies a major version then user-provided ABIs and
+ the 'none' ABI tag will be used.
+
+ If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+ their normal position and not at the beginning.
+ """
+ warn = _warn_keyword_parameter("cpython_tags", kwargs)
+ if not python_version:
+ python_version = sys.version_info[:2]
+
+ interpreter = "cp{}".format(_version_nodot(python_version[:2]))
+
+ if abis is None:
+ if len(python_version) > 1:
+ abis = _cpython_abis(python_version, warn)
+ else:
+ abis = []
+ abis = list(abis)
+ # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"):
+ try:
+ abis.remove(explicit_abi)
+ except ValueError:
+ pass
+
+ platforms = list(platforms or _platform_tags())
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+ if _abi3_applies(python_version):
+ for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
+ yield tag
+ for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
+ yield tag
+
+ if _abi3_applies(python_version):
+ for minor_version in range(python_version[1] - 1, 1, -1):
+ for platform_ in platforms:
+ interpreter = "cp{version}".format(
+ version=_version_nodot((python_version[0], minor_version))
+ )
+ yield Tag(interpreter, "abi3", platform_)
+
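+# Illustrative ordering (not part of the upstream module): for example,
+#     list(cpython_tags((3, 8), abis=["cp38"], platforms=["manylinux1_x86_64"]))
+# starts with cp38-cp38-manylinux1_x86_64, followed by cp38-abi3-manylinux1_x86_64
+# and cp38-none-manylinux1_x86_64, and ends with the abi3 tags for older
+# interpreters (cp37-abi3-... down to cp32-abi3-...), matching the priority
+# order described in the docstring above.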
+
+def _generic_abi():
+ # type: () -> Iterator[str]
+ abi = sysconfig.get_config_var("SOABI")
+ if abi:
+ yield _normalize_string(abi)
+
+
+def generic_tags(
+ interpreter=None, # type: Optional[str]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a generic interpreter.
+
+ The tags consist of:
+ - <interpreter>-<abi>-<platform>
+
+ The "none" ABI will be added if it was not explicitly provided.
+ """
+ warn = _warn_keyword_parameter("generic_tags", kwargs)
+ if not interpreter:
+ interp_name = interpreter_name()
+ interp_version = interpreter_version(warn=warn)
+ interpreter = "".join([interp_name, interp_version])
+ if abis is None:
+ abis = _generic_abi()
+ platforms = list(platforms or _platform_tags())
+ abis = list(abis)
+ if "none" not in abis:
+ abis.append("none")
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version):
+ # type: (PythonVersion) -> Iterator[str]
+ """
+ Yields Python versions in descending order.
+
+ After the latest version, the major-only version will be yielded, and then
+ all previous versions of that major version.
+ """
+ if len(py_version) > 1:
+ yield "py{version}".format(version=_version_nodot(py_version[:2]))
+ yield "py{major}".format(major=py_version[0])
+ if len(py_version) > 1:
+ for minor in range(py_version[1] - 1, -1, -1):
+ yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
+
+
+def compatible_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ interpreter=None, # type: Optional[str]
+ platforms=None, # type: Optional[Iterable[str]]
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the sequence of tags that are compatible with a specific version of Python.
+
+ The tags consist of:
+ - py*-none-<platform>
+ - <interpreter>-none-any # ... if `interpreter` is provided.
+ - py*-none-any
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+ platforms = list(platforms or _platform_tags())
+ for version in _py_interpreter_range(python_version):
+ for platform_ in platforms:
+ yield Tag(version, "none", platform_)
+ if interpreter:
+ yield Tag(interpreter, "none", "any")
+ for version in _py_interpreter_range(python_version):
+ yield Tag(version, "none", "any")
+
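+# Illustrative ordering (not part of the upstream module):
+#     list(compatible_tags((3, 6), "cp36", ["linux_x86_64"]))
+# yields py36-none-linux_x86_64, py3-none-linux_x86_64, then py35 down to py30
+# on that platform, followed by cp36-none-any and the same py* sequence with
+# the "any" platform.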
+
+def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
+ # type: (str, bool) -> str
+ if not is_32bit:
+ return arch
+
+ if arch.startswith("ppc"):
+ return "ppc"
+
+ return "i386"
+
+
+def _mac_binary_formats(version, cpu_arch):
+ # type: (MacVersion, str) -> List[str]
+ formats = [cpu_arch]
+ if cpu_arch == "x86_64":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat64", "fat32"])
+
+ elif cpu_arch == "i386":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat32", "fat"])
+
+ elif cpu_arch == "ppc64":
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4):
+ return []
+ formats.append("fat64")
+
+ elif cpu_arch == "ppc":
+ if version > (10, 6):
+ return []
+ formats.extend(["fat32", "fat"])
+
+ formats.append("universal")
+ return formats
+
+
+def mac_platforms(version=None, arch=None):
+ # type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
+ """
+ Yields the platform tags for a macOS system.
+
+ The `version` parameter is a two-item tuple specifying the macOS version to
+ generate platform tags for. The `arch` parameter is the CPU architecture to
+ generate platform tags for. Both parameters default to the appropriate value
+ for the current system.
+ """
+ version_str, _, cpu_arch = platform.mac_ver() # type: ignore
+ if version is None:
+ version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+ if arch is None:
+ arch = _mac_arch(cpu_arch)
+ for minor_version in range(version[1], -1, -1):
+ compat_version = version[0], minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
+
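+# Illustrative output (not part of the upstream module): on a macOS 10.15
+# x86-64 machine, mac_platforms() starts with macosx_10_15_x86_64, then the
+# broader binary formats for 10.15 (intel, fat64, fat32, universal), and
+# repeats that pattern for each older minor release; for x86_64 nothing is
+# emitted below 10.4.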
+
+# From PEP 513.
+def _is_manylinux_compatible(name, glibc_version):
+ # type: (str, GlibcVersion) -> bool
+ # Check for presence of _manylinux module.
+ try:
+ import _manylinux # noqa
+
+ return bool(getattr(_manylinux, name + "_compatible"))
+ except (ImportError, AttributeError):
+ # Fall through to heuristic check below.
+ pass
+
+ return _have_compatible_glibc(*glibc_version)
+
+
+def _glibc_version_string():
+ # type: () -> Optional[str]
+ # Returns glibc version string, or None if not using glibc.
+ return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _glibc_version_string_confstr():
+ # type: () -> Optional[str]
+ """
+ Primary implementation of glibc_version_string using os.confstr.
+ """
+ # os.confstr is quite a bit faster than ctypes.CDLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module.
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+ version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
+ "CS_GNU_LIBC_VERSION"
+ )
+ assert version_string is not None
+ _, version = version_string.split() # type: Tuple[str, str]
+ except (AssertionError, AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def _glibc_version_string_ctypes():
+ # type: () -> Optional[str]
+ """
+ Fallback implementation of glibc_version_string using ctypes.
+ """
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ #
+ # Note: typeshed is wrong here so we are ignoring this line.
+ process_namespace = ctypes.CDLL(None) # type: ignore
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str = gnu_get_libc_version() # type: str
+ # py2 / py3 compatibility:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+# Separated out from have_compatible_glibc for easier unit testing.
+def _check_glibc_version(version_str, required_major, minimum_minor):
+ # type: (str, int, int) -> bool
+ # Parse string and check against requested version.
+ #
+ # We use a regexp instead of str.split because we want to discard any
+ # random junk that might come after the minor version -- this might happen
+ # in patched/forked versions of glibc (e.g. Linaro's version of glibc
+ # uses version strings like "2.20-2014.11"). See gh-3588.
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+ if not m:
+ warnings.warn(
+ "Expected glibc version with 2 components major.minor,"
+ " got: %s" % version_str,
+ RuntimeWarning,
+ )
+ return False
+ return (
+ int(m.group("major")) == required_major
+ and int(m.group("minor")) >= minimum_minor
+ )
+
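+# Illustrative check (not part of the upstream module): the regex tolerates
+# vendor suffixes, so _check_glibc_version("2.20-2014.11", 2, 17) is True,
+# while _check_glibc_version("2.5", 2, 12) is False because the minor version
+# is too old.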
+
+def _have_compatible_glibc(required_major, minimum_minor):
+ # type: (int, int) -> bool
+ version_str = _glibc_version_string()
+ if version_str is None:
+ return False
+ return _check_glibc_version(version_str, required_major, minimum_minor)
+
+
+# Python does not provide platform information at sufficient granularity to
+# identify the architecture of the running executable in some cases, so we
+# determine it dynamically by reading the information from the running
+# process. This only applies on Linux, which uses the ELF format.
+class _ELFFileHeader(object):
+ # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+ class _InvalidELFFileHeader(ValueError):
+ """
+ An invalid ELF file header was found.
+ """
+
+ ELF_MAGIC_NUMBER = 0x7F454C46
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ EM_386 = 3
+ EM_S390 = 22
+ EM_ARM = 40
+ EM_X86_64 = 62
+ EF_ARM_ABIMASK = 0xFF000000
+ EF_ARM_ABI_VER5 = 0x05000000
+ EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+ def __init__(self, file):
+ # type: (IO[bytes]) -> None
+ def unpack(fmt):
+ # type: (str) -> int
+ try:
+ (result,) = struct.unpack(
+ fmt, file.read(struct.calcsize(fmt))
+ ) # type: (int, )
+ except struct.error:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ return result
+
+ self.e_ident_magic = unpack(">I")
+ if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_class = unpack("B")
+ if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_data = unpack("B")
+ if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_version = unpack("B")
+ self.e_ident_osabi = unpack("B")
+ self.e_ident_abiversion = unpack("B")
+ self.e_ident_pad = file.read(7)
+ format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+ format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+ format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+ format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+ self.e_type = unpack(format_h)
+ self.e_machine = unpack(format_h)
+ self.e_version = unpack(format_i)
+ self.e_entry = unpack(format_p)
+ self.e_phoff = unpack(format_p)
+ self.e_shoff = unpack(format_p)
+ self.e_flags = unpack(format_i)
+ self.e_ehsize = unpack(format_h)
+ self.e_phentsize = unpack(format_h)
+ self.e_phnum = unpack(format_h)
+ self.e_shentsize = unpack(format_h)
+ self.e_shnum = unpack(format_h)
+ self.e_shstrndx = unpack(format_h)
+
+
+def _get_elf_header():
+ # type: () -> Optional[_ELFFileHeader]
+ try:
+ with open(sys.executable, "rb") as f:
+ elf_header = _ELFFileHeader(f)
+ except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+ return None
+ return elf_header
+
+
+def _is_linux_armhf():
+ # type: () -> bool
+ # hard-float ABI can be detected from the ELF header of the running
+ # process
+ # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_ARM
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+ ) == elf_header.EF_ARM_ABI_VER5
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+ ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+ return result
+
+
+def _is_linux_i686():
+ # type: () -> bool
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_386
+ return result
+
+
+def _have_compatible_manylinux_abi(arch):
+ # type: (str) -> bool
+ if arch == "armv7l":
+ return _is_linux_armhf()
+ if arch == "i686":
+ return _is_linux_i686()
+ return True
+
+
+def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
+ # type: (bool) -> Iterator[str]
+ linux = _normalize_string(distutils.util.get_platform())
+ if is_32bit:
+ if linux == "linux_x86_64":
+ linux = "linux_i686"
+ elif linux == "linux_aarch64":
+ linux = "linux_armv7l"
+ manylinux_support = []
+ _, arch = linux.split("_", 1)
+ if _have_compatible_manylinux_abi(arch):
+ if arch in {"x86_64", "i686", "aarch64", "armv7l", "ppc64", "ppc64le", "s390x"}:
+ manylinux_support.append(
+ ("manylinux2014", (2, 17))
+ ) # CentOS 7 w/ glibc 2.17 (PEP 599)
+ if arch in {"x86_64", "i686"}:
+ manylinux_support.append(
+ ("manylinux2010", (2, 12))
+ ) # CentOS 6 w/ glibc 2.12 (PEP 571)
+ manylinux_support.append(
+ ("manylinux1", (2, 5))
+ ) # CentOS 5 w/ glibc 2.5 (PEP 513)
+ manylinux_support_iter = iter(manylinux_support)
+ for name, glibc_version in manylinux_support_iter:
+ if _is_manylinux_compatible(name, glibc_version):
+ yield linux.replace("linux", name)
+ break
+ # Support for a later manylinux implies support for an earlier version.
+ for name, _ in manylinux_support_iter:
+ yield linux.replace("linux", name)
+ yield linux
+
+
+def _generic_platforms():
+ # type: () -> Iterator[str]
+ yield _normalize_string(distutils.util.get_platform())
+
+
+def _platform_tags():
+ # type: () -> Iterator[str]
+ """
+ Provides the platform tags for this installation.
+ """
+ if platform.system() == "Darwin":
+ return mac_platforms()
+ elif platform.system() == "Linux":
+ return _linux_platforms()
+ else:
+ return _generic_platforms()
+
+
+def interpreter_name():
+ # type: () -> str
+ """
+ Returns the name of the running interpreter.
+ """
+ try:
+ name = sys.implementation.name # type: ignore
+ except AttributeError: # pragma: no cover
+ # Python 2.7 compatibility.
+ name = platform.python_implementation().lower()
+ return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(**kwargs):
+ # type: (bool) -> str
+ """
+ Returns the version of the running interpreter.
+ """
+ warn = _warn_keyword_parameter("interpreter_version", kwargs)
+ version = _get_config_var("py_version_nodot", warn=warn)
+ if version:
+ version = str(version)
+ else:
+ version = _version_nodot(sys.version_info[:2])
+ return version
+
+
+def _version_nodot(version):
+ # type: (PythonVersion) -> str
+ if any(v >= 10 for v in version):
+ sep = "_"
+ else:
+ sep = ""
+ return sep.join(map(str, version))
+
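+# Illustrative values (not part of the upstream module): _version_nodot((3, 9))
+# is "39", while _version_nodot((3, 10)) is "3_10" because any component of 10
+# or more needs an explicit separator to stay unambiguous.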
+
+def sys_tags(**kwargs):
+ # type: (bool) -> Iterator[Tag]
+ """
+ Returns the sequence of tag triples for the running interpreter.
+
+ The order of the sequence corresponds to priority order for the
+ interpreter, from most to least important.
+ """
+ warn = _warn_keyword_parameter("sys_tags", kwargs)
+
+ interp_name = interpreter_name()
+ if interp_name == "cp":
+ for tag in cpython_tags(warn=warn):
+ yield tag
+ else:
+ for tag in generic_tags():
+ yield tag
+
+ for tag in compatible_tags():
+ yield tag
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/utils.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/utils.py
new file mode 100644
index 0000000000..19579c1a0f
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/utils.py
@@ -0,0 +1,65 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+from ._typing import TYPE_CHECKING, cast
+from .version import InvalidVersion, Version
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import NewType, Union
+
+ NormalizedName = NewType("NormalizedName", str)
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+ # type: (str) -> NormalizedName
+ # This is taken from PEP 503.
+ value = _canonicalize_regex.sub("-", name).lower()
+ return cast("NormalizedName", value)
+
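+# Illustrative values (not part of the upstream module): runs of ".", "-" and
+# "_" collapse to a single "-" and the result is lowercased, so
+# canonicalize_name("Foo__Bar.baz") == "foo-bar-baz".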
+
+def canonicalize_version(_version):
+ # type: (str) -> Union[Version, str]
+ """
+ This is very similar to Version.__str__, but has one subtle difference
+ with the way it handles the release segment.
+ """
+
+ try:
+ version = Version(_version)
+ except InvalidVersion:
+ # Legacy versions cannot be normalized
+ return _version
+
+ parts = []
+
+ # Epoch
+ if version.epoch != 0:
+ parts.append("{0}!".format(version.epoch))
+
+ # Release segment
+ # NB: This strips trailing '.0's to normalize
+ parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
+
+ # Pre-release
+ if version.pre is not None:
+ parts.append("".join(str(x) for x in version.pre))
+
+ # Post-release
+ if version.post is not None:
+ parts.append(".post{0}".format(version.post))
+
+ # Development release
+ if version.dev is not None:
+ parts.append(".dev{0}".format(version.dev))
+
+ # Local version segment
+ if version.local is not None:
+ parts.append("+{0}".format(version.local))
+
+ return "".join(parts)
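+
+# Illustrative values (not part of the upstream module):
+# canonicalize_version("1.0.0") == "1" (trailing ".0" release components are
+# dropped), canonicalize_version("2.0.0.post3") == "2.post3", and an
+# unparseable legacy string such as "french toast" is returned unchanged.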
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/version.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/version.py
new file mode 100644
index 0000000000..00371e86a8
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/version.py
@@ -0,0 +1,535 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity, NegativeInfinity
+from ._typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+ from ._structures import InfinityType, NegativeInfinityType
+
+ InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+ PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+ SubLocalType = Union[InfiniteTypes, int, str]
+ LocalType = Union[
+ NegativeInfinityType,
+ Tuple[
+ Union[
+ SubLocalType,
+ Tuple[SubLocalType, str],
+ Tuple[NegativeInfinityType, SubLocalType],
+ ],
+ ...,
+ ],
+ ]
+ CmpKey = Tuple[
+ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+ ]
+ LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+ VersionComparisonMethod = Callable[
+ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+ ]
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+
+_Version = collections.namedtuple(
+ "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version):
+ # type: (str) -> Union[LegacyVersion, Version]
+ """
+ Parse the given version string and return either a :class:`Version` object
+ or a :class:`LegacyVersion` object depending on whether the given version is
+ a valid PEP 440 version or a legacy version.
+ """
+ try:
+ return Version(version)
+ except InvalidVersion:
+ return LegacyVersion(version)
+
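+# Illustrative behavior (not part of the upstream module): parse("1.0a1")
+# returns a Version because the string is valid PEP 440, while
+# parse("french toast") falls back to a LegacyVersion; every LegacyVersion
+# sorts before every Version because its comparison key hardcodes an epoch
+# of -1 (see _legacy_cmpkey below).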
+
+class InvalidVersion(ValueError):
+ """
+ An invalid version was found, users should refer to PEP 440.
+ """
+
+
+class _BaseVersion(object):
+ _key = None # type: Union[CmpKey, LegacyCmpKey]
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._key)
+
+ def __lt__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s < o)
+
+ def __le__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s <= o)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return self._compare(other, lambda s, o: s == o)
+
+ def __ge__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s >= o)
+
+ def __gt__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s > o)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return self._compare(other, lambda s, o: s != o)
+
+ def _compare(self, other, method):
+ # type: (object, VersionComparisonMethod) -> Union[bool, NotImplemented]
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+ def __init__(self, version):
+ # type: (str) -> None
+ self._version = str(version)
+ self._key = _legacy_cmpkey(self._version)
+
+ def __str__(self):
+ # type: () -> str
+ return self._version
+
+ def __repr__(self):
+ # type: () -> str
+ return "<LegacyVersion({0})>".format(repr(str(self)))
+
+ @property
+ def public(self):
+ # type: () -> str
+ return self._version
+
+ @property
+ def base_version(self):
+ # type: () -> str
+ return self._version
+
+ @property
+ def epoch(self):
+ # type: () -> int
+ return -1
+
+ @property
+ def release(self):
+ # type: () -> None
+ return None
+
+ @property
+ def pre(self):
+ # type: () -> None
+ return None
+
+ @property
+ def post(self):
+ # type: () -> None
+ return None
+
+ @property
+ def dev(self):
+ # type: () -> None
+ return None
+
+ @property
+ def local(self):
+ # type: () -> None
+ return None
+
+ @property
+ def is_prerelease(self):
+ # type: () -> bool
+ return False
+
+ @property
+ def is_postrelease(self):
+ # type: () -> bool
+ return False
+
+ @property
+ def is_devrelease(self):
+ # type: () -> bool
+ return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+ "pre": "c",
+ "preview": "c",
+ "-": "final-",
+ "rc": "c",
+ "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+ # type: (str) -> Iterator[str]
+ for part in _legacy_version_component_re.split(s):
+ part = _legacy_version_replacement_map.get(part, part)
+
+ if not part or part == ".":
+ continue
+
+ if part[:1] in "0123456789":
+ # pad for numeric comparison
+ yield part.zfill(8)
+ else:
+ yield "*" + part
+
+ # ensure that alpha/beta/candidate are before final
+ yield "*final"
+
+
+def _legacy_cmpkey(version):
+ # type: (str) -> LegacyCmpKey
+
+ # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+ # greater than or equal to 0. This will effectively put the LegacyVersion,
+ # which uses the de facto standard originally implemented by setuptools,
+ # before all PEP 440 versions.
+ epoch = -1
+
+ # This scheme is taken from the pkg_resources.parse_version of setuptools
+ # prior to its adoption of the packaging library.
+ parts = [] # type: List[str]
+ for part in _parse_version_parts(version.lower()):
+ if part.startswith("*"):
+ # remove "-" before a prerelease tag
+ if part < "*final":
+ while parts and parts[-1] == "*final-":
+ parts.pop()
+
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1] == "00000000":
+ parts.pop()
+
+ parts.append(part)
+
+ return epoch, tuple(parts)
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+ v?
+ (?:
+ (?:(?P<epoch>[0-9]+)!)? # epoch
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ def __init__(self, version):
+ # type: (str) -> None
+
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+ post=_parse_letter_version(
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+ ),
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Version({0})>".format(repr(str(self)))
+
+ def __str__(self):
+ # type: () -> str
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append("{0}!".format(self.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ # Pre-release
+ if self.pre is not None:
+ parts.append("".join(str(x) for x in self.pre))
+
+ # Post-release
+ if self.post is not None:
+ parts.append(".post{0}".format(self.post))
+
+ # Development release
+ if self.dev is not None:
+ parts.append(".dev{0}".format(self.dev))
+
+ # Local version segment
+ if self.local is not None:
+ parts.append("+{0}".format(self.local))
+
+ return "".join(parts)
+
+ @property
+ def epoch(self):
+ # type: () -> int
+ _epoch = self._version.epoch # type: int
+ return _epoch
+
+ @property
+ def release(self):
+ # type: () -> Tuple[int, ...]
+ _release = self._version.release # type: Tuple[int, ...]
+ return _release
+
+ @property
+ def pre(self):
+ # type: () -> Optional[Tuple[str, int]]
+ _pre = self._version.pre # type: Optional[Tuple[str, int]]
+ return _pre
+
+ @property
+ def post(self):
+ # type: () -> Optional[Tuple[str, int]]
+ return self._version.post[1] if self._version.post else None
+
+ @property
+ def dev(self):
+ # type: () -> Optional[Tuple[str, int]]
+ return self._version.dev[1] if self._version.dev else None
+
+ @property
+ def local(self):
+ # type: () -> Optional[str]
+ if self._version.local:
+ return ".".join(str(x) for x in self._version.local)
+ else:
+ return None
+
+ @property
+ def public(self):
+ # type: () -> str
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self):
+ # type: () -> str
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append("{0}!".format(self.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ return "".join(parts)
+
+ @property
+ def is_prerelease(self):
+ # type: () -> bool
+ return self.dev is not None or self.pre is not None
+
+ @property
+ def is_postrelease(self):
+ # type: () -> bool
+ return self.post is not None
+
+ @property
+ def is_devrelease(self):
+ # type: () -> bool
+ return self.dev is not None
+
+ @property
+ def major(self):
+ # type: () -> int
+ return self.release[0] if len(self.release) >= 1 else 0
+
+ @property
+ def minor(self):
+ # type: () -> int
+ return self.release[1] if len(self.release) >= 2 else 0
+
+ @property
+ def micro(self):
+ # type: () -> int
+ return self.release[2] if len(self.release) >= 3 else 0
+
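+# Illustrative accessors (not part of the upstream module): for
+# v = Version("1.2.3rc1+local.1"), str(v) round-trips to "1.2.3rc1+local.1",
+# v.public == "1.2.3rc1", v.base_version == "1.2.3", v.pre == ("rc", 1),
+# v.local == "local.1" and v.is_prerelease is True.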
+
+def _parse_letter_version(
+ letter, # type: str
+ number, # type: Union[str, bytes, SupportsInt]
+):
+ # type: (...) -> Optional[Tuple[str, int]]
+
+ if letter:
+ # We consider there to be an implicit 0 in a pre-release if there is
+ # not a numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume if we are given a number, but we are not given a letter
+ # then this is using the implicit post release syntax (e.g. 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+ return None
+
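+# Illustrative normalization (not part of the upstream module):
+# _parse_letter_version("alpha", None) == ("a", 0),
+# _parse_letter_version("rev", "4") == ("post", 4), and a bare number with no
+# letter, as in the implicit post-release "1.0-1", becomes ("post", 1).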
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+ # type: (str) -> Optional[LocalType]
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_separators.split(local)
+ )
+ return None
+
+
+def _cmpkey(
+ epoch, # type: int
+ release, # type: Tuple[int, ...]
+ pre, # type: Optional[Tuple[str, int]]
+ post, # type: Optional[Tuple[str, int]]
+ dev, # type: Optional[Tuple[str, int]]
+ local, # type: Optional[Tuple[SubLocalType]]
+):
+ # type: (...) -> CmpKey
+
+ # When we compare a release version, we want to compare it with all of the
+ # trailing zeros removed. So we'll reverse the list, drop all of the now
+ # leading zeros until we come to something non-zero, then re-reverse the
+ # remainder back into the correct order, and use that tuple as our sorting
+ # key.
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
+ # if there is not a pre or a post segment. If we have one of those then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ _pre = NegativeInfinity # type: PrePostDevType
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ _pre = Infinity
+ else:
+ _pre = pre
+
+ # Versions without a post segment should sort before those with one.
+ if post is None:
+ _post = NegativeInfinity # type: PrePostDevType
+
+ else:
+ _post = post
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ _dev = Infinity # type: PrePostDevType
+
+ else:
+ _dev = dev
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ _local = NegativeInfinity # type: LocalType
+ else:
+ # Versions with a local segment need that segment parsed to implement
+ # the sorting rules in PEP440.
+ # - Alpha numeric segments sort before numeric segments
+ # - Alpha numeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ _local = tuple(
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+ )
+
+ return epoch, _release, _pre, _post, _dev, _local
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/pyparsing.py b/third_party/python/setuptools/pkg_resources/_vendor/pyparsing.py
new file mode 100644
index 0000000000..cf75e1e5fc
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/pyparsing.py
@@ -0,0 +1,5742 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2018 Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
+(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
+L{Literal} expressions)::
+
+ from pyparsing import Word, alphas
+
+ # define grammar of a greeting
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+ - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
+ - construct character word-group expressions using the L{Word} class
+ - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
+ - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
+ - associate names with your parsed results using L{ParserElement.setResultsName}
+ - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
+ - find more useful common expressions in the L{pyparsing_common} namespace class
+"""
+
+__version__ = "2.2.1"
+__versionTime__ = "18 Sep 2018 00:49 UTC"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+ from _thread import RLock
+except ImportError:
+ from threading import RLock
+
+try:
+ # Python 3
+ from collections.abc import Iterable
+ from collections.abc import MutableMapping
+except ImportError:
+ # Python 2.7
+ from collections import Iterable
+ from collections import MutableMapping
+
+try:
+ from collections import OrderedDict as _OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict as _OrderedDict
+ except ImportError:
+ _OrderedDict = None
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+ _MAX_INT = sys.maxsize
+ basestring = str
+ unichr = chr
+ _ustr = str
+
+ # build list of single arg builtins, that can be used as parse actions
+ singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+ _MAX_INT = sys.maxint
+ range = xrange
+
+ def _ustr(obj):
+ """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+ str(obj). If that fails with a UnicodeEncodeError, it falls back to unicode(obj),
+ encoding it with the default encoding and escaping any characters that cannot be
+ encoded.
+ """
+ if isinstance(obj,unicode):
+ return obj
+
+ try:
+ # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+ # it won't break any existing code.
+ return str(obj)
+
+ except UnicodeEncodeError:
+ # Else encode it
+ ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+ xmlcharref = Regex(r'&#\d+;')
+ xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+ return xmlcharref.transformString(ret)
+
+ # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+ singleArgBuiltins = []
+ import __builtin__
+ for fname in "sum len sorted reversed list tuple set any all min max".split():
+ try:
+ singleArgBuiltins.append(getattr(__builtin__,fname))
+ except AttributeError:
+ continue
+
+_generatorType = type((y for y in range(1)))
+
+def _xml_escape(data):
+ """Escape &, <, >, ", ', etc. in a string of data."""
+
+ # ampersand must be replaced first
+ from_symbols = '&><"\''
+ to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+ for from_,to_ in zip(from_symbols, to_symbols):
+ data = data.replace(from_, to_)
+ return data
+
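+# Illustrative escaping (not part of upstream pyparsing):
+# _xml_escape('<a href="x">&</a>') returns
+# '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'; the ampersand is replaced
+# first so that the entities produced for the other symbols are not escaped a
+# second time.
+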
+class _Constants(object):
+ pass
+
+alphas = string.ascii_uppercase + string.ascii_lowercase
+nums = "0123456789"
+hexnums = nums + "ABCDEFabcdef"
+alphanums = alphas + nums
+_bslash = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+ """base exception class for all parsing runtime exceptions"""
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__( self, pstr, loc=0, msg=None, elem=None ):
+ self.loc = loc
+ if msg is None:
+ self.msg = pstr
+ self.pstr = ""
+ else:
+ self.msg = msg
+ self.pstr = pstr
+ self.parserElement = elem
+ self.args = (pstr, loc, msg)
+
+ @classmethod
+ def _from_exception(cls, pe):
+ """
+ internal factory method to simplify creating one type of ParseException
+ from another - avoids having __init__ signature conflicts among subclasses
+ """
+ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+ def __getattr__( self, aname ):
+ """supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+ """
+ if( aname == "lineno" ):
+ return lineno( self.loc, self.pstr )
+ elif( aname in ("col", "column") ):
+ return col( self.loc, self.pstr )
+ elif( aname == "line" ):
+ return line( self.loc, self.pstr )
+ else:
+ raise AttributeError(aname)
+
+ def __str__( self ):
+ return "%s (at char %d), (line:%d, col:%d)" % \
+ ( self.msg, self.loc, self.lineno, self.column )
+ def __repr__( self ):
+ return _ustr(self)
+ def markInputline( self, markerString = ">!<" ):
+ """Extracts the exception line from the input string, and marks
+ the location of the exception with a special symbol.
+ """
+ line_str = self.line
+ line_column = self.column - 1
+ if markerString:
+ line_str = "".join((line_str[:line_column],
+ markerString, line_str[line_column:]))
+ return line_str.strip()
+ def __dir__(self):
+ return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+ """
+ Exception thrown when parse expressions don't match the input string;
+ supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+
+ Example::
+ try:
+ Word(nums).setName("integer").parseString("ABC")
+ except ParseException as pe:
+ print(pe)
+ print("column: {}".format(pe.col))
+
+ prints::
+ Expected integer (at char 0), (line:1, col:1)
+ column: 1
+ """
+ pass
+
+class ParseFatalException(ParseBaseException):
+ """user-throwable exception thrown when inconsistent parse content
+ is found; stops all parsing immediately"""
+ pass
+
+class ParseSyntaxException(ParseFatalException):
+ """just like L{ParseFatalException}, but thrown internally when an
+ L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
+ immediately because an unbacktrackable syntax error has been found"""
+ pass
+
+#~ class ReparseException(ParseBaseException):
+ #~ """Experimental class - parse actions can raise this exception to cause
+ #~ pyparsing to reparse the input string:
+ #~ - with a modified input string, and/or
+ #~ - with a modified start location
+ #~ Set the values of the ReparseException in the constructor, and raise the
+ #~ exception in a parse action to cause pyparsing to use the new string/location.
+ #~ Setting the values as None causes no change to be made.
+ #~ """
+ #~ def __init_( self, newstring, restartLoc ):
+ #~ self.newParseText = newstring
+ #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+ """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
+ def __init__( self, parseElementList ):
+ self.parseElementTrace = parseElementList
+
+ def __str__( self ):
+ return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+ def __init__(self,p1,p2):
+ self.tup = (p1,p2)
+ def __getitem__(self,i):
+ return self.tup[i]
+ def __repr__(self):
+ return repr(self.tup[0])
+ def setOffset(self,i):
+ self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+ """
+ Structured parse results, to provide multiple means of access to the parsed data:
+ - as a list (C{len(results)})
+ - by list index (C{results[0], results[1]}, etc.)
+ - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
+
+ Example::
+ integer = Word(nums)
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+ # equivalent form:
+ # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ # parseString returns a ParseResults object
+ result = date_str.parseString("1999/12/31")
+
+ def test(s, fn=repr):
+ print("%s -> %s" % (s, fn(eval(s))))
+ test("list(result)")
+ test("result[0]")
+ test("result['month']")
+ test("result.day")
+ test("'month' in result")
+ test("'minutes' in result")
+ test("result.dump()", str)
+ prints::
+ list(result) -> ['1999', '/', '12', '/', '31']
+ result[0] -> '1999'
+ result['month'] -> '12'
+ result.day -> '31'
+ 'month' in result -> True
+ 'minutes' in result -> False
+ result.dump() -> ['1999', '/', '12', '/', '31']
+ - day: 31
+ - month: 12
+ - year: 1999
+ """
+ def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+ if isinstance(toklist, cls):
+ return toklist
+ retobj = object.__new__(cls)
+ retobj.__doinit = True
+ return retobj
+
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+ if self.__doinit:
+ self.__doinit = False
+ self.__name = None
+ self.__parent = None
+ self.__accumNames = {}
+ self.__asList = asList
+ self.__modal = modal
+ if toklist is None:
+ toklist = []
+ if isinstance(toklist, list):
+ self.__toklist = toklist[:]
+ elif isinstance(toklist, _generatorType):
+ self.__toklist = list(toklist)
+ else:
+ self.__toklist = [toklist]
+ self.__tokdict = dict()
+
+ if name is not None and name:
+ if not modal:
+ self.__accumNames[name] = 0
+ if isinstance(name,int):
+ name = _ustr(name) # will always return a str, but use _ustr for consistency
+ self.__name = name
+ if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+ if isinstance(toklist,basestring):
+ toklist = [ toklist ]
+ if asList:
+ if isinstance(toklist,ParseResults):
+ self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+ else:
+ self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+ self[name].__name = name
+ else:
+ try:
+ self[name] = toklist[0]
+ except (KeyError,TypeError,IndexError):
+ self[name] = toklist
+
+ def __getitem__( self, i ):
+ if isinstance( i, (int,slice) ):
+ return self.__toklist[i]
+ else:
+ if i not in self.__accumNames:
+ return self.__tokdict[i][-1][0]
+ else:
+ return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+ def __setitem__( self, k, v, isinstance=isinstance ):
+ if isinstance(v,_ParseResultsWithOffset):
+ self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+ sub = v[0]
+ elif isinstance(k,(int,slice)):
+ self.__toklist[k] = v
+ sub = v
+ else:
+ self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+ sub = v
+ if isinstance(sub,ParseResults):
+ sub.__parent = wkref(self)
+
+ def __delitem__( self, i ):
+ if isinstance(i,(int,slice)):
+ mylen = len( self.__toklist )
+ del self.__toklist[i]
+
+ # convert int to slice
+ if isinstance(i, int):
+ if i < 0:
+ i += mylen
+ i = slice(i, i+1)
+ # get removed indices
+ removed = list(range(*i.indices(mylen)))
+ removed.reverse()
+ # fixup indices in token dictionary
+ for name,occurrences in self.__tokdict.items():
+ for j in removed:
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+ else:
+ del self.__tokdict[i]
+
+ def __contains__( self, k ):
+ return k in self.__tokdict
+
+ def __len__( self ): return len( self.__toklist )
+ def __bool__(self): return ( not not self.__toklist )
+ __nonzero__ = __bool__
+ def __iter__( self ): return iter( self.__toklist )
+ def __reversed__( self ): return iter( self.__toklist[::-1] )
+ def _iterkeys( self ):
+ if hasattr(self.__tokdict, "iterkeys"):
+ return self.__tokdict.iterkeys()
+ else:
+ return iter(self.__tokdict)
+
+ def _itervalues( self ):
+ return (self[k] for k in self._iterkeys())
+
+ def _iteritems( self ):
+ return ((k, self[k]) for k in self._iterkeys())
+
+ if PY_3:
+ keys = _iterkeys
+ """Returns an iterator of all named result keys (Python 3.x only)."""
+
+ values = _itervalues
+ """Returns an iterator of all named result values (Python 3.x only)."""
+
+ items = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
+
+ else:
+ iterkeys = _iterkeys
+ """Returns an iterator of all named result keys (Python 2.x only)."""
+
+ itervalues = _itervalues
+ """Returns an iterator of all named result values (Python 2.x only)."""
+
+ iteritems = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+ def keys( self ):
+ """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iterkeys())
+
+ def values( self ):
+ """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.itervalues())
+
+ def items( self ):
+ """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iteritems())
+
+ def haskeys( self ):
+ """Since keys() returns an iterator, this method is helpful in bypassing
+ code that looks for the existence of any defined results names."""
+ return bool(self.__tokdict)
+
+ def pop( self, *args, **kwargs):
+ """
+ Removes and returns item at specified index (default=C{last}).
+ Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
+ argument or an integer argument, it will use C{list} semantics
+ and pop tokens from the list of parsed tokens. If passed a
+ non-integer argument (most likely a string), it will use C{dict}
+ semantics and pop the corresponding value from any defined
+ results names. A second default return value argument is
+ supported, just as in C{dict.pop()}.
+
+ Example::
+ def remove_first(tokens):
+ tokens.pop(0)
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+ print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+ label = Word(alphas)
+ patt = label("LABEL") + OneOrMore(Word(nums))
+ print(patt.parseString("AAB 123 321").dump())
+
+ # Use pop() in a parse action to remove named result (note that corresponding value is not
+ # removed from list form of results)
+ def remove_LABEL(tokens):
+ tokens.pop("LABEL")
+ return tokens
+ patt.addParseAction(remove_LABEL)
+ print(patt.parseString("AAB 123 321").dump())
+ prints::
+ ['AAB', '123', '321']
+ - LABEL: AAB
+
+ ['AAB', '123', '321']
+ """
+ if not args:
+ args = [-1]
+ for k,v in kwargs.items():
+ if k == 'default':
+ args = (args[0], v)
+ else:
+ raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+ if (isinstance(args[0], int) or
+ len(args) == 1 or
+ args[0] in self):
+ index = args[0]
+ ret = self[index]
+ del self[index]
+ return ret
+ else:
+ defaultvalue = args[1]
+ return defaultvalue
+
+ def get(self, key, defaultValue=None):
+ """
+ Returns named result matching the given key, or if there is no
+ such name, then returns the given C{defaultValue} or C{None} if no
+ C{defaultValue} is specified.
+
+ Similar to C{dict.get()}.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString("1999/12/31")
+ print(result.get("year")) # -> '1999'
+ print(result.get("hour", "not specified")) # -> 'not specified'
+ print(result.get("hour")) # -> None
+ """
+ if key in self:
+ return self[key]
+ else:
+ return defaultValue
+
+ def insert( self, index, insStr ):
+ """
+ Inserts new element at location index in the list of parsed tokens.
+
+ Similar to C{list.insert()}.
+
+ Example::
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to insert the parse location in the front of the parsed results
+ def insert_locn(locn, tokens):
+ tokens.insert(0, locn)
+ print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+ """
+ self.__toklist.insert(index, insStr)
+ # fixup indices in token dictionary
+ for name,occurrences in self.__tokdict.items():
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+ def append( self, item ):
+ """
+ Add single element to end of ParseResults list of elements.
+
+ Example::
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to compute the sum of the parsed integers, and add it to the end
+ def append_sum(tokens):
+ tokens.append(sum(map(int, tokens)))
+ print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+ """
+ self.__toklist.append(item)
+
+ def extend( self, itemseq ):
+ """
+ Add sequence of elements to end of ParseResults list of elements.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+
+ # use a parse action to append the reverse of the matched strings, to make a palindrome
+ def make_palindrome(tokens):
+ tokens.extend(reversed([t[::-1] for t in tokens]))
+ return ''.join(tokens)
+ print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+ """
+ if isinstance(itemseq, ParseResults):
+ self += itemseq
+ else:
+ self.__toklist.extend(itemseq)
+
+ def clear( self ):
+ """
+ Clear all elements and results names.
+ """
+ del self.__toklist[:]
+ self.__tokdict.clear()
+
+ def __getattr__( self, name ):
+ try:
+ return self[name]
+ except KeyError:
+ return ""
+
+ def __add__( self, other ):
+ ret = self.copy()
+ ret += other
+ return ret
+
+ def __iadd__( self, other ):
+ if other.__tokdict:
+ offset = len(self.__toklist)
+ addoffset = lambda a: offset if a<0 else a+offset
+ otheritems = other.__tokdict.items()
+ otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+ for (k,vlist) in otheritems for v in vlist]
+ for k,v in otherdictitems:
+ self[k] = v
+ if isinstance(v[0],ParseResults):
+ v[0].__parent = wkref(self)
+
+ self.__toklist += other.__toklist
+ self.__accumNames.update( other.__accumNames )
+ return self
+
+ def __radd__(self, other):
+ if isinstance(other,int) and other == 0:
+ # useful for merging many ParseResults using sum() builtin
+ return self.copy()
+ else:
+ # this may raise a TypeError - so be it
+ return other + self
+
+ def __repr__( self ):
+ return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+ def __str__( self ):
+ return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+ def _asStringList( self, sep='' ):
+ out = []
+ for item in self.__toklist:
+ if out and sep:
+ out.append(sep)
+ if isinstance( item, ParseResults ):
+ out += item._asStringList()
+ else:
+ out.append( _ustr(item) )
+ return out
+
+ def asList( self ):
+ """
+ Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+ result = patt.parseString("sldkj lsdkj sldkj")
+ # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+ print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+
+ # Use asList() to create an actual list
+ result_list = result.asList()
+ print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+ """
+ return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+ def asDict( self ):
+ """
+ Returns the named parse results as a nested dictionary.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+
+ result_dict = result.asDict()
+ print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+ import json
+ print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+ print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+ """
+ if PY_3:
+ item_fn = self.items
+ else:
+ item_fn = self.iteritems
+
+ def toItem(obj):
+ if isinstance(obj, ParseResults):
+ if obj.haskeys():
+ return obj.asDict()
+ else:
+ return [toItem(v) for v in obj]
+ else:
+ return obj
+
+ return dict((k,toItem(v)) for k,v in item_fn())
+
+ def copy( self ):
+ """
+ Returns a new copy of a C{ParseResults} object.
+ """
+ ret = ParseResults( self.__toklist )
+ ret.__tokdict = self.__tokdict.copy()
+ ret.__parent = self.__parent
+ ret.__accumNames.update( self.__accumNames )
+ ret.__name = self.__name
+ return ret
+
+ def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+ """
+ (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+ """
+ nl = "\n"
+ out = []
+ namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+ for v in vlist)
+ nextLevelIndent = indent + " "
+
+ # collapse out indents if formatting is not desired
+ if not formatted:
+ indent = ""
+ nextLevelIndent = ""
+ nl = ""
+
+ selfTag = None
+ if doctag is not None:
+ selfTag = doctag
+ else:
+ if self.__name:
+ selfTag = self.__name
+
+ if not selfTag:
+ if namedItemsOnly:
+ return ""
+ else:
+ selfTag = "ITEM"
+
+ out += [ nl, indent, "<", selfTag, ">" ]
+
+ for i,res in enumerate(self.__toklist):
+ if isinstance(res,ParseResults):
+ if i in namedItems:
+ out += [ res.asXML(namedItems[i],
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ out += [ res.asXML(None,
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ # individual token, see if there is a name for it
+ resTag = None
+ if i in namedItems:
+ resTag = namedItems[i]
+ if not resTag:
+ if namedItemsOnly:
+ continue
+ else:
+ resTag = "ITEM"
+ xmlBodyText = _xml_escape(_ustr(res))
+ out += [ nl, nextLevelIndent, "<", resTag, ">",
+ xmlBodyText,
+ "</", resTag, ">" ]
+
+ out += [ nl, indent, "</", selfTag, ">" ]
+ return "".join(out)
+
+ def __lookup(self,sub):
+ for k,vlist in self.__tokdict.items():
+ for v,loc in vlist:
+ if sub is v:
+ return k
+ return None
+
+ def getName(self):
+ r"""
+ Returns the results name for this token expression. Useful when several
+ different expressions might match at a particular location.
+
+ Example::
+ integer = Word(nums)
+ ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+ house_number_expr = Suppress('#') + Word(nums, alphanums)
+ user_data = (Group(house_number_expr)("house_number")
+ | Group(ssn_expr)("ssn")
+ | Group(integer)("age"))
+ user_info = OneOrMore(user_data)
+
+ result = user_info.parseString("22 111-22-3333 #221B")
+ for item in result:
+ print(item.getName(), ':', item[0])
+ prints::
+ age : 22
+ ssn : 111-22-3333
+ house_number : 221B
+ """
+ if self.__name:
+ return self.__name
+ elif self.__parent:
+ par = self.__parent()
+ if par:
+ return par.__lookup(self)
+ else:
+ return None
+ elif (len(self) == 1 and
+ len(self.__tokdict) == 1 and
+ next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+ return next(iter(self.__tokdict.keys()))
+ else:
+ return None
+
+ def dump(self, indent='', depth=0, full=True):
+ """
+ Diagnostic method for listing out the contents of a C{ParseResults}.
+ Accepts an optional C{indent} argument so that this string can be embedded
+ in a nested display of other data.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(result.dump())
+ prints::
+ ['12', '/', '31', '/', '1999']
+ - day: 1999
+ - month: 31
+ - year: 12
+ """
+ out = []
+ NL = '\n'
+ out.append( indent+_ustr(self.asList()) )
+ if full:
+ if self.haskeys():
+ items = sorted((str(k), v) for k,v in self.items())
+ for k,v in items:
+ if out:
+ out.append(NL)
+ out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
+ if isinstance(v,ParseResults):
+ if v:
+ out.append( v.dump(indent,depth+1) )
+ else:
+ out.append(_ustr(v))
+ else:
+ out.append(repr(v))
+ elif any(isinstance(vv,ParseResults) for vv in self):
+ v = self
+ for i,vv in enumerate(v):
+ if isinstance(vv,ParseResults):
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
+ else:
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
+
+ return "".join(out)
+
+ def pprint(self, *args, **kwargs):
+ """
+ Pretty-printer for parsed results as a list, using the C{pprint} module.
+ Accepts additional positional or keyword args as defined for the
+ C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
+
+ Example::
+ ident = Word(alphas, alphanums)
+ num = Word(nums)
+ func = Forward()
+ term = ident | num | Group('(' + func + ')')
+ func <<= ident + Group(Optional(delimitedList(term)))
+ result = func.parseString("fna a,b,(fnb c,d,200),100")
+ result.pprint(width=40)
+ prints::
+ ['fna',
+ ['a',
+ 'b',
+ ['(', 'fnb', ['c', 'd', '200'], ')'],
+ '100']]
+ """
+ pprint.pprint(self.asList(), *args, **kwargs)
+
+ # add support for pickle protocol
+ def __getstate__(self):
+ return ( self.__toklist,
+ ( self.__tokdict.copy(),
+ self.__parent is not None and self.__parent() or None,
+ self.__accumNames,
+ self.__name ) )
+
+ def __setstate__(self,state):
+ self.__toklist = state[0]
+ (self.__tokdict,
+ par,
+ inAccumNames,
+ self.__name) = state[1]
+ self.__accumNames = {}
+ self.__accumNames.update(inAccumNames)
+ if par is not None:
+ self.__parent = wkref(par)
+ else:
+ self.__parent = None
+
+ def __getnewargs__(self):
+ return self.__toklist, self.__name, self.__asList, self.__modal
+
+ def __dir__(self):
+ return (dir(type(self)) + list(self.keys()))
+
+MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+ """Returns current column within a string, counting newlines as line separators.
+ The first column is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+ """
+ s = strg
+ return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+ """Returns current line number within a string, counting newlines as line separators.
+ The first line is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+ """
+ return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+ """Returns the line of text containing loc within a string, counting newlines as line separators.
+ """
+ lastCR = strg.rfind("\n", 0, loc)
+ nextCR = strg.find("\n", loc)
+ if nextCR >= 0:
+ return strg[lastCR+1:nextCR]
+ else:
+ return strg[lastCR+1:]
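+
+# Illustrative sketch of the three location helpers above, as client code
+# might call them (values computed for this specific input string):
+#   s = "abc\ndef"
+#   col(5, s)      # -> 2      ('e' is in column 2 of its line)
+#   lineno(5, s)   # -> 2      (location 5 falls on line 2)
+#   line(5, s)     # -> 'def'  (the full text of that line)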
+
+def _defaultStartDebugAction( instring, loc, expr ):
+ print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+ print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+ print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+ """'Do-nothing' debug action, to suppress debugging output during parsing."""
+ pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+ #~ if func in singleArgBuiltins:
+ #~ return lambda s,l,t: func(t)
+ #~ limit = 0
+ #~ foundArity = False
+ #~ def wrapper(*args):
+ #~ nonlocal limit,foundArity
+ #~ while 1:
+ #~ try:
+ #~ ret = func(*args[limit:])
+ #~ foundArity = True
+ #~ return ret
+ #~ except TypeError:
+ #~ if limit == maxargs or foundArity:
+ #~ raise
+ #~ limit += 1
+ #~ continue
+ #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+'decorator to trim function calls to match the arity of the target'
+def _trim_arity(func, maxargs=2):
+ if func in singleArgBuiltins:
+ return lambda s,l,t: func(t)
+ limit = [0]
+ foundArity = [False]
+
+ # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+ if system_version[:2] >= (3,5):
+ def extract_stack(limit=0):
+ # special handling for Python 3.5.0 - extra deep call stack by 1
+ offset = -3 if system_version == (3,5,0) else -2
+ frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+ return [frame_summary[:2]]
+ def extract_tb(tb, limit=0):
+ frames = traceback.extract_tb(tb, limit=limit)
+ frame_summary = frames[-1]
+ return [frame_summary[:2]]
+ else:
+ extract_stack = traceback.extract_stack
+ extract_tb = traceback.extract_tb
+
+ # synthesize what would be returned by traceback.extract_stack at the call to
+ # user's parse action 'func', so that we don't incur call penalty at parse time
+
+ LINE_DIFF = 6
+ # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
+ # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+ this_line = extract_stack(limit=2)[-1]
+ pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+ def wrapper(*args):
+ while 1:
+ try:
+ ret = func(*args[limit[0]:])
+ foundArity[0] = True
+ return ret
+ except TypeError:
+ # re-raise TypeErrors if they did not come from our arity testing
+ if foundArity[0]:
+ raise
+ else:
+ try:
+ tb = sys.exc_info()[-1]
+ if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+ raise
+ finally:
+ del tb
+
+ if limit[0] <= maxargs:
+ limit[0] += 1
+ continue
+ raise
+
+ # copy func name to wrapper for sensible debug output
+ func_name = "<parse action>"
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ wrapper.__name__ = func_name
+
+ return wrapper
+
+class ParserElement(object):
+ """Abstract base level parser element class."""
+ DEFAULT_WHITE_CHARS = " \n\t\r"
+ verbose_stacktrace = False
+
+ @staticmethod
+ def setDefaultWhitespaceChars( chars ):
+ r"""
+ Overrides the default whitespace chars
+
+ Example::
+ # default whitespace chars are space, <TAB> and newline
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
+
+ # change to just treat newline as significant
+ ParserElement.setDefaultWhitespaceChars(" \t")
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
+ """
+ ParserElement.DEFAULT_WHITE_CHARS = chars
+
+ @staticmethod
+ def inlineLiteralsUsing(cls):
+ """
+ Set class to be used for inclusion of string literals into a parser.
+
+ Example::
+ # default literal class used is Literal
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+
+ # change to Suppress
+ ParserElement.inlineLiteralsUsing(Suppress)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
+ """
+ ParserElement._literalStringClass = cls
+
+ def __init__( self, savelist=False ):
+ self.parseAction = list()
+ self.failAction = None
+ #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
+ self.strRepr = None
+ self.resultsName = None
+ self.saveAsList = savelist
+ self.skipWhitespace = True
+ self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ self.copyDefaultWhiteChars = True
+ self.mayReturnEmpty = False # used when checking for left-recursion
+ self.keepTabs = False
+ self.ignoreExprs = list()
+ self.debug = False
+ self.streamlined = False
+ self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+ self.errmsg = ""
+ self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+ self.debugActions = ( None, None, None ) #custom debug actions
+ self.re = None
+ self.callPreparse = True # used to avoid redundant calls to preParse
+ self.callDuringTry = False
+
+ def copy( self ):
+ """
+ Make a copy of this C{ParserElement}. Useful for defining different parse actions
+ for the same parsing pattern, using copies of the original parse element.
+
+ Example::
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+ integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+
+ print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+ prints::
+ [5120, 100, 655360, 268435456]
+ Equivalent form of C{expr.copy()} is just C{expr()}::
+ integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+ """
+ cpy = copy.copy( self )
+ cpy.parseAction = self.parseAction[:]
+ cpy.ignoreExprs = self.ignoreExprs[:]
+ if self.copyDefaultWhiteChars:
+ cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ return cpy
+
+ def setName( self, name ):
+ """
+ Define name for this expression, makes debugging and exception messages clearer.
+
+ Example::
+ Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+ Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
+ """
+ self.name = name
+ self.errmsg = "Expected " + self.name
+ if hasattr(self,"exception"):
+ self.exception.msg = self.errmsg
+ return self
+
+ def setResultsName( self, name, listAllMatches=False ):
+ """
+ Define name for referencing matching tokens as a nested attribute
+ of the returned parse results.
+ NOTE: this returns a *copy* of the original C{ParserElement} object;
+ this is so that the client can define a basic element, such as an
+ integer, and reference it in multiple places with different names.
+
+ You can also set results names using the abbreviated syntax,
+ C{expr("name")} in place of C{expr.setResultsName("name")} -
+ see L{I{__call__}<__call__>}.
+
+ Example::
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+
+ # equivalent form:
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+ """
+ newself = self.copy()
+ if name.endswith("*"):
+ name = name[:-1]
+ listAllMatches=True
+ newself.resultsName = name
+ newself.modalResults = not listAllMatches
+ return newself
+
+ def setBreak(self,breakFlag = True):
+ """Method to invoke the Python pdb debugger when this element is
+ about to be parsed. Set C{breakFlag} to True to enable, False to
+ disable.
+ """
+ if breakFlag:
+ _parseMethod = self._parse
+ def breaker(instring, loc, doActions=True, callPreParse=True):
+ import pdb
+ pdb.set_trace()
+ return _parseMethod( instring, loc, doActions, callPreParse )
+ breaker._originalParseMethod = _parseMethod
+ self._parse = breaker
+ else:
+ if hasattr(self._parse,"_originalParseMethod"):
+ self._parse = self._parse._originalParseMethod
+ return self
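+
+    # Minimal usage sketch for setBreak, assuming an interactive session where
+    # dropping into pdb is acceptable:
+    #   integer = Word(nums)
+    #   integer.setBreak()          # pdb.set_trace() fires just before parsing
+    #   integer.parseString("42")   # pdb prompt appears, then parsing resumes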
+
+ def setParseAction( self, *fns, **kwargs ):
+ """
+ Define one or more actions to perform when successfully matching parse element definition.
+ Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+ C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+ - s = the original string being parsed (see note below)
+ - loc = the location of the matching substring
+ - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
+ If the functions in fns modify the tokens, they can return them as the return
+ value from fn, and the modified list of tokens will replace the original.
+ Otherwise, fn does not need to return any value.
+
+ Optional keyword arguments:
+ - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{parseString}<parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer + '/' + integer + '/' + integer
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+ # use parse action to convert to ints at parse time
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ date_str = integer + '/' + integer + '/' + integer
+
+ # note that integer fields are now ints, not strings
+ date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
+ """
+ self.parseAction = list(map(_trim_arity, list(fns)))
+ self.callDuringTry = kwargs.get("callDuringTry", False)
+ return self
+
+ def addParseAction( self, *fns, **kwargs ):
+ """
+ Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
+
+ See examples in L{I{copy}<copy>}.
+ """
+ self.parseAction += list(map(_trim_arity, list(fns)))
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def addCondition(self, *fns, **kwargs):
+ """Add a boolean predicate function to expression's list of parse actions. See
+ L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
+ functions passed to C{addCondition} need to return boolean success/fail of the condition.
+
+ Optional keyword arguments:
+ - message = define a custom message to be used in the raised exception
+ - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+
+ Example::
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ year_int = integer.copy()
+ year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+ date_str = year_int + '/' + integer + '/' + integer
+
+ result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+ """
+ msg = kwargs.get("message", "failed user-defined condition")
+ exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+ for fn in fns:
+ def pa(s,l,t):
+ if not bool(_trim_arity(fn)(s,l,t)):
+ raise exc_type(s,l,msg)
+ self.parseAction.append(pa)
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def setFailAction( self, fn ):
+ """Define action to perform if parsing fails at this expression.
+        Fail action fn is a callable function that takes the arguments
+ C{fn(s,loc,expr,err)} where:
+ - s = string being parsed
+ - loc = location where expression match was attempted and failed
+ - expr = the parse expression that failed
+ - err = the exception thrown
+ The function returns no value. It may throw C{L{ParseFatalException}}
+ if it is desired to stop parsing immediately."""
+ self.failAction = fn
+ return self
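+
+    # Illustrative sketch of a fail action using the signature described above
+    # (the reporter function and its message are hypothetical):
+    #   def report_failure(s, loc, expr, err):
+    #       print("no match for %s at loc %d: %s" % (expr, loc, err))
+    #   integer = Word(nums).setFailAction(report_failure)
+    #   integer.parseString("abc")  # prints the report, then raises ParseException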
+
+ def _skipIgnorables( self, instring, loc ):
+ exprsFound = True
+ while exprsFound:
+ exprsFound = False
+ for e in self.ignoreExprs:
+ try:
+ while 1:
+ loc,dummy = e._parse( instring, loc )
+ exprsFound = True
+ except ParseException:
+ pass
+ return loc
+
+ def preParse( self, instring, loc ):
+ if self.ignoreExprs:
+ loc = self._skipIgnorables( instring, loc )
+
+ if self.skipWhitespace:
+ wt = self.whiteChars
+ instrlen = len(instring)
+ while loc < instrlen and instring[loc] in wt:
+ loc += 1
+
+ return loc
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ return loc, []
+
+ def postParse( self, instring, loc, tokenlist ):
+ return tokenlist
+
+ #~ @profile
+ def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+ debugging = ( self.debug ) #and doActions )
+
+ if debugging or self.failAction:
+ #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+ if (self.debugActions[0] ):
+ self.debugActions[0]( instring, loc, self )
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse( instring, loc )
+ else:
+ preloc = loc
+ tokensStart = preloc
+ try:
+ try:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+ except IndexError:
+ raise ParseException( instring, len(instring), self.errmsg, self )
+ except ParseBaseException as err:
+ #~ print ("Exception raised:", err)
+ if self.debugActions[2]:
+ self.debugActions[2]( instring, tokensStart, self, err )
+ if self.failAction:
+ self.failAction( instring, tokensStart, self, err )
+ raise
+ else:
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse( instring, loc )
+ else:
+ preloc = loc
+ tokensStart = preloc
+ if self.mayIndexError or preloc >= len(instring):
+ try:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+ except IndexError:
+ raise ParseException( instring, len(instring), self.errmsg, self )
+ else:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+ tokens = self.postParse( instring, loc, tokens )
+
+ retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+ if self.parseAction and (doActions or self.callDuringTry):
+ if debugging:
+ try:
+ for fn in self.parseAction:
+ tokens = fn( instring, tokensStart, retTokens )
+ if tokens is not None:
+ retTokens = ParseResults( tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+ modal=self.modalResults )
+ except ParseBaseException as err:
+ #~ print "Exception raised in user parse action:", err
+ if (self.debugActions[2] ):
+ self.debugActions[2]( instring, tokensStart, self, err )
+ raise
+ else:
+ for fn in self.parseAction:
+ tokens = fn( instring, tokensStart, retTokens )
+ if tokens is not None:
+ retTokens = ParseResults( tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+ modal=self.modalResults )
+ if debugging:
+ #~ print ("Matched",self,"->",retTokens.asList())
+ if (self.debugActions[1] ):
+ self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+ return loc, retTokens
+
+ def tryParse( self, instring, loc ):
+ try:
+ return self._parse( instring, loc, doActions=False )[0]
+ except ParseFatalException:
+ raise ParseException( instring, loc, self.errmsg, self)
+
+ def canParseNext(self, instring, loc):
+ try:
+ self.tryParse(instring, loc)
+ except (ParseException, IndexError):
+ return False
+ else:
+ return True
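+
+    # Illustrative sketch: probe a location without consuming input or
+    # raising an exception.
+    #   Word(nums).canParseNext("abc 123", 4)   # -> True  (digits start at loc 4)
+    #   Word(nums).canParseNext("abc 123", 0)   # -> False (letters at loc 0)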
+
+ class _UnboundedCache(object):
+ def __init__(self):
+ cache = {}
+ self.not_in_cache = not_in_cache = object()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ if _OrderedDict is not None:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = _OrderedDict()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(cache) > size:
+ try:
+ cache.popitem(False)
+ except KeyError:
+ pass
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ else:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = {}
+ key_fifo = collections.deque([], size)
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(key_fifo) > size:
+ cache.pop(key_fifo.popleft(), None)
+ key_fifo.append(key)
+
+ def clear(self):
+ cache.clear()
+ key_fifo.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ # argument cache for optimizing repeated calls when backtracking through recursive expressions
+    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+ packrat_cache_lock = RLock()
+ packrat_cache_stats = [0, 0]
+
+ # this method gets repeatedly called during backtracking with the same arguments -
+ # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+ def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+ HIT, MISS = 0, 1
+ lookup = (self, instring, loc, callPreParse, doActions)
+ with ParserElement.packrat_cache_lock:
+ cache = ParserElement.packrat_cache
+ value = cache.get(lookup)
+ if value is cache.not_in_cache:
+ ParserElement.packrat_cache_stats[MISS] += 1
+ try:
+ value = self._parseNoCache(instring, loc, doActions, callPreParse)
+ except ParseBaseException as pe:
+ # cache a copy of the exception, without the traceback
+ cache.set(lookup, pe.__class__(*pe.args))
+ raise
+ else:
+ cache.set(lookup, (value[0], value[1].copy()))
+ return value
+ else:
+ ParserElement.packrat_cache_stats[HIT] += 1
+ if isinstance(value, Exception):
+ raise value
+ return (value[0], value[1].copy())
+
+ _parse = _parseNoCache
+
+ @staticmethod
+ def resetCache():
+ ParserElement.packrat_cache.clear()
+ ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+ _packratEnabled = False
+ @staticmethod
+ def enablePackrat(cache_size_limit=128):
+ """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+ Repeated parse attempts at the same string location (which happens
+ often in many complex grammars) can immediately return a cached value,
+ instead of re-executing parsing/validating code. Memoizing is done of
+ both valid results and parsing exceptions.
+
+ Parameters:
+ - cache_size_limit - (default=C{128}) - if an integer value is provided
+ will limit the size of the packrat cache; if None is passed, then
+ the cache size will be unbounded; if 0 is passed, the cache will
+ be effectively disabled.
+
+ This speedup may break existing programs that use parse actions that
+ have side-effects. For this reason, packrat parsing is disabled when
+ you first import pyparsing. To activate the packrat feature, your
+ program must call the class method C{ParserElement.enablePackrat()}. If
+ your program uses C{psyco} to "compile as you go", you must call
+ C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
+ Python will crash. For best results, call C{enablePackrat()} immediately
+ after importing pyparsing.
+
+ Example::
+ import pyparsing
+ pyparsing.ParserElement.enablePackrat()
+ """
+ if not ParserElement._packratEnabled:
+ ParserElement._packratEnabled = True
+ if cache_size_limit is None:
+ ParserElement.packrat_cache = ParserElement._UnboundedCache()
+ else:
+ ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+ ParserElement._parse = ParserElement._parseCache
+
+ def parseString( self, instring, parseAll=False ):
+ """
+ Execute the parse expression with the given string.
+ This is the main interface to the client code, once the complete
+ expression has been built.
+
+ If you want the grammar to require that the entire input string be
+ successfully parsed, then set C{parseAll} to True (equivalent to ending
+ the grammar with C{L{StringEnd()}}).
+
+ Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+ in order to report proper column numbers in parse actions.
+ If the input string contains tabs and
+ the grammar uses parse actions that use the C{loc} argument to index into the
+ string being parsed, you can ensure you have a consistent view of the input
+ string by:
+ - calling C{parseWithTabs} on your grammar before calling C{parseString}
+ (see L{I{parseWithTabs}<parseWithTabs>})
+ - define your parse action using the full C{(s,loc,toks)} signature, and
+ reference the input string using the parse action's C{s} argument
+          - explicitly expand the tabs in your input string before calling
+ C{parseString}
+
+ Example::
+ Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
+ Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
+ """
+ ParserElement.resetCache()
+ if not self.streamlined:
+ self.streamline()
+ #~ self.saveAsList = True
+ for e in self.ignoreExprs:
+ e.streamline()
+ if not self.keepTabs:
+ instring = instring.expandtabs()
+ try:
+ loc, tokens = self._parse( instring, 0 )
+ if parseAll:
+ loc = self.preParse( instring, loc )
+ se = Empty() + StringEnd()
+ se._parse( instring, loc )
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+ else:
+ return tokens
+
+ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+ """
+ Scan the input string for expression matches. Each match will return the
+ matching tokens, start location, and end location. May be called with optional
+ C{maxMatches} argument, to clip scanning after 'n' matches are found. If
+ C{overlap} is specified, then overlapping matches will be reported.
+
+ Note that the start and end locations are reported relative to the string
+ being parsed. See L{I{parseString}<parseString>} for more information on parsing
+ strings with embedded tabs.
+
+ Example::
+ source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+ print(source)
+ for tokens,start,end in Word(alphas).scanString(source):
+ print(' '*start + '^'*(end-start))
+ print(' '*start + tokens[0])
+
+ prints::
+
+ sldjf123lsdjjkf345sldkjf879lkjsfd987
+ ^^^^^
+ sldjf
+ ^^^^^^^
+ lsdjjkf
+ ^^^^^^
+ sldkjf
+ ^^^^^^
+ lkjsfd
+ """
+ if not self.streamlined:
+ self.streamline()
+ for e in self.ignoreExprs:
+ e.streamline()
+
+ if not self.keepTabs:
+ instring = _ustr(instring).expandtabs()
+ instrlen = len(instring)
+ loc = 0
+ preparseFn = self.preParse
+ parseFn = self._parse
+ ParserElement.resetCache()
+ matches = 0
+ try:
+ while loc <= instrlen and matches < maxMatches:
+ try:
+ preloc = preparseFn( instring, loc )
+ nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+ except ParseException:
+ loc = preloc+1
+ else:
+ if nextLoc > loc:
+ matches += 1
+ yield tokens, preloc, nextLoc
+ if overlap:
+ nextloc = preparseFn( instring, loc )
+ if nextloc > loc:
+ loc = nextLoc
+ else:
+ loc += 1
+ else:
+ loc = nextLoc
+ else:
+ loc = preloc+1
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def transformString( self, instring ):
+ """
+ Extension to C{L{scanString}}, to modify matching text with modified tokens that may
+ be returned from a parse action. To use C{transformString}, define a grammar and
+ attach a parse action to it that modifies the returned token list.
+ Invoking C{transformString()} on a target string will then scan for matches,
+ and replace the matched text patterns according to the logic in the parse
+ action. C{transformString()} returns the resulting transformed string.
+
+ Example::
+ wd = Word(alphas)
+ wd.setParseAction(lambda toks: toks[0].title())
+
+ print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+ Prints::
+ Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+ """
+ out = []
+ lastE = 0
+ # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+ # keep string locs straight between transformString and scanString
+ self.keepTabs = True
+ try:
+ for t,s,e in self.scanString( instring ):
+ out.append( instring[lastE:s] )
+ if t:
+ if isinstance(t,ParseResults):
+ out += t.asList()
+ elif isinstance(t,list):
+ out += t
+ else:
+ out.append(t)
+ lastE = e
+ out.append(instring[lastE:])
+ out = [o for o in out if o]
+ return "".join(map(_ustr,_flatten(out)))
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def searchString( self, instring, maxMatches=_MAX_INT ):
+ """
+ Another extension to C{L{scanString}}, simplifying the access to the tokens found
+ to match the given parse expression. May be called with optional
+ C{maxMatches} argument, to clip searching after 'n' matches are found.
+
+ Example::
+ # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+ cap_word = Word(alphas.upper(), alphas.lower())
+
+ print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+ # the sum() builtin can be used to merge results into a single ParseResults object
+ print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+ prints::
+ [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+ ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+ """
+ try:
+ return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+ """
+ Generator method to split a string using the given expression as a separator.
+ May be called with optional C{maxsplit} argument, to limit the number of splits;
+ and the optional C{includeSeparators} argument (default=C{False}), if the separating
+ matching text should be included in the split results.
+
+ Example::
+ punc = oneOf(list(".,;:/-!?"))
+ print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+ prints::
+ ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+ """
+ splits = 0
+ last = 0
+ for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+ yield instring[last:s]
+ if includeSeparators:
+ yield t[0]
+ last = e
+ yield instring[last:]
+
+ def __add__(self, other ):
+ """
+ Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
+ converts them to L{Literal}s by default.
+
+ Example::
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+ Prints::
+ Hello, World! -> ['Hello', ',', 'World', '!']
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return And( [ self, other ] )
+
+ def __radd__(self, other ):
+ """
+ Implementation of + operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other + self
+
+ def __sub__(self, other):
+ """
+ Implementation of - operator, returns C{L{And}} with error stop
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return self + And._ErrorStop() + other
+
+ def __rsub__(self, other ):
+ """
+ Implementation of - operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other - self
+
+ def __mul__(self,other):
+ """
+ Implementation of * operator, allows use of C{expr * 3} in place of
+        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
+ tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
+ may also include C{None} as in:
+ - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+ to C{expr*n + L{ZeroOrMore}(expr)}
+ (read as "at least n instances of C{expr}")
+ - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+ (read as "0 to n instances of C{expr}")
+ - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+ - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+ Note that C{expr*(None,n)} does not raise an exception if
+ more than n exprs exist in the input stream; that is,
+ C{expr*(None,n)} does not enforce a maximum number of expr
+ occurrences. If this behavior is desired, then write
+ C{expr*(None,n) + ~expr}
+ """
+ if isinstance(other,int):
+ minElements, optElements = other,0
+ elif isinstance(other,tuple):
+ other = (other + (None, None))[:2]
+ if other[0] is None:
+ other = (0, other[1])
+ if isinstance(other[0],int) and other[1] is None:
+ if other[0] == 0:
+ return ZeroOrMore(self)
+ if other[0] == 1:
+ return OneOrMore(self)
+ else:
+ return self*other[0] + ZeroOrMore(self)
+ elif isinstance(other[0],int) and isinstance(other[1],int):
+ minElements, optElements = other
+ optElements -= minElements
+ else:
+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
+ else:
+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
+
+ if minElements < 0:
+ raise ValueError("cannot multiply ParserElement by negative value")
+ if optElements < 0:
+            raise ValueError("second tuple value must be greater than or equal to first tuple value")
+ if minElements == optElements == 0:
+ raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+ if (optElements):
+ def makeOptionalList(n):
+ if n>1:
+ return Optional(self + makeOptionalList(n-1))
+ else:
+ return Optional(self)
+ if minElements:
+ if minElements == 1:
+ ret = self + makeOptionalList(optElements)
+ else:
+ ret = And([self]*minElements) + makeOptionalList(optElements)
+ else:
+ ret = makeOptionalList(optElements)
+ else:
+ if minElements == 1:
+ ret = self
+ else:
+ ret = And([self]*minElements)
+ return ret
+
+ def __rmul__(self, other):
+ return self.__mul__(other)
+
+ def __or__(self, other ):
+ """
+ Implementation of | operator - returns C{L{MatchFirst}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return MatchFirst( [ self, other ] )
+
+ def __ror__(self, other ):
+ """
+ Implementation of | operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other | self
+
+ def __xor__(self, other ):
+ """
+ Implementation of ^ operator - returns C{L{Or}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Or( [ self, other ] )
+
+ def __rxor__(self, other ):
+ """
+ Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other ^ self
+
+ def __and__(self, other ):
+ """
+ Implementation of & operator - returns C{L{Each}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Each( [ self, other ] )
+
+ def __rand__(self, other ):
+ """
+ Implementation of & operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other & self
+
+ def __invert__( self ):
+ """
+ Implementation of ~ operator - returns C{L{NotAny}}
+ """
+ return NotAny( self )
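+
+    # Illustrative sketch of the operator overloads defined above, as client
+    # code might combine them:
+    #   ident, integer = Word(alphas), Word(nums)
+    #   assignment = ident + "=" + (integer | ident)   # '+' -> And, '|' -> MatchFirst
+    #   number_only = ~ident + integer                 # '~' -> NotAny lookahead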
+
+ def __call__(self, name=None):
+ """
+ Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
+
+ If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+ passed as C{True}.
+
+ If C{name} is omitted, same as calling C{L{copy}}.
+
+ Example::
+ # these are equivalent
+ userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+ userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
+ """
+ if name is not None:
+ return self.setResultsName(name)
+ else:
+ return self.copy()
+
+ def suppress( self ):
+ """
+ Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+ cluttering up returned output.
+ """
+ return Suppress( self )
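+
+    # Illustrative sketch: drop the '/' separators from the returned tokens.
+    #   integer = Word(nums)
+    #   slash = Literal('/').suppress()
+    #   date_str = integer + slash + integer + slash + integer
+    #   date_str.parseString("1999/12/31")   # -> ['1999', '12', '31']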
+
+ def leaveWhitespace( self ):
+ """
+ Disables the skipping of whitespace before matching the characters in the
+ C{ParserElement}'s defined pattern. This is normally only used internally by
+ the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+ """
+ self.skipWhitespace = False
+ return self
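+
+    # Illustrative sketch: require the digits to follow the word with no
+    # intervening whitespace.
+    #   no_gap = Word(alphas) + Word(nums).leaveWhitespace()
+    #   no_gap.parseString("abc123")    # -> ['abc', '123']
+    #   no_gap.parseString("abc 123")   # -> raises ParseException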
+
+ def setWhitespaceChars( self, chars ):
+ """
+ Overrides the default whitespace chars
+ """
+ self.skipWhitespace = True
+ self.whiteChars = chars
+ self.copyDefaultWhiteChars = False
+ return self
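+
+    # Illustrative sketch: make newlines significant for this one expression
+    # without changing the module-wide default.
+    #   word = Word(alphas).setWhitespaceChars(" \t")
+    #   OneOrMore(word).parseString("abc def\nghi")   # -> ['abc', 'def']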
+
+ def parseWithTabs( self ):
+ """
+ Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
+ Must be called before C{parseString} when the input grammar contains elements that
+ match C{<TAB>} characters.
+ """
+ self.keepTabs = True
+ return self
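+
+    # Illustrative sketch: keep a literal tab visible to the grammar instead of
+    # letting parseString expand it to spaces.
+    #   tabbed = Word(alphas) + White("\t") + Word(alphas)
+    #   tabbed.parseWithTabs().parseString("abc\tdef")   # -> ['abc', '\t', 'def']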
+
+ def ignore( self, other ):
+ """
+ Define expression to be ignored (e.g., comments) while doing pattern
+ matching; may be called repeatedly, to define multiple comment or other
+ ignorable patterns.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+
+ patt.ignore(cStyleComment)
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+ """
+ if isinstance(other, basestring):
+ other = Suppress(other)
+
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ self.ignoreExprs.append(other)
+ else:
+ self.ignoreExprs.append( Suppress( other.copy() ) )
+ return self
+
+ def setDebugActions( self, startAction, successAction, exceptionAction ):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ """
+ self.debugActions = (startAction or _defaultStartDebugAction,
+ successAction or _defaultSuccessDebugAction,
+ exceptionAction or _defaultExceptionDebugAction)
+ self.debug = True
+ return self
+
+ def setDebug( self, flag=True ):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ Set C{flag} to True to enable, False to disable.
+
+ Example::
+ wd = Word(alphas).setName("alphaword")
+ integer = Word(nums).setName("numword")
+ term = wd | integer
+
+ # turn on debugging for wd
+ wd.setDebug()
+
+ OneOrMore(term).parseString("abc 123 xyz 890")
+
+ prints::
+ Match alphaword at loc 0(1,1)
+ Matched alphaword -> ['abc']
+ Match alphaword at loc 3(1,4)
+ Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+ Match alphaword at loc 7(1,8)
+ Matched alphaword -> ['xyz']
+ Match alphaword at loc 11(1,12)
+ Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+ Match alphaword at loc 15(1,16)
+ Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+ The output shown is that produced by the default debug actions - custom debug actions can be
+ specified using L{setDebugActions}. Prior to attempting
+ to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
+ is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
+ message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
+ which makes debugging and exception messages easier to understand - for instance, the default
+ name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
+ """
+ if flag:
+ self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+ else:
+ self.debug = False
+ return self
+
+ def __str__( self ):
+ return self.name
+
+ def __repr__( self ):
+ return _ustr(self)
+
+ def streamline( self ):
+ self.streamlined = True
+ self.strRepr = None
+ return self
+
+ def checkRecursion( self, parseElementList ):
+ pass
+
+ def validate( self, validateTrace=[] ):
+ """
+ Check defined expressions for valid structure, check for infinite recursive definitions.
+ """
+ self.checkRecursion( [] )
+
+ def parseFile( self, file_or_filename, parseAll=False ):
+ """
+ Execute the parse expression on the given file or filename.
+ If a filename is specified (instead of a file object),
+ the entire file is opened, read, and closed before parsing.
+ """
+ try:
+ file_contents = file_or_filename.read()
+ except AttributeError:
+ with open(file_or_filename, "r") as f:
+ file_contents = f.read()
+ try:
+ return self.parseString(file_contents, parseAll)
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
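+
+    # Minimal usage sketch (the file name is hypothetical; assumes the file
+    # holds whitespace-separated words):
+    #   grammar = OneOrMore(Word(alphas))
+    #   tokens = grammar.parseFile("words.txt", parseAll=True)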
+
+ def __eq__(self,other):
+ if isinstance(other, ParserElement):
+ return self is other or vars(self) == vars(other)
+ elif isinstance(other, basestring):
+ return self.matches(other)
+ else:
+            return super(ParserElement, self).__eq__(other)
+
+ def __ne__(self,other):
+ return not (self == other)
+
+ def __hash__(self):
+ return hash(id(self))
+
+ def __req__(self,other):
+ return self == other
+
+ def __rne__(self,other):
+ return not (self == other)
+
+ def matches(self, testString, parseAll=True):
+ """
+ Method for quick testing of a parser against a test string. Good for simple
+        inline microtests of sub-expressions while building up a larger parser.
+
+ Parameters:
+ - testString - to test against this expression for a match
+ - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+
+ Example::
+ expr = Word(nums)
+ assert expr.matches("100")
+ """
+ try:
+ self.parseString(_ustr(testString), parseAll=parseAll)
+ return True
+ except ParseBaseException:
+ return False
+
+ def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
+ """
+ Execute the parse expression on a series of test strings, showing each
+ test, the parsed results or where the parse failed. Quick and easy way to
+ run a parse expression against a list of sample strings.
+
+ Parameters:
+ - tests - a list of separate test strings, or a multiline string of test strings
+ - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+ - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
+ string; pass None to disable comment filtering
+ - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
+ if False, only dump nested list
+ - printResults - (default=C{True}) prints test output to stdout
+ - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
+
+ Returns: a (success, results) tuple, where success indicates that all tests succeeded
+ (or failed if C{failureTests} is True), and the results contain a list of lines of each
+ test's output
+
+ Example::
+ number_expr = pyparsing_common.number.copy()
+
+ result = number_expr.runTests('''
+ # unsigned integer
+ 100
+ # negative integer
+ -100
+ # float with scientific notation
+ 6.02e23
+ # integer with scientific notation
+ 1e-12
+ ''')
+ print("Success" if result[0] else "Failed!")
+
+ result = number_expr.runTests('''
+ # stray character
+ 100Z
+ # missing leading digit before '.'
+ -.100
+ # too many '.'
+ 3.14.159
+ ''', failureTests=True)
+ print("Success" if result[0] else "Failed!")
+ prints::
+ # unsigned integer
+ 100
+ [100]
+
+ # negative integer
+ -100
+ [-100]
+
+ # float with scientific notation
+ 6.02e23
+ [6.02e+23]
+
+ # integer with scientific notation
+ 1e-12
+ [1e-12]
+
+ Success
+
+ # stray character
+ 100Z
+ ^
+ FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+ # missing leading digit before '.'
+ -.100
+ ^
+ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+ # too many '.'
+ 3.14.159
+ ^
+ FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+ Success
+
+ Each test string must be on a single line. If you want to test a string that spans multiple
+ lines, create a test like this::
+
+            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
+
+ (Note that this is a raw string literal, you must include the leading 'r'.)
+ """
+ if isinstance(tests, basestring):
+ tests = list(map(str.strip, tests.rstrip().splitlines()))
+ if isinstance(comment, basestring):
+ comment = Literal(comment)
+ allResults = []
+ comments = []
+ success = True
+ for t in tests:
+ if comment is not None and comment.matches(t, False) or comments and not t:
+ comments.append(t)
+ continue
+ if not t:
+ continue
+ out = ['\n'.join(comments), t]
+ comments = []
+ try:
+ t = t.replace(r'\n','\n')
+ result = self.parseString(t, parseAll=parseAll)
+ out.append(result.dump(full=fullDump))
+ success = success and not failureTests
+ except ParseBaseException as pe:
+ fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+ if '\n' in t:
+ out.append(line(pe.loc, t))
+ out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
+ else:
+ out.append(' '*pe.loc + '^' + fatal)
+ out.append("FAIL: " + str(pe))
+ success = success and failureTests
+ result = pe
+ except Exception as exc:
+ out.append("FAIL-EXCEPTION: " + str(exc))
+ success = success and failureTests
+ result = exc
+
+ if printResults:
+ if fullDump:
+ out.append('')
+ print('\n'.join(out))
+
+ allResults.append((t, result))
+
+ return success, allResults
+
+
+class Token(ParserElement):
+ """
+ Abstract C{ParserElement} subclass, for defining atomic matching patterns.
+ """
+ def __init__( self ):
+ super(Token,self).__init__( savelist=False )
+
+
+class Empty(Token):
+ """
+ An empty token, will always match.
+ """
+ def __init__( self ):
+ super(Empty,self).__init__()
+ self.name = "Empty"
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+
+class NoMatch(Token):
+ """
+ A token that will never match.
+ """
+ def __init__( self ):
+ super(NoMatch,self).__init__()
+ self.name = "NoMatch"
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.errmsg = "Unmatchable token"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+ """
+ Token to exactly match a specified string.
+
+ Example::
+ Literal('blah').parseString('blah') # -> ['blah']
+ Literal('blah').parseString('blahfooblah') # -> ['blah']
+ Literal('blah').parseString('bla') # -> Exception: Expected "blah"
+
+ For case-insensitive matching, use L{CaselessLiteral}.
+
+ For keyword matching (force word break before and after the matched string),
+ use L{Keyword} or L{CaselessKeyword}.
+ """
+ def __init__( self, matchString ):
+ super(Literal,self).__init__()
+ self.match = matchString
+ self.matchLen = len(matchString)
+ try:
+ self.firstMatchChar = matchString[0]
+ except IndexError:
+ warnings.warn("null string passed to Literal; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+ self.__class__ = Empty
+ self.name = '"%s"' % _ustr(self.match)
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+
+ # Performance tuning: this routine gets called a *lot*
+ # if this is a single character match string and the first character matches,
+ # short-circuit as quickly as possible, and avoid calling startswith
+ #~ @profile
+ def parseImpl( self, instring, loc, doActions=True ):
+ if (instring[loc] == self.firstMatchChar and
+ (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+ """
+ Token to exactly match a specified string as a keyword, that is, it must be
+ immediately followed by a non-keyword character. Compare with C{L{Literal}}:
+ - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
+ - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
+ Accepts two optional constructor arguments in addition to the keyword string:
+ - C{identChars} is a string of characters that would be valid identifier characters,
+ defaulting to all alphanumerics + "_" and "$"
+ - C{caseless} allows case-insensitive matching, default is C{False}.
+
+ Example::
+ Keyword("start").parseString("start") # -> ['start']
+ Keyword("start").parseString("starting") # -> Exception
+
+ For case-insensitive matching, use L{CaselessKeyword}.
+ """
+ DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+ def __init__( self, matchString, identChars=None, caseless=False ):
+ super(Keyword,self).__init__()
+ if identChars is None:
+ identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ self.match = matchString
+ self.matchLen = len(matchString)
+ try:
+ self.firstMatchChar = matchString[0]
+ except IndexError:
+ warnings.warn("null string passed to Keyword; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+ self.name = '"%s"' % self.match
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+ self.caseless = caseless
+ if caseless:
+ self.caselessmatch = matchString.upper()
+ identChars = identChars.upper()
+ self.identChars = set(identChars)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.caseless:
+ if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+ (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ else:
+ if (instring[loc] == self.firstMatchChar and
+ (self.matchLen==1 or instring.startswith(self.match,loc)) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+ (loc == 0 or instring[loc-1] not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ def copy(self):
+ c = super(Keyword,self).copy()
+ c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ return c
+
+ @staticmethod
+ def setDefaultKeywordChars( chars ):
+ """Overrides the default Keyword chars
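+
+         Example (illustrative sketch)::
+             # include '-' as a keyword character, so that Keywords created
+             # afterwards will not match the leading "end" in "end-of-line"
+             Keyword.setDefaultKeywordChars(alphanums + "_$-")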
+ """
+ Keyword.DEFAULT_KEYWORD_CHARS = chars
+
+class CaselessLiteral(Literal):
+ """
+ Token to match a specified string, ignoring case of letters.
+ Note: the matched results will always be in the case of the given
+ match string, NOT the case of the input text.
+
+ Example::
+ OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+
+ (Contrast with example for L{CaselessKeyword}.)
+ """
+ def __init__( self, matchString ):
+ super(CaselessLiteral,self).__init__( matchString.upper() )
+ # Preserve the defining literal.
+ self.returnString = matchString
+ self.name = "'%s'" % self.returnString
+ self.errmsg = "Expected " + self.name
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if instring[ loc:loc+self.matchLen ].upper() == self.match:
+ return loc+self.matchLen, self.returnString
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+ """
+ Caseless version of L{Keyword}.
+
+ Example::
+ OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+
+ (Contrast with example for L{CaselessLiteral}.)
+ """
+ def __init__( self, matchString, identChars=None ):
+ super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class CloseMatch(Token):
+ """
+ A variation on L{Literal} which matches "close" matches, that is,
+ strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
+ - C{match_string} - string to be matched
+ - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
+
+ The results from a successful parse will contain the matched text from the input string and the following named results:
+ - C{mismatches} - a list of the positions within the match_string where mismatches were found
+ - C{original} - the original match_string used to compare against the input string
+
+ If C{mismatches} is an empty list, then the match was an exact match.
+
+ Example::
+ patt = CloseMatch("ATCATCGAATGGA")
+ patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+ patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+ # exact match
+ patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+ # close match allowing up to 2 mismatches
+ patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+ patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+ """
+ def __init__(self, match_string, maxMismatches=1):
+ super(CloseMatch,self).__init__()
+ self.name = match_string
+ self.match_string = match_string
+ self.maxMismatches = maxMismatches
+ self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+ self.mayIndexError = False
+ self.mayReturnEmpty = False
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ start = loc
+ instrlen = len(instring)
+ maxloc = start + len(self.match_string)
+
+ if maxloc <= instrlen:
+ match_string = self.match_string
+ match_stringloc = 0
+ mismatches = []
+ maxMismatches = self.maxMismatches
+
+ for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
+ src,mat = s_m
+ if src != mat:
+ mismatches.append(match_stringloc)
+ if len(mismatches) > maxMismatches:
+ break
+ else:
+ loc = match_stringloc + 1
+ results = ParseResults([instring[start:loc]])
+ results['original'] = self.match_string
+ results['mismatches'] = mismatches
+ return loc, results
+
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+ """
+ Token for matching words composed of allowed character sets.
+ Defined with a string containing all allowed initial characters,
+ an optional string containing allowed body characters (if omitted,
+ defaults to the initial character set), and an optional minimum,
+ maximum, and/or exact length. The default value for C{min} is 1 (a
+ minimum value < 1 is not valid); the default values for C{max} and C{exact}
+ are 0, meaning no maximum or exact length restriction. An optional
+ C{excludeChars} parameter can list characters that might be found in
+ the input C{bodyChars} string; useful to define a word of all printables
+ except for one or two characters, for instance.
+
+ L{srange} is useful for defining custom character set strings for defining
+ C{Word} expressions, using range notation from regular expression character sets.
+
+ A common mistake is to use C{Word} to match a specific literal string, as in
+ C{Word("Address")}. Remember that C{Word} uses the string argument to define
+ I{sets} of matchable characters. This expression would match "Add", "AAA",
+ "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
+ To match an exact literal string, use L{Literal} or L{Keyword}.
+
+ pyparsing includes helper strings for building Words:
+ - L{alphas}
+ - L{nums}
+ - L{alphanums}
+ - L{hexnums}
+ - L{alphas8bit} (alphabetic characters in the Latin-1 range 128-255 - accented, tilded, umlauted, etc.)
+ - L{punc8bit} (non-alphabetic characters in the Latin-1 range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
+ - L{printables} (any non-whitespace character)
+
+ Example::
+ # a word composed of digits
+ integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+
+ # a word with a leading capital, and zero or more lowercase
+ capital_word = Word(alphas.upper(), alphas.lower())
+
+ # hostnames are alphanumeric, with leading alpha, and '-'
+ hostname = Word(alphas, alphanums+'-')
+
+ # roman numeral (not a strict parser, accepts invalid mix of characters)
+ roman = Word("IVXLCDM")
+
+ # any string of non-whitespace characters, except for ','
+ csv_value = Word(printables, excludeChars=",")
+ """
+ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+ super(Word,self).__init__()
+ if excludeChars:
+ initChars = ''.join(c for c in initChars if c not in excludeChars)
+ if bodyChars:
+ bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+ self.initCharsOrig = initChars
+ self.initChars = set(initChars)
+ if bodyChars :
+ self.bodyCharsOrig = bodyChars
+ self.bodyChars = set(bodyChars)
+ else:
+ self.bodyCharsOrig = initChars
+ self.bodyChars = set(initChars)
+
+ self.maxSpecified = max > 0
+
+ if min < 1:
+ raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.asKeyword = asKeyword
+
+ if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+ if self.bodyCharsOrig == self.initCharsOrig:
+ self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+ elif len(self.initCharsOrig) == 1:
+ self.reString = "%s[%s]*" % \
+ (re.escape(self.initCharsOrig),
+ _escapeRegexRangeChars(self.bodyCharsOrig),)
+ else:
+ self.reString = "[%s][%s]*" % \
+ (_escapeRegexRangeChars(self.initCharsOrig),
+ _escapeRegexRangeChars(self.bodyCharsOrig),)
+ if self.asKeyword:
+ self.reString = r"\b"+self.reString+r"\b"
+ try:
+ self.re = re.compile( self.reString )
+ except Exception:
+ self.re = None
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.re:
+ result = self.re.match(instring,loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ return loc, result.group()
+
+ if not(instring[ loc ] in self.initChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ instrlen = len(instring)
+ bodychars = self.bodyChars
+ maxloc = start + self.maxLen
+ maxloc = min( maxloc, instrlen )
+ while loc < maxloc and instring[loc] in bodychars:
+ loc += 1
+
+ throwException = False
+ if loc - start < self.minLen:
+ throwException = True
+ if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+ throwException = True
+ if self.asKeyword:
+ if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+ throwException = True
+
+ if throwException:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+ def __str__( self ):
+ try:
+ return super(Word,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+
+ def charsAsStr(s):
+ if len(s)>4:
+ return s[:4]+"..."
+ else:
+ return s
+
+ if ( self.initCharsOrig != self.bodyCharsOrig ):
+ self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+ else:
+ self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+ return self.strRepr
+
+
+class Regex(Token):
+ r"""
+ Token for matching strings that match a given regular expression.
+ Defined with a string specifying the regular expression in a form recognized by the built-in Python re module.
+ If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
+ named parse results.
+
+ Example::
+ realnum = Regex(r"[+-]?\d+\.\d*")
+ date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+ # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+ roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+ """
+ compiledREtype = type(re.compile("[A-Z]"))
+ def __init__( self, pattern, flags=0):
+ """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
+ super(Regex,self).__init__()
+
+ if isinstance(pattern, basestring):
+ if not pattern:
+ warnings.warn("null string passed to Regex; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+
+ self.pattern = pattern
+ self.flags = flags
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ except sre_constants.error:
+ warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+ SyntaxWarning, stacklevel=2)
+ raise
+
+ elif isinstance(pattern, Regex.compiledREtype):
+ self.re = pattern
+ self.pattern = \
+ self.reString = str(pattern)
+ self.flags = flags
+
+ else:
+ raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ result = self.re.match(instring,loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ d = result.groupdict()
+ ret = ParseResults(result.group())
+ if d:
+ for k in d:
+ ret[k] = d[k]
+ return loc,ret
+
+ def __str__( self ):
+ try:
+ return super(Regex,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+ return self.strRepr
+
+
+class QuotedString(Token):
+ r"""
+ Token for matching strings that are delimited by quoting characters.
+
+ Defined with the following parameters:
+ - quoteChar - string of one or more characters defining the quote delimiting string
+ - escChar - character to escape quotes, typically backslash (default=C{None})
+ - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+ - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+ - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+ - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+ - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+ Example::
+ qs = QuotedString('"')
+ print(qs.searchString('lsjdf "This is the quote" sldjf'))
+ complex_qs = QuotedString('{{', endQuoteChar='}}')
+ print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+ sql_qs = QuotedString('"', escQuote='""')
+ print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+ prints::
+ [['This is the quote']]
+ [['This is the "quote"']]
+ [['This is the quote with "embedded" quotes']]
+ """
+ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+ super(QuotedString,self).__init__()
+
+ # remove white space from quote chars - won't work anyway
+ quoteChar = quoteChar.strip()
+ if not quoteChar:
+ warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+ raise SyntaxError()
+
+ if endQuoteChar is None:
+ endQuoteChar = quoteChar
+ else:
+ endQuoteChar = endQuoteChar.strip()
+ if not endQuoteChar:
+ warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+ raise SyntaxError()
+
+ self.quoteChar = quoteChar
+ self.quoteCharLen = len(quoteChar)
+ self.firstQuoteChar = quoteChar[0]
+ self.endQuoteChar = endQuoteChar
+ self.endQuoteCharLen = len(endQuoteChar)
+ self.escChar = escChar
+ self.escQuote = escQuote
+ self.unquoteResults = unquoteResults
+ self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+ if multiline:
+ self.flags = re.MULTILINE | re.DOTALL
+ self.pattern = r'%s(?:[^%s%s]' % \
+ ( re.escape(self.quoteChar),
+ _escapeRegexRangeChars(self.endQuoteChar[0]),
+ (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+ else:
+ self.flags = 0
+ self.pattern = r'%s(?:[^%s\n\r%s]' % \
+ ( re.escape(self.quoteChar),
+ _escapeRegexRangeChars(self.endQuoteChar[0]),
+ (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+ if len(self.endQuoteChar) > 1:
+ self.pattern += (
+ '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+ _escapeRegexRangeChars(self.endQuoteChar[i]))
+ for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+ )
+ if escQuote:
+ self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+ if escChar:
+ self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+ self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+ self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ except sre_constants.error:
+ warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+ SyntaxWarning, stacklevel=2)
+ raise
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result.group()
+
+ if self.unquoteResults:
+
+ # strip off quotes
+ ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+ if isinstance(ret,basestring):
+ # replace escaped whitespace
+ if '\\' in ret and self.convertWhitespaceEscapes:
+ ws_map = {
+ r'\t' : '\t',
+ r'\n' : '\n',
+ r'\f' : '\f',
+ r'\r' : '\r',
+ }
+ for wslit,wschar in ws_map.items():
+ ret = ret.replace(wslit, wschar)
+
+ # replace escaped characters
+ if self.escChar:
+ ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+ # replace escaped quotes
+ if self.escQuote:
+ ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+ return loc, ret
+
+ def __str__( self ):
+ try:
+ return super(QuotedString,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+ return self.strRepr
+
+
+class CharsNotIn(Token):
+ """
+ Token for matching words composed of characters I{not} in a given set (will
+ include whitespace in matched characters if not listed in the provided exclusion set - see example).
+ Defined with a string containing all disallowed characters, and an optional
+ minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
+ minimum value < 1 is not valid); the default values for C{max} and C{exact}
+ are 0, meaning no maximum or exact length restriction.
+
+ Example::
+ # define a comma-separated-value as anything that is not a ','
+ csv_value = CharsNotIn(',')
+ print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+ prints::
+ ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+ """
+ def __init__( self, notChars, min=1, max=0, exact=0 ):
+ super(CharsNotIn,self).__init__()
+ self.skipWhitespace = False
+ self.notChars = notChars
+
+ if min < 1:
+ raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = ( self.minLen == 0 )
+ self.mayIndexError = False
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if instring[loc] in self.notChars:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ notchars = self.notChars
+ maxlen = min( start+self.maxLen, len(instring) )
+ while loc < maxlen and \
+ (instring[loc] not in notchars):
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+ def __str__( self ):
+ try:
+ return super(CharsNotIn, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ if len(self.notChars) > 4:
+ self.strRepr = "!W:(%s...)" % self.notChars[:4]
+ else:
+ self.strRepr = "!W:(%s)" % self.notChars
+
+ return self.strRepr
+
+class White(Token):
+ """
+ Special matching class for matching whitespace. Normally, whitespace is ignored
+ by pyparsing grammars. This class is included when some whitespace structures
+ are significant. Define with a string containing the whitespace characters to be
+ matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
+ as defined for the C{L{Word}} class.
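+
+     Example (illustrative sketch)::
+         # newlines are normally skipped; White makes them explicit tokens
+         EOL = White("\n")
+         (Word(alphas) + EOL + Word(alphas)).parseString("hello\nworld")
+         # -> ['hello', '\n', 'world']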
+ """
+ whiteStrs = {
+ " " : "<SPC>",
+ "\t": "<TAB>",
+ "\n": "<LF>",
+ "\r": "<CR>",
+ "\f": "<FF>",
+ }
+ def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+ super(White,self).__init__()
+ self.matchWhite = ws
+ self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
+ #~ self.leaveWhitespace()
+ self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+ self.mayReturnEmpty = True
+ self.errmsg = "Expected " + self.name
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if not(instring[ loc ] in self.matchWhite):
+ raise ParseException(instring, loc, self.errmsg, self)
+ start = loc
+ loc += 1
+ maxloc = start + self.maxLen
+ maxloc = min( maxloc, len(instring) )
+ while loc < maxloc and instring[loc] in self.matchWhite:
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+
+class _PositionToken(Token):
+ def __init__( self ):
+ super(_PositionToken,self).__init__()
+ self.name=self.__class__.__name__
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+ """
+ Token to advance to a specific column of input text; useful for tabular report scraping.
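+
+     Example (illustrative sketch)::
+         # skip ahead to column 20 of a report line, then take the rest of the line
+         field = GoToColumn(20).suppress() + restOfLine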
+ """
+ def __init__( self, colno ):
+ super(GoToColumn,self).__init__()
+ self.col = colno
+
+ def preParse( self, instring, loc ):
+ if col(loc,instring) != self.col:
+ instrlen = len(instring)
+ if self.ignoreExprs:
+ loc = self._skipIgnorables( instring, loc )
+ while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+ loc += 1
+ return loc
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ thiscol = col( loc, instring )
+ if thiscol > self.col:
+ raise ParseException( instring, loc, "Text not in expected column", self )
+ newloc = loc + self.col - thiscol
+ ret = instring[ loc: newloc ]
+ return newloc, ret
+
+
+class LineStart(_PositionToken):
+ """
+ Matches if current position is at the beginning of a line within the parse string
+
+ Example::
+
+ test = '''\
+ AAA this line
+ AAA and this line
+ AAA but not this one
+ B AAA and definitely not this one
+ '''
+
+ for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+ print(t)
+
+ Prints::
+ ['AAA', ' this line']
+ ['AAA', ' and this line']
+
+ """
+ def __init__( self ):
+ super(LineStart,self).__init__()
+ self.errmsg = "Expected start of line"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if col(loc, instring) == 1:
+ return loc, []
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+ """
+ Matches if current position is at the end of a line within the parse string
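+
+     Example (illustrative sketch)::
+         # require each integer to end its line
+         int_line = Word(nums) + LineEnd().suppress()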
+ """
+ def __init__( self ):
+ super(LineEnd,self).__init__()
+ self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+ self.errmsg = "Expected end of line"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc<len(instring):
+ if instring[loc] == "\n":
+ return loc+1, "\n"
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc+1, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class StringStart(_PositionToken):
+ """
+ Matches if current position is at the beginning of the parse string
+ """
+ def __init__( self ):
+ super(StringStart,self).__init__()
+ self.errmsg = "Expected start of text"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc != 0:
+ # see if entire string up to here is just whitespace and ignoreables
+ if loc != self.preParse( instring, 0 ):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+class StringEnd(_PositionToken):
+ """
+ Matches if current position is at the end of the parse string
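+
+     Example (illustrative sketch)::
+         # require that the entire input be consumed
+         expr = Word(nums) + StringEnd()
+         expr.parseString("123")      # -> ['123']
+         expr.parseString("123 abc")  # -> raises ParseException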
+ """
+ def __init__( self ):
+ super(StringEnd,self).__init__()
+ self.errmsg = "Expected end of text"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc < len(instring):
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc+1, []
+ elif loc > len(instring):
+ return loc, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class WordStart(_PositionToken):
+ """
+ Matches if the current position is at the beginning of a Word, and
+ is not preceded by any character in a given set of C{wordChars}
+ (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+ use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
+ the string being parsed, or at the beginning of a line.
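+
+     Example (illustrative sketch)::
+         # match 'cat' only where it begins a word
+         expr = WordStart(alphanums) + Literal("cat")
+         expr.searchString("cat concat bobcat")
+         # -> [['cat']]  (the 'cat' inside 'concat' and 'bobcat' is not matched)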
+ """
+ def __init__(self, wordChars = printables):
+ super(WordStart,self).__init__()
+ self.wordChars = set(wordChars)
+ self.errmsg = "Not at the start of a word"
+
+ def parseImpl(self, instring, loc, doActions=True ):
+ if loc != 0:
+ if (instring[loc-1] in self.wordChars or
+ instring[loc] not in self.wordChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+class WordEnd(_PositionToken):
+ """
+ Matches if the current position is at the end of a Word, and
+ is not followed by any character in a given set of C{wordChars}
+ (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+ use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
+ the string being parsed, or at the end of a line.
+ """
+ def __init__(self, wordChars = printables):
+ super(WordEnd,self).__init__()
+ self.wordChars = set(wordChars)
+ self.skipWhitespace = False
+ self.errmsg = "Not at the end of a word"
+
+ def parseImpl(self, instring, loc, doActions=True ):
+ instrlen = len(instring)
+ if instrlen>0 and loc<instrlen:
+ if (instring[loc] in self.wordChars or
+ instring[loc-1] not in self.wordChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+
+class ParseExpression(ParserElement):
+ """
+ Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(ParseExpression,self).__init__(savelist)
+ if isinstance( exprs, _generatorType ):
+ exprs = list(exprs)
+
+ if isinstance( exprs, basestring ):
+ self.exprs = [ ParserElement._literalStringClass( exprs ) ]
+ elif isinstance( exprs, Iterable ):
+ exprs = list(exprs)
+ # if sequence of strings provided, wrap with Literal
+ if all(isinstance(expr, basestring) for expr in exprs):
+ exprs = map(ParserElement._literalStringClass, exprs)
+ self.exprs = list(exprs)
+ else:
+ try:
+ self.exprs = list( exprs )
+ except TypeError:
+ self.exprs = [ exprs ]
+ self.callPreparse = False
+
+ def __getitem__( self, i ):
+ return self.exprs[i]
+
+ def append( self, other ):
+ self.exprs.append( other )
+ self.strRepr = None
+ return self
+
+ def leaveWhitespace( self ):
+ """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
+ all contained expressions."""
+ self.skipWhitespace = False
+ self.exprs = [ e.copy() for e in self.exprs ]
+ for e in self.exprs:
+ e.leaveWhitespace()
+ return self
+
+ def ignore( self, other ):
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ super( ParseExpression, self).ignore( other )
+ for e in self.exprs:
+ e.ignore( self.ignoreExprs[-1] )
+ else:
+ super( ParseExpression, self).ignore( other )
+ for e in self.exprs:
+ e.ignore( self.ignoreExprs[-1] )
+ return self
+
+ def __str__( self ):
+ try:
+ return super(ParseExpression,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
+ return self.strRepr
+
+ def streamline( self ):
+ super(ParseExpression,self).streamline()
+
+ for e in self.exprs:
+ e.streamline()
+
+ # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
+ # but only if there are no parse actions or resultsNames on the nested And's
+ # (likewise for Or's and MatchFirst's)
+ if ( len(self.exprs) == 2 ):
+ other = self.exprs[0]
+ if ( isinstance( other, self.__class__ ) and
+ not(other.parseAction) and
+ other.resultsName is None and
+ not other.debug ):
+ self.exprs = other.exprs[:] + [ self.exprs[1] ]
+ self.strRepr = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ other = self.exprs[-1]
+ if ( isinstance( other, self.__class__ ) and
+ not(other.parseAction) and
+ other.resultsName is None and
+ not other.debug ):
+ self.exprs = self.exprs[:-1] + other.exprs[:]
+ self.strRepr = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ self.errmsg = "Expected " + _ustr(self)
+
+ return self
+
+ def setResultsName( self, name, listAllMatches=False ):
+ ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
+ return ret
+
+ def validate( self, validateTrace=[] ):
+ tmp = validateTrace[:]+[self]
+ for e in self.exprs:
+ e.validate(tmp)
+ self.checkRecursion( [] )
+
+ def copy(self):
+ ret = super(ParseExpression,self).copy()
+ ret.exprs = [e.copy() for e in self.exprs]
+ return ret
+
+class And(ParseExpression):
+ """
+ Requires all given C{ParseExpression}s to be found in the given order.
+ Expressions may be separated by whitespace.
+ May be constructed using the C{'+'} operator.
+ May also be constructed using the C{'-'} operator, which will suppress backtracking.
+
+ Example::
+ integer = Word(nums)
+ name_expr = OneOrMore(Word(alphas))
+
+ expr = And([integer("id"),name_expr("name"),integer("age")])
+ # more easily written as:
+ expr = integer("id") + name_expr("name") + integer("age")
+ """
+
+ class _ErrorStop(Empty):
+ def __init__(self, *args, **kwargs):
+ super(And._ErrorStop,self).__init__(*args, **kwargs)
+ self.name = '-'
+ self.leaveWhitespace()
+
+ def __init__( self, exprs, savelist = True ):
+ super(And,self).__init__(exprs, savelist)
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ self.setWhitespaceChars( self.exprs[0].whiteChars )
+ self.skipWhitespace = self.exprs[0].skipWhitespace
+ self.callPreparse = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ # pass False as last arg to _parse for first element, since we already
+ # pre-parsed the string as part of our And pre-parsing
+ loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
+ errorStop = False
+ for e in self.exprs[1:]:
+ if isinstance(e, And._ErrorStop):
+ errorStop = True
+ continue
+ if errorStop:
+ try:
+ loc, exprtokens = e._parse( instring, loc, doActions )
+ except ParseSyntaxException:
+ raise
+ except ParseBaseException as pe:
+ pe.__traceback__ = None
+ raise ParseSyntaxException._from_exception(pe)
+ except IndexError:
+ raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
+ else:
+ loc, exprtokens = e._parse( instring, loc, doActions )
+ if exprtokens or exprtokens.haskeys():
+ resultlist += exprtokens
+ return loc, resultlist
+
+ def __iadd__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #And( [ self, other ] )
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+ if not e.mayReturnEmpty:
+ break
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+
+class Or(ParseExpression):
+ """
+ Requires that at least one C{ParseExpression} is found.
+ If two expressions match, the expression that matches the longest string will be used.
+ May be constructed using the C{'^'} operator.
+
+ Example::
+ # construct Or using '^' operator
+
+ number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+ print(number.searchString("123 3.1416 789"))
+ prints::
+ [['123'], ['3.1416'], ['789']]
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(Or,self).__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ maxExcLoc = -1
+ maxException = None
+ matches = []
+ for e in self.exprs:
+ try:
+ loc2 = e.tryParse( instring, loc )
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(instring,len(instring),e.errmsg,self)
+ maxExcLoc = len(instring)
+ else:
+ # save match among all matches, to retry longest to shortest
+ matches.append((loc2, e))
+
+ if matches:
+ matches.sort(key=lambda x: -x[0])
+ for _,e in matches:
+ try:
+ return e._parse( instring, loc, doActions )
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+
+ def __ixor__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #Or( [ self, other ] )
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class MatchFirst(ParseExpression):
+ """
+ Requires that at least one C{ParseExpression} is found.
+ If two expressions match, the first one listed is the one that will match.
+ May be constructed using the C{'|'} operator.
+
+ Example::
+ # construct MatchFirst using '|' operator
+
+ # watch the order of expressions to match
+ number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+ print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+ # put more selective expression first
+ number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+ print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(MatchFirst,self).__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ maxExcLoc = -1
+ maxException = None
+ for e in self.exprs:
+ try:
+ ret = e._parse( instring, loc, doActions )
+ return ret
+ except ParseException as err:
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(instring,len(instring),e.errmsg,self)
+ maxExcLoc = len(instring)
+
+ # only got here if no expression matched, raise exception for match that made it the furthest
+ else:
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+ def __ior__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #MatchFirst( [ self, other ] )
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class Each(ParseExpression):
+ """
+ Requires all given C{ParseExpression}s to be found, but in any order.
+ Expressions may be separated by whitespace.
+ May be constructed using the C{'&'} operator.
+
+ Example::
+ color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+ shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+ integer = Word(nums)
+ shape_attr = "shape:" + shape_type("shape")
+ posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+ color_attr = "color:" + color("color")
+ size_attr = "size:" + integer("size")
+
+ # use Each (using operator '&') to accept attributes in any order
+ # (shape and posn are required, color and size are optional)
+ shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+ shape_spec.runTests('''
+ shape: SQUARE color: BLACK posn: 100, 120
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ color:GREEN size:20 shape:TRIANGLE posn:20,40
+ '''
+ )
+ prints::
+ shape: SQUARE color: BLACK posn: 100, 120
+ ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+ - color: BLACK
+ - posn: ['100', ',', '120']
+ - x: 100
+ - y: 120
+ - shape: SQUARE
+
+
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+ - color: BLUE
+ - posn: ['50', ',', '80']
+ - x: 50
+ - y: 80
+ - shape: CIRCLE
+ - size: 50
+
+
+ color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+ ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+ - color: GREEN
+ - posn: ['20', ',', '40']
+ - x: 20
+ - y: 40
+ - shape: TRIANGLE
+ - size: 20
+ """
+ def __init__( self, exprs, savelist = True ):
+ super(Each,self).__init__(exprs, savelist)
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ self.skipWhitespace = True
+ self.initExprGroups = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.initExprGroups:
+ self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
+ opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+ opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
+ self.optionals = opt1 + opt2
+ self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+ self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+ self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+ self.required += self.multirequired
+ self.initExprGroups = False
+ tmpLoc = loc
+ tmpReqd = self.required[:]
+ tmpOpt = self.optionals[:]
+ matchOrder = []
+
+ keepMatching = True
+ while keepMatching:
+ tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+ failed = []
+ for e in tmpExprs:
+ try:
+ tmpLoc = e.tryParse( instring, tmpLoc )
+ except ParseException:
+ failed.append(e)
+ else:
+ matchOrder.append(self.opt1map.get(id(e),e))
+ if e in tmpReqd:
+ tmpReqd.remove(e)
+ elif e in tmpOpt:
+ tmpOpt.remove(e)
+ if len(failed) == len(tmpExprs):
+ keepMatching = False
+
+ if tmpReqd:
+ missing = ", ".join(_ustr(e) for e in tmpReqd)
+ raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+ # add any unmatched Optionals, in case they have default values defined
+ matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+ resultlist = []
+ for e in matchOrder:
+ loc,results = e._parse(instring,loc,doActions)
+ resultlist.append(results)
+
+ finalResults = sum(resultlist, ParseResults([]))
+ return loc, finalResults
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class ParseElementEnhance(ParserElement):
+ """
+ Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
+ """
+ def __init__( self, expr, savelist=False ):
+ super(ParseElementEnhance,self).__init__(savelist)
+ if isinstance( expr, basestring ):
+ if issubclass(ParserElement._literalStringClass, Token):
+ expr = ParserElement._literalStringClass(expr)
+ else:
+ expr = ParserElement._literalStringClass(Literal(expr))
+ self.expr = expr
+ self.strRepr = None
+ if expr is not None:
+ self.mayIndexError = expr.mayIndexError
+ self.mayReturnEmpty = expr.mayReturnEmpty
+ self.setWhitespaceChars( expr.whiteChars )
+ self.skipWhitespace = expr.skipWhitespace
+ self.saveAsList = expr.saveAsList
+ self.callPreparse = expr.callPreparse
+ self.ignoreExprs.extend(expr.ignoreExprs)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.expr is not None:
+ return self.expr._parse( instring, loc, doActions, callPreParse=False )
+ else:
+ raise ParseException("",loc,self.errmsg,self)
+
+ def leaveWhitespace( self ):
+ self.skipWhitespace = False
+ self.expr = self.expr.copy()
+ if self.expr is not None:
+ self.expr.leaveWhitespace()
+ return self
+
+ def ignore( self, other ):
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ super( ParseElementEnhance, self).ignore( other )
+ if self.expr is not None:
+ self.expr.ignore( self.ignoreExprs[-1] )
+ else:
+ super( ParseElementEnhance, self).ignore( other )
+ if self.expr is not None:
+ self.expr.ignore( self.ignoreExprs[-1] )
+ return self
+
+ def streamline( self ):
+ super(ParseElementEnhance,self).streamline()
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def checkRecursion( self, parseElementList ):
+ if self in parseElementList:
+ raise RecursiveGrammarException( parseElementList+[self] )
+ subRecCheckList = parseElementList[:] + [ self ]
+ if self.expr is not None:
+ self.expr.checkRecursion( subRecCheckList )
+
+ def validate( self, validateTrace=[] ):
+ tmp = validateTrace[:]+[self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self.checkRecursion( [] )
+
+ def __str__( self ):
+ try:
+ return super(ParseElementEnhance,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None and self.expr is not None:
+ self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+ return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+ """
+ Lookahead matching of the given parse expression. C{FollowedBy}
+ does I{not} advance the parsing position within the input string, it only
+ verifies that the specified parse expression matches at the current
+ position. C{FollowedBy} always returns a null token list.
+
+ Example::
+ # use FollowedBy to match a label only if it is followed by a ':'
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+ OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+ prints::
+ [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+ """
+ def __init__( self, expr ):
+ super(FollowedBy,self).__init__(expr)
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ self.expr.tryParse( instring, loc )
+ return loc, []
+
+
+class NotAny(ParseElementEnhance):
+ """
+ Lookahead to disallow matching with the given parse expression. C{NotAny}
+ does I{not} advance the parsing position within the input string, it only
+ verifies that the specified parse expression does I{not} match at the current
+ position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
+ always returns a null token list. May be constructed using the '~' operator.
+
+ Example::
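+     # illustrative sketch: use '~' (NotAny) to keep the keyword 'end' from
+     # being accepted as an ordinary identifier
+     ident = ~Keyword("end") + Word(alphas)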
+
+ """
+ def __init__( self, expr ):
+ super(NotAny,self).__init__(expr)
+ #~ self.leaveWhitespace()
+ self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+ self.mayReturnEmpty = True
+ self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.expr.canParseNext(instring, loc):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+ return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+ def __init__( self, expr, stopOn=None):
+ super(_MultipleMatch, self).__init__(expr)
+ self.saveAsList = True
+ ender = stopOn
+ if isinstance(ender, basestring):
+ ender = ParserElement._literalStringClass(ender)
+ self.not_ender = ~ender if ender is not None else None
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ self_expr_parse = self.expr._parse
+ self_skip_ignorables = self._skipIgnorables
+ check_ender = self.not_ender is not None
+ if check_ender:
+ try_not_ender = self.not_ender.tryParse
+
+ # must be at least one (but first see if we are the stopOn sentinel;
+ # if so, fail)
+ if check_ender:
+ try_not_ender(instring, loc)
+ loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+ try:
+ hasIgnoreExprs = (not not self.ignoreExprs)
+ while 1:
+ if check_ender:
+ try_not_ender(instring, loc)
+ if hasIgnoreExprs:
+ preloc = self_skip_ignorables( instring, loc )
+ else:
+ preloc = loc
+ loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+ if tmptokens or tmptokens.haskeys():
+ tokens += tmptokens
+ except (ParseException,IndexError):
+ pass
+
+ return loc, tokens
+
+class OneOrMore(_MultipleMatch):
+ """
+ Repetition of one or more of the given expression.
+
+ Parameters:
+ - expr - expression that must match one or more times
+ - stopOn - (default=C{None}) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example::
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+ text = "shape: SQUARE posn: upper left color: BLACK"
+ OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+ # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+ OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+
+ # could also be written as
+ (attr_expr * (1,)).parseString(text).pprint()
+ """
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+ return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+ """
+ Optional repetition of zero or more of the given expression.
+
+ Parameters:
+ - expr - expression that must match zero or more times
+ - stopOn - (default=C{None}) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example: similar to L{OneOrMore}
+ """
+ def __init__( self, expr, stopOn=None):
+ super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ try:
+ return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+ except (ParseException,IndexError):
+ return loc, []
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+ return self.strRepr
+
+class _NullToken(object):
+ def __bool__(self):
+ return False
+ __nonzero__ = __bool__
+ def __str__(self):
+ return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+ """
+ Optional matching of the given expression.
+
+ Parameters:
+ - expr - expression that may match zero or one time
+ - default (optional) - value to be returned if the optional expression is not found.
+
+ Example::
+ # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+ zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+ zip.runTests('''
+ # traditional ZIP code
+ 12345
+
+ # ZIP+4 form
+ 12101-0001
+
+ # invalid ZIP
+ 98765-
+ ''')
+ prints::
+ # traditional ZIP code
+ 12345
+ ['12345']
+
+ # ZIP+4 form
+ 12101-0001
+ ['12101-0001']
+
+ # invalid ZIP
+ 98765-
+ ^
+ FAIL: Expected end of text (at char 5), (line:1, col:6)
+ """
+ def __init__( self, expr, default=_optionalNotMatched ):
+ super(Optional,self).__init__( expr, savelist=False )
+ self.saveAsList = self.expr.saveAsList
+ self.defaultValue = default
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ try:
+ loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+ except (ParseException,IndexError):
+ if self.defaultValue is not _optionalNotMatched:
+ if self.expr.resultsName:
+ tokens = ParseResults([ self.defaultValue ])
+ tokens[self.expr.resultsName] = self.defaultValue
+ else:
+ tokens = [ self.defaultValue ]
+ else:
+ tokens = []
+ return loc, tokens
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "[" + _ustr(self.expr) + "]"
+
+ return self.strRepr
+
+class SkipTo(ParseElementEnhance):
+ """
+ Token for skipping over all undefined text until the matched expression is found.
+
+ Parameters:
+ - expr - target expression marking the end of the data to be skipped
+ - include - (default=C{False}) if True, the target expression is also parsed
+ (the skipped text and target expression are returned as a 2-element list).
+ - ignore - (default=C{None}) used to define grammars (typically quoted strings and
+ comments) that might contain false matches to the target expression
+ - failOn - (default=C{None}) define expressions that are not allowed to be
+ included in the skipped text; if found before the target expression is found,
+ the SkipTo is not a match
+
+ Example::
+ report = '''
+ Outstanding Issues Report - 1 Jan 2000
+
+ # | Severity | Description | Days Open
+ -----+----------+-------------------------------------------+-----------
+ 101 | Critical | Intermittent system crash | 6
+ 94 | Cosmetic | Spelling error on Login ('log|n') | 14
+ 79 | Minor | System slow when running too many reports | 47
+ '''
+ integer = Word(nums)
+ SEP = Suppress('|')
+ # use SkipTo to simply match everything up until the next SEP
+ # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+ # - parse action will call token.strip() for each matched token, i.e., the description body
+ string_data = SkipTo(SEP, ignore=quotedString)
+ string_data.setParseAction(tokenMap(str.strip))
+ ticket_expr = (integer("issue_num") + SEP
+ + string_data("sev") + SEP
+ + string_data("desc") + SEP
+ + integer("days_open"))
+
+ for tkt in ticket_expr.searchString(report):
+ print(tkt.dump())
+ prints::
+ ['101', 'Critical', 'Intermittent system crash', '6']
+ - days_open: 6
+ - desc: Intermittent system crash
+ - issue_num: 101
+ - sev: Critical
+ ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+ - days_open: 14
+ - desc: Spelling error on Login ('log|n')
+ - issue_num: 94
+ - sev: Cosmetic
+ ['79', 'Minor', 'System slow when running too many reports', '47']
+ - days_open: 47
+ - desc: System slow when running too many reports
+ - issue_num: 79
+ - sev: Minor
+ """
+ def __init__( self, other, include=False, ignore=None, failOn=None ):
+ super( SkipTo, self ).__init__( other )
+ self.ignoreExpr = ignore
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.includeMatch = include
+ self.asList = False
+ if isinstance(failOn, basestring):
+ self.failOn = ParserElement._literalStringClass(failOn)
+ else:
+ self.failOn = failOn
+ self.errmsg = "No match found for "+_ustr(self.expr)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ startloc = loc
+ instrlen = len(instring)
+ expr = self.expr
+ expr_parse = self.expr._parse
+ self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+ self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+
+ tmploc = loc
+ while tmploc <= instrlen:
+ if self_failOn_canParseNext is not None:
+ # break if failOn expression matches
+ if self_failOn_canParseNext(instring, tmploc):
+ break
+
+ if self_ignoreExpr_tryParse is not None:
+ # advance past ignore expressions
+ while 1:
+ try:
+ tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+ except ParseBaseException:
+ break
+
+ try:
+ expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+ except (ParseException, IndexError):
+ # no match, advance loc in string
+ tmploc += 1
+ else:
+ # matched skipto expr, done
+ break
+
+ else:
+ # ran off the end of the input string without matching skipto expr, fail
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ # build up return values
+ loc = tmploc
+ skiptext = instring[startloc:loc]
+ skipresult = ParseResults(skiptext)
+
+ if self.includeMatch:
+ loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+ skipresult += mat
+
+ return loc, skipresult
+
+class Forward(ParseElementEnhance):
+ """
+ Forward declaration of an expression to be defined later -
+ used for recursive grammars, such as algebraic infix notation.
+ When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+ Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+ Specifically, '|' has a lower precedence than '<<', so that::
+ fwdExpr << a | b | c
+ will actually be evaluated as::
+ (fwdExpr << a) | b | c
+ thereby leaving b and c out as parseable alternatives. It is recommended that you
+ explicitly group the values inserted into the C{Forward}::
+ fwdExpr << (a | b | c)
+ Converting to use the '<<=' operator instead will avoid this problem.
+
+ See L{ParseResults.pprint} for an example of a recursive parser created using
+ C{Forward}.
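+
+     Example (illustrative sketch)::
+         # nested parenthesized groups of integers
+         expr = Forward()
+         nested = Suppress('(') + ZeroOrMore(expr) + Suppress(')')
+         expr <<= Word(nums) | Group(nested)
+         expr.parseString("(1 (2 3) 4)")  # -> [['1', ['2', '3'], '4']]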
+ """
+ def __init__( self, other=None ):
+ super(Forward,self).__init__( other, savelist=False )
+
+ def __lshift__( self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass(other)
+ self.expr = other
+ self.strRepr = None
+ self.mayIndexError = self.expr.mayIndexError
+ self.mayReturnEmpty = self.expr.mayReturnEmpty
+ self.setWhitespaceChars( self.expr.whiteChars )
+ self.skipWhitespace = self.expr.skipWhitespace
+ self.saveAsList = self.expr.saveAsList
+ self.ignoreExprs.extend(self.expr.ignoreExprs)
+ return self
+
+ def __ilshift__(self, other):
+ return self << other
+
+ def leaveWhitespace( self ):
+ self.skipWhitespace = False
+ return self
+
+ def streamline( self ):
+ if not self.streamlined:
+ self.streamlined = True
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def validate( self, validateTrace=[] ):
+ if self not in validateTrace:
+ tmp = validateTrace[:]+[self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self.checkRecursion([])
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+ return self.__class__.__name__ + ": ..."
+
+ # stubbed out for now - creates awful memory and perf issues
+ self._revertClass = self.__class__
+ self.__class__ = _ForwardNoRecurse
+ try:
+ if self.expr is not None:
+ retString = _ustr(self.expr)
+ else:
+ retString = "None"
+ finally:
+ self.__class__ = self._revertClass
+ return self.__class__.__name__ + ": " + retString
+
+ def copy(self):
+ if self.expr is not None:
+ return super(Forward,self).copy()
+ else:
+ ret = Forward()
+ ret <<= self
+ return ret
+
+class _ForwardNoRecurse(Forward):
+ def __str__( self ):
+ return "..."
+
+class TokenConverter(ParseElementEnhance):
+ """
+    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
+ """
+ def __init__( self, expr, savelist=False ):
+ super(TokenConverter,self).__init__( expr )#, savelist )
+ self.saveAsList = False
+
+class Combine(TokenConverter):
+ """
+ Converter to concatenate all matching tokens to a single string.
+ By default, the matching patterns must also be contiguous in the input string;
+ this can be disabled by specifying C{'adjacent=False'} in the constructor.
+
+ Example::
+ real = Word(nums) + '.' + Word(nums)
+ print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+ # will also erroneously match the following
+ print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+ real = Combine(Word(nums) + '.' + Word(nums))
+ print(real.parseString('3.1416')) # -> ['3.1416']
+ # no match when there are internal spaces
+ print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+ """
+ def __init__( self, expr, joinString="", adjacent=True ):
+ super(Combine,self).__init__( expr )
+ # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+ if adjacent:
+ self.leaveWhitespace()
+ self.adjacent = adjacent
+ self.skipWhitespace = True
+ self.joinString = joinString
+ self.callPreparse = True
+
+ def ignore( self, other ):
+ if self.adjacent:
+ ParserElement.ignore(self, other)
+ else:
+ super( Combine, self).ignore( other )
+ return self
+
+ def postParse( self, instring, loc, tokenlist ):
+ retToks = tokenlist.copy()
+ del retToks[:]
+ retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+ if self.resultsName and retToks.haskeys():
+ return [ retToks ]
+ else:
+ return retToks
+
+class Group(TokenConverter):
+ """
+ Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+ Example::
+ ident = Word(alphas)
+ num = Word(nums)
+ term = ident | num
+ func = ident + Optional(delimitedList(term))
+ print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
+
+ func = ident + Group(Optional(delimitedList(term)))
+ print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
+ """
+ def __init__( self, expr ):
+ super(Group,self).__init__( expr )
+ self.saveAsList = True
+
+ def postParse( self, instring, loc, tokenlist ):
+ return [ tokenlist ]
+
+class Dict(TokenConverter):
+ """
+ Converter to return a repetitive expression as a list, but also as a dictionary.
+ Each element can also be referenced using the first token in the expression as its key.
+    Useful for tabular report scraping when the first column can be used as an item key.
+
+ Example::
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+ # print attributes as plain groups
+ print(OneOrMore(attr_expr).parseString(text).dump())
+
+ # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+ result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+ print(result.dump())
+
+ # access named fields as dict entries, or output as dict
+ print(result['shape'])
+ print(result.asDict())
+ prints::
+ ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: light blue
+ - posn: upper left
+ - shape: SQUARE
+ - texture: burlap
+ SQUARE
+ {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+    See L{ParseResults} for more examples of accessing fields by results name.
+ """
+ def __init__( self, expr ):
+ super(Dict,self).__init__( expr )
+ self.saveAsList = True
+
+ def postParse( self, instring, loc, tokenlist ):
+ for i,tok in enumerate(tokenlist):
+ if len(tok) == 0:
+ continue
+ ikey = tok[0]
+ if isinstance(ikey,int):
+ ikey = _ustr(tok[0]).strip()
+ if len(tok)==1:
+ tokenlist[ikey] = _ParseResultsWithOffset("",i)
+ elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+ tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+ else:
+ dictvalue = tok.copy() #ParseResults(i)
+ del dictvalue[0]
+ if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+ else:
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+ if self.resultsName:
+ return [ tokenlist ]
+ else:
+ return tokenlist
+
+
+class Suppress(TokenConverter):
+ """
+ Converter for ignoring the results of a parsed expression.
+
+ Example::
+ source = "a, b, c,d"
+ wd = Word(alphas)
+ wd_list1 = wd + ZeroOrMore(',' + wd)
+ print(wd_list1.parseString(source))
+
+ # often, delimiters that are useful during parsing are just in the
+ # way afterward - use Suppress to keep them out of the parsed output
+ wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+ print(wd_list2.parseString(source))
+ prints::
+ ['a', ',', 'b', ',', 'c', ',', 'd']
+ ['a', 'b', 'c', 'd']
+ (See also L{delimitedList}.)
+ """
+ def postParse( self, instring, loc, tokenlist ):
+ return []
+
+ def suppress( self ):
+ return self
+
+
+class OnlyOnce(object):
+ """
+ Wrapper for parse actions, to ensure they are only called once.
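+
+    A minimal usage sketch (the parse action C{report_match} is illustrative only)::
+        def report_match(s, l, t):
+            print("matched %r at %d" % (t[0], l))
+        wd = Word(alphas)
+        wd.setParseAction(OnlyOnce(report_match))
+        # later matches raise ParseException; call reset() to allow re-use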
+ """
+ def __init__(self, methodCall):
+ self.callable = _trim_arity(methodCall)
+ self.called = False
+ def __call__(self,s,l,t):
+ if not self.called:
+ results = self.callable(s,l,t)
+ self.called = True
+ return results
+ raise ParseException(s,l,"")
+ def reset(self):
+ self.called = False
+
+def traceParseAction(f):
+ """
+ Decorator for debugging parse actions.
+
+    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
+ When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
+
+ Example::
+ wd = Word(alphas)
+
+ @traceParseAction
+ def remove_duplicate_chars(tokens):
+ return ''.join(sorted(set(''.join(tokens))))
+
+ wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+ print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+ prints::
+ >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+ <<leaving remove_duplicate_chars (ret: 'dfjkls')
+ ['dfjkls']
+ """
+ f = _trim_arity(f)
+ def z(*paArgs):
+ thisFunc = f.__name__
+ s,l,t = paArgs[-3:]
+ if len(paArgs)>3:
+ thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+ sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
+ try:
+ ret = f(*paArgs)
+ except Exception as exc:
+ sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+ raise
+ sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
+ return ret
+ try:
+ z.__name__ = f.__name__
+ except AttributeError:
+ pass
+ return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+ """
+ Helper to define a delimited list of expressions - the delimiter defaults to ','.
+ By default, the list elements and delimiters can have intervening whitespace, and
+ comments, but this can be overridden by passing C{combine=True} in the constructor.
+ If C{combine} is set to C{True}, the matching tokens are returned as a single token
+ string, with the delimiters included; otherwise, the matching tokens are returned
+ as a list of tokens, with the delimiters suppressed.
+
+ Example::
+ delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+ delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+ """
+ dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+ if combine:
+ return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+ else:
+ return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
+
+def countedArray( expr, intExpr=None ):
+ """
+ Helper to define a counted list of expressions.
+ This helper defines a pattern of the form::
+ integer expr expr expr...
+ where the leading integer tells how many expr expressions follow.
+    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
+
+ If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
+
+ Example::
+ countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
+
+ # in this parser, the leading integer value is given in binary,
+ # '10' indicating that 2 values are in the array
+ binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+ countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
+ """
+ arrayExpr = Forward()
+ def countFieldParseAction(s,l,t):
+ n = t[0]
+ arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+ return []
+ if intExpr is None:
+ intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+ else:
+ intExpr = intExpr.copy()
+ intExpr.setName("arrayLen")
+ intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+ return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+ ret = []
+ for i in L:
+ if isinstance(i,list):
+ ret.extend(_flatten(i))
+ else:
+ ret.append(i)
+ return ret
+
+def matchPreviousLiteral(expr):
+ """
+ Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks
+ for a 'repeat' of a previous expression. For example::
+ first = Word(nums)
+ second = matchPreviousLiteral(first)
+ matchExpr = first + ":" + second
+ will match C{"1:1"}, but not C{"1:2"}. Because this matches a
+    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
+ If this is not desired, use C{matchPreviousExpr}.
+ Do I{not} use with packrat parsing enabled.
+ """
+ rep = Forward()
+ def copyTokenToRepeater(s,l,t):
+ if t:
+ if len(t) == 1:
+ rep << t[0]
+ else:
+ # flatten t tokens
+ tflat = _flatten(t.asList())
+ rep << And(Literal(tt) for tt in tflat)
+ else:
+ rep << Empty()
+ expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+ rep.setName('(prev) ' + _ustr(expr))
+ return rep
+
+def matchPreviousExpr(expr):
+ """
+ Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks
+ for a 'repeat' of a previous expression. For example::
+ first = Word(nums)
+ second = matchPreviousExpr(first)
+ matchExpr = first + ":" + second
+ will match C{"1:1"}, but not C{"1:2"}. Because this matches by
+    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
+ the expressions are evaluated first, and then compared, so
+ C{"1"} is compared with C{"10"}.
+ Do I{not} use with packrat parsing enabled.
+ """
+ rep = Forward()
+ e2 = expr.copy()
+ rep <<= e2
+ def copyTokenToRepeater(s,l,t):
+ matchTokens = _flatten(t.asList())
+ def mustMatchTheseTokens(s,l,t):
+ theseTokens = _flatten(t.asList())
+ if theseTokens != matchTokens:
+ raise ParseException("",0,"")
+ rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+ expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+ rep.setName('(prev) ' + _ustr(expr))
+ return rep
+
+def _escapeRegexRangeChars(s):
+ #~ escape these chars: ^-]
+ for c in r"\^-]":
+ s = s.replace(c,_bslash+c)
+ s = s.replace("\n",r"\n")
+ s = s.replace("\t",r"\t")
+ return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+ """
+    Helper to quickly define a set of alternative Literals, making sure to do
+    longest-first testing when there is a conflict, regardless of the input order;
+    returns a C{L{MatchFirst}} for best performance.
+
+ Parameters:
+ - strs - a string of space-delimited literals, or a collection of string literals
+ - caseless - (default=C{False}) - treat all literals as caseless
+ - useRegex - (default=C{True}) - as an optimization, will generate a Regex
+ object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
+ if creating a C{Regex} raises an exception)
+
+ Example::
+ comp_oper = oneOf("< = > <= >= !=")
+ var = Word(alphas)
+ number = Word(nums)
+ term = var | number
+ comparison_expr = term + comp_oper + term
+ print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
+ prints::
+ [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+ """
+ if caseless:
+ isequal = ( lambda a,b: a.upper() == b.upper() )
+ masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+ parseElementClass = CaselessLiteral
+ else:
+ isequal = ( lambda a,b: a == b )
+ masks = ( lambda a,b: b.startswith(a) )
+ parseElementClass = Literal
+
+ symbols = []
+ if isinstance(strs,basestring):
+ symbols = strs.split()
+ elif isinstance(strs, Iterable):
+ symbols = list(strs)
+ else:
+ warnings.warn("Invalid argument to oneOf, expected string or iterable",
+ SyntaxWarning, stacklevel=2)
+ if not symbols:
+ return NoMatch()
+
+ i = 0
+ while i < len(symbols)-1:
+ cur = symbols[i]
+ for j,other in enumerate(symbols[i+1:]):
+ if ( isequal(other, cur) ):
+ del symbols[i+j+1]
+ break
+ elif ( masks(cur, other) ):
+ del symbols[i+j+1]
+ symbols.insert(i,other)
+ cur = other
+ break
+ else:
+ i += 1
+
+ if not caseless and useRegex:
+ #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+ try:
+ if len(symbols)==len("".join(symbols)):
+ return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
+ else:
+ return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
+ except Exception:
+ warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+ SyntaxWarning, stacklevel=2)
+
+ # last resort, just use MatchFirst
+ return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf( key, value ):
+ """
+ Helper to easily and clearly define a dictionary by specifying the respective patterns
+ for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
+ in the proper order. The key pattern can include delimiting markers or punctuation,
+ as long as they are suppressed, thereby leaving the significant key text. The value
+ pattern can include named results, so that the C{Dict} results can include named token
+ fields.
+
+ Example::
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+ print(OneOrMore(attr_expr).parseString(text).dump())
+
+ attr_label = label
+ attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+ # similar to Dict, but simpler call format
+ result = dictOf(attr_label, attr_value).parseString(text)
+ print(result.dump())
+ print(result['shape'])
+ print(result.shape) # object attribute access works too
+ print(result.asDict())
+ prints::
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: light blue
+ - posn: upper left
+ - shape: SQUARE
+ - texture: burlap
+ SQUARE
+ SQUARE
+ {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+ """
+ return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+ """
+ Helper to return the original, untokenized text for a given expression. Useful to
+ restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+ revert separate tokens with intervening whitespace back to the original matching
+    input text. By default, returns a string containing the original parsed text.
+
+ If the optional C{asString} argument is passed as C{False}, then the return value is a
+ C{L{ParseResults}} containing any results names that were originally matched, and a
+ single token containing the original matched text from the input string. So if
+ the expression passed to C{L{originalTextFor}} contains expressions with defined
+ results names, you must set C{asString} to C{False} if you want to preserve those
+ results name values.
+
+ Example::
+ src = "this is test <b> bold <i>text</i> </b> normal text "
+ for tag in ("b","i"):
+ opener,closer = makeHTMLTags(tag)
+ patt = originalTextFor(opener + SkipTo(closer) + closer)
+ print(patt.searchString(src)[0])
+ prints::
+ ['<b> bold <i>text</i> </b>']
+ ['<i>text</i>']
+ """
+ locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+ endlocMarker = locMarker.copy()
+ endlocMarker.callPreparse = False
+ matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+ if asString:
+ extractText = lambda s,l,t: s[t._original_start:t._original_end]
+ else:
+ def extractText(s,l,t):
+ t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+ matchExpr.setParseAction(extractText)
+ matchExpr.ignoreExprs = expr.ignoreExprs
+ return matchExpr
+
+def ungroup(expr):
+ """
+ Helper to undo pyparsing's default grouping of And expressions, even
+ if all but one are non-empty.
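+
+    A minimal sketch::
+        grouped = Group(Word(alphas))
+        print(grouped.parseString("abc"))          # -> [['abc']]
+        print(ungroup(grouped).parseString("abc")) # -> ['abc']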
+ """
+ return TokenConverter(expr).setParseAction(lambda t:t[0])
+
+def locatedExpr(expr):
+ """
+ Helper to decorate a returned token with its starting and ending locations in the input string.
+ This helper adds the following results names:
+ - locn_start = location where matched expression begins
+ - locn_end = location where matched expression ends
+ - value = the actual parsed results
+
+    Be careful if the input text contains C{<TAB>} characters; you may want to call
+    C{L{ParserElement.parseWithTabs}}.
+
+ Example::
+ wd = Word(alphas)
+ for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+ print(match)
+ prints::
+ [[0, 'ljsdf', 5]]
+ [[8, 'lksdjjf', 15]]
+ [[18, 'lkkjj', 23]]
+ """
+ locator = Empty().setParseAction(lambda s,l,t: l)
+ return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty = Empty().setName("empty")
+lineStart = LineStart().setName("lineStart")
+lineEnd = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+def srange(s):
+ r"""
+ Helper to easily define string ranges for use in Word construction. Borrows
+ syntax from regexp '[]' string range definitions::
+ srange("[0-9]") -> "0123456789"
+ srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
+ srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+ The input string must be enclosed in []'s, and the returned string is the expanded
+ character set joined into a single string.
+ The values enclosed in the []'s may be:
+ - a single character
+ - an escaped character with a leading backslash (such as C{\-} or C{\]})
+ - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
+ (C{\0x##} is also supported for backwards compatibility)
+ - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
+ - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
+ - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
+ """
+ _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
+ try:
+ return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+ except Exception:
+ return ""
+
+def matchOnlyAtCol(n):
+ """
+ Helper method for defining parse actions that require matching at a specific
+ column in the input text.
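+
+    A minimal sketch::
+        # only accept a word that begins in column 5 of its line
+        wd = Word(alphas).setParseAction(matchOnlyAtCol(5))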
+ """
+ def verifyCol(strg,locn,toks):
+ if col(locn,strg) != n:
+ raise ParseException(strg,locn,"matched token not at column %d" % n)
+ return verifyCol
+
+def replaceWith(replStr):
+ """
+ Helper method for common parse actions that simply return a literal value. Especially
+ useful when used with C{L{transformString<ParserElement.transformString>}()}.
+
+ Example::
+ num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+ term = na | num
+
+ OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+ """
+ return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+ """
+ Helper parse action for removing quotation marks from parsed quoted strings.
+
+ Example::
+ # by default, quotation marks are included in parsed results
+ quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+ # use removeQuotes to strip quotation marks from parsed results
+ quotedString.setParseAction(removeQuotes)
+ quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+ """
+ return t[0][1:-1]
+
+def tokenMap(func, *args):
+ """
+    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
+ args are passed, they are forwarded to the given function as additional arguments after
+ the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
+ parsed data to an integer using base 16.
+
+    Example (compare the last example to the one in L{ParserElement.transformString})::
+ hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+ hex_ints.runTests('''
+ 00 11 22 aa FF 0a 0d 1a
+ ''')
+
+ upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+ OneOrMore(upperword).runTests('''
+ my kingdom for a horse
+ ''')
+
+ wd = Word(alphas).setParseAction(tokenMap(str.title))
+ OneOrMore(wd).setParseAction(' '.join).runTests('''
+ now is the winter of our discontent made glorious summer by this sun of york
+ ''')
+ prints::
+ 00 11 22 aa FF 0a 0d 1a
+ [0, 17, 34, 170, 255, 10, 13, 26]
+
+ my kingdom for a horse
+ ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+ now is the winter of our discontent made glorious summer by this sun of york
+ ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+ """
+ def pa(s,l,t):
+ return [func(tokn, *args) for tokn in t]
+
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ pa.__name__ = func_name
+
+ return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
+
+def _makeTags(tagStr, xml):
+ """Internal helper to construct opening and closing tag expressions, given a tag name"""
+ if isinstance(tagStr,basestring):
+ resname = tagStr
+ tagStr = Keyword(tagStr, caseless=not xml)
+ else:
+ resname = tagStr.name
+
+ tagAttrName = Word(alphas,alphanums+"_-:")
+ if (xml):
+ tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+ openTag = Suppress("<") + tagStr("tag") + \
+ Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
+ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+ else:
+ printablesLessRAbrack = "".join(c for c in printables if c not in ">")
+ tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
+ openTag = Suppress("<") + tagStr("tag") + \
+ Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
+ Optional( Suppress("=") + tagAttrValue ) ))) + \
+ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+ closeTag = Combine(_L("</") + tagStr + ">")
+
+ openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
+ closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
+ openTag.tag = resname
+ closeTag.tag = resname
+ return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+ """
+ Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
+ tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
+
+ Example::
+ text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+ # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
+ a,a_end = makeHTMLTags("A")
+ link_expr = a + SkipTo(a_end)("link_text") + a_end
+
+ for link in link_expr.searchString(text):
+ # attributes in the <A> tag (like "href" shown here) are also accessible as named results
+ print(link.link_text, '->', link.href)
+ prints::
+ pyparsing -> http://pyparsing.wikispaces.com
+ """
+ return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+ """
+ Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
+ tags only in the given upper/lower case.
+
+ Example: similar to L{makeHTMLTags}
+ """
+ return _makeTags( tagStr, True )
+
+def withAttribute(*args,**attrDict):
+ """
+ Helper to create a validating parse action to be used with start tags created
+ with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
+ with a required attribute value, to avoid false matches on common tags such as
+ C{<TD>} or C{<DIV>}.
+
+ Call C{withAttribute} with a series of attribute names and values. Specify the list
+    of filter attribute names and values as:
+ - keyword arguments, as in C{(align="right")}, or
+ - as an explicit dict with C{**} operator, when an attribute name is also a Python
+ reserved word, as in C{**{"class":"Customer", "align":"right"}}
+ - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
+    For attribute names with a namespace prefix, you must use the dict or tuple
+    form (such names are not valid Python keyword arguments). Attribute
+    names are matched insensitive to upper/lower case.
+
+ If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
+
+ To verify that the attribute exists, but without specifying a value, pass
+ C{withAttribute.ANY_VALUE} as the value.
+
+ Example::
+ html = '''
+ <div>
+ Some text
+ <div type="grid">1 4 0 1 0</div>
+ <div type="graph">1,3 2,3 1,1</div>
+ <div>this has no type</div>
+ </div>
+
+ '''
+ div,div_end = makeHTMLTags("div")
+
+ # only match div tag having a type attribute with value "grid"
+ div_grid = div().setParseAction(withAttribute(type="grid"))
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.searchString(html):
+ print(grid_header.body)
+
+ # construct a match with any div tag having a type attribute, regardless of the value
+ div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.searchString(html):
+ print(div_header.body)
+ prints::
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ if args:
+ attrs = args[:]
+ else:
+ attrs = attrDict.items()
+ attrs = [(k,v) for k,v in attrs]
+ def pa(s,l,tokens):
+ for attrName,attrValue in attrs:
+ if attrName not in tokens:
+ raise ParseException(s,l,"no matching attribute " + attrName)
+ if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+ raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
+ (attrName, tokens[attrName], attrValue))
+ return pa
+withAttribute.ANY_VALUE = object()
+
+def withClass(classname, namespace=''):
+ """
+ Simplified version of C{L{withAttribute}} when matching on a div class - made
+ difficult because C{class} is a reserved word in Python.
+
+ Example::
+ html = '''
+ <div>
+ Some text
+ <div class="grid">1 4 0 1 0</div>
+ <div class="graph">1,3 2,3 1,1</div>
+ <div>this &lt;div&gt; has no class</div>
+ </div>
+
+ '''
+ div,div_end = makeHTMLTags("div")
+ div_grid = div().setParseAction(withClass("grid"))
+
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.searchString(html):
+ print(grid_header.body)
+
+ div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.searchString(html):
+ print(div_header.body)
+ prints::
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ classattr = "%s:class" % namespace if namespace else "class"
+ return withAttribute(**{classattr : classname})
+
+opAssoc = _Constants()
+opAssoc.LEFT = object()
+opAssoc.RIGHT = object()
+
+def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
+ """
+ Helper method for constructing grammars of expressions made up of
+ operators working in a precedence hierarchy. Operators may be unary or
+ binary, left- or right-associative. Parse actions can also be attached
+ to operator expressions. The generated parser will also recognize the use
+ of parentheses to override operator precedences (see example below).
+
+ Note: if you define a deep operator list, you may see performance issues
+ when using infixNotation. See L{ParserElement.enablePackrat} for a
+ mechanism to potentially improve your parser performance.
+
+ Parameters:
+    - baseExpr - expression representing the most basic element of the nested expression grammar
+ - opList - list of tuples, one for each operator precedence level in the
+ expression grammar; each tuple is of the form
+ (opExpr, numTerms, rightLeftAssoc, parseAction), where:
+ - opExpr is the pyparsing expression for the operator;
+ may also be a string, which will be converted to a Literal;
+ if numTerms is 3, opExpr is a tuple of two expressions, for the
+ two operators separating the 3 terms
+ - numTerms is the number of terms for this operator (must
+ be 1, 2, or 3)
+ - rightLeftAssoc is the indicator whether the operator is
+ right or left associative, using the pyparsing-defined
+ constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
+ - parseAction is the parse action to be associated with
+ expressions matching this operator expression (the
+ parse action tuple member may be omitted); if the parse action
+ is passed a tuple or list of functions, this is equivalent to
+ calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
+ - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
+ - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
+
+ Example::
+ # simple example of four-function arithmetic with ints and variable names
+ integer = pyparsing_common.signed_integer
+ varname = pyparsing_common.identifier
+
+ arith_expr = infixNotation(integer | varname,
+ [
+ ('-', 1, opAssoc.RIGHT),
+ (oneOf('* /'), 2, opAssoc.LEFT),
+ (oneOf('+ -'), 2, opAssoc.LEFT),
+ ])
+
+ arith_expr.runTests('''
+ 5+3*6
+ (5+3)*6
+ -2--11
+ ''', fullDump=False)
+ prints::
+ 5+3*6
+ [[5, '+', [3, '*', 6]]]
+
+ (5+3)*6
+ [[[5, '+', 3], '*', 6]]
+
+ -2--11
+ [[['-', 2], '-', ['-', 11]]]
+ """
+ ret = Forward()
+ lastExpr = baseExpr | ( lpar + ret + rpar )
+ for i,operDef in enumerate(opList):
+ opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
+ termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
+ if arity == 3:
+ if opExpr is None or len(opExpr) != 2:
+ raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
+ opExpr1, opExpr2 = opExpr
+ thisExpr = Forward().setName(termName)
+ if rightLeftAssoc == opAssoc.LEFT:
+ if arity == 1:
+ matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
+ else:
+ matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
+ elif arity == 3:
+ matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
+ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
+ else:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+ elif rightLeftAssoc == opAssoc.RIGHT:
+ if arity == 1:
+ # try to avoid LR with this extra test
+ if not isinstance(opExpr, Optional):
+ opExpr = Optional(opExpr)
+ matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
+ else:
+ matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
+ elif arity == 3:
+ matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
+ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
+ else:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+ else:
+ raise ValueError("operator must indicate right or left associativity")
+ if pa:
+ if isinstance(pa, (tuple, list)):
+ matchExpr.setParseAction(*pa)
+ else:
+ matchExpr.setParseAction(pa)
+ thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
+ lastExpr = thisExpr
+ ret <<= lastExpr
+ return ret
+
+operatorPrecedence = infixNotation
+"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
+
+dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
+sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
+ Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
+unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
+
+def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
+ """
+ Helper method for defining nested lists enclosed in opening and closing
+ delimiters ("(" and ")" are the default).
+
+ Parameters:
+ - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
+ - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
+ - content - expression for items within the nested lists (default=C{None})
+ - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
+
+ If an expression is not provided for the content argument, the nested
+ expression will capture all whitespace-delimited content between delimiters
+ as a list of separate values.
+
+ Use the C{ignoreExpr} argument to define expressions that may contain
+ opening or closing characters that should not be treated as opening
+ or closing characters for nesting, such as quotedString or a comment
+ expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
+ The default is L{quotedString}, but if no expressions are to be ignored,
+ then pass C{None} for this argument.
+
+ Example::
+ data_type = oneOf("void int short long char float double")
+ decl_data_type = Combine(data_type + Optional(Word('*')))
+ ident = Word(alphas+'_', alphanums+'_')
+ number = pyparsing_common.number
+ arg = Group(decl_data_type + ident)
+ LPAR,RPAR = map(Suppress, "()")
+
+ code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
+
+ c_function = (decl_data_type("type")
+ + ident("name")
+ + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ + code_body("body"))
+ c_function.ignore(cStyleComment)
+
+ source_code = '''
+ int is_odd(int x) {
+ return (x%2);
+ }
+
+ int dec_to_hex(char hchar) {
+ if (hchar >= '0' && hchar <= '9') {
+ return (ord(hchar)-ord('0'));
+ } else {
+ return (10+ord(hchar)-ord('A'));
+ }
+ }
+ '''
+ for func in c_function.searchString(source_code):
+ print("%(name)s (%(type)s) args: %(args)s" % func)
+
+ prints::
+ is_odd (int) args: [['int', 'x']]
+ dec_to_hex (int) args: [['char', 'hchar']]
+ """
+ if opener == closer:
+ raise ValueError("opening and closing strings cannot be the same")
+ if content is None:
+ if isinstance(opener,basestring) and isinstance(closer,basestring):
+ if len(opener) == 1 and len(closer)==1:
+ if ignoreExpr is not None:
+ content = (Combine(OneOrMore(~ignoreExpr +
+ CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ if ignoreExpr is not None:
+ content = (Combine(OneOrMore(~ignoreExpr +
+ ~Literal(opener) + ~Literal(closer) +
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ raise ValueError("opening and closing arguments must be strings if no content expression is given")
+ ret = Forward()
+ if ignoreExpr is not None:
+ ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
+ else:
+ ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
+ ret.setName('nested %s%s expression' % (opener,closer))
+ return ret
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+ """
+ Helper method for defining space-delimited indentation blocks, such as
+ those used to define block statements in Python source code.
+
+ Parameters:
+ - blockStatementExpr - expression defining syntax of statement that
+ is repeated within the indented block
+ - indentStack - list created by caller to manage indentation stack
+ (multiple statementWithIndentedBlock expressions within a single grammar
+ should share a common indentStack)
+ - indent - boolean indicating whether block must be indented beyond the
+        current level; set to False for a block of left-most statements
+ (default=C{True})
+
+ A valid block must contain at least one C{blockStatement}.
+
+ Example::
+ data = '''
+ def A(z):
+ A1
+ B = 100
+ G = A2
+ A2
+ A3
+ B
+ def BB(a,b,c):
+ BB1
+ def BBA():
+ bba1
+ bba2
+ bba3
+ C
+ D
+ def spam(x,y):
+ def eggs(z):
+ pass
+ '''
+
+
+ indentStack = [1]
+ stmt = Forward()
+
+ identifier = Word(alphas, alphanums)
+ funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
+ func_body = indentedBlock(stmt, indentStack)
+ funcDef = Group( funcDecl + func_body )
+
+ rvalue = Forward()
+ funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
+ rvalue << (funcCall | identifier | Word(nums))
+ assignment = Group(identifier + "=" + rvalue)
+ stmt << ( funcDef | assignment | identifier )
+
+ module_body = OneOrMore(stmt)
+
+ parseTree = module_body.parseString(data)
+ parseTree.pprint()
+ prints::
+ [['def',
+ 'A',
+ ['(', 'z', ')'],
+ ':',
+ [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+ 'B',
+ ['def',
+ 'BB',
+ ['(', 'a', 'b', 'c', ')'],
+ ':',
+ [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+ 'C',
+ 'D',
+ ['def',
+ 'spam',
+ ['(', 'x', 'y', ')'],
+ ':',
+ [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
+ """
+ def checkPeerIndent(s,l,t):
+ if l >= len(s): return
+ curCol = col(l,s)
+ if curCol != indentStack[-1]:
+ if curCol > indentStack[-1]:
+ raise ParseFatalException(s,l,"illegal nesting")
+ raise ParseException(s,l,"not a peer entry")
+
+ def checkSubIndent(s,l,t):
+ curCol = col(l,s)
+ if curCol > indentStack[-1]:
+ indentStack.append( curCol )
+ else:
+ raise ParseException(s,l,"not a subentry")
+
+ def checkUnindent(s,l,t):
+ if l >= len(s): return
+ curCol = col(l,s)
+ if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
+ raise ParseException(s,l,"not an unindent")
+ indentStack.pop()
+
+ NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
+ INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
+ PEER = Empty().setParseAction(checkPeerIndent).setName('')
+ UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
+ if indent:
+ smExpr = Group( Optional(NL) +
+ #~ FollowedBy(blockStatementExpr) +
+ INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
+ else:
+ smExpr = Group( Optional(NL) +
+ (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
+ blockStatementExpr.ignore(_bslash + LineEnd())
+ return smExpr.setName('indented block')
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
+_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
+commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
+def replaceHTMLEntity(t):
+ """Helper parser action to replace common HTML entities with their special characters"""
+ return _htmlEntityMap.get(t.entity)
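+
+# a minimal usage sketch, pairing commonHTMLEntity with replaceHTMLEntity:
+#   text = "1 &lt; 2 &amp; 4 &gt; 3"
+#   print(commonHTMLEntity.setParseAction(replaceHTMLEntity).transformString(text))
+#   # -> '1 < 2 & 4 > 3'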
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
+"Comment of the form C{/* ... */}"
+
+htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
+"Comment of the form C{<!-- ... -->}"
+
+restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
+dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
+"Comment of the form C{// ... (to end of line)}"
+
+cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
+"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
+
+javaStyleComment = cppStyleComment
+"Same as C{L{cppStyleComment}}"
+
+pythonStyleComment = Regex(r"#.*").setName("Python style comment")
+"Comment of the form C{# ... (to end of line)}"
+
+_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
+ Optional( Word(" \t") +
+ ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
+commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
+"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
+ This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+ """
+ Here are some common low-level expressions that may be useful in jump-starting parser development:
+ - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
+ - common L{programming identifiers<identifier>}
+ - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
+ - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
+ - L{UUID<uuid>}
+ - L{comma-separated list<comma_separated_list>}
+ Parse actions:
+ - C{L{convertToInteger}}
+ - C{L{convertToFloat}}
+ - C{L{convertToDate}}
+ - C{L{convertToDatetime}}
+ - C{L{stripHTMLTags}}
+ - C{L{upcaseTokens}}
+ - C{L{downcaseTokens}}
+
+ Example::
+ pyparsing_common.number.runTests('''
+ # any int or real number, returned as the appropriate type
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.fnumber.runTests('''
+ # any int or real number, returned as float
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.hex_integer.runTests('''
+ # hex numbers
+ 100
+ FF
+ ''')
+
+ pyparsing_common.fraction.runTests('''
+ # fractions
+ 1/2
+ -3/4
+ ''')
+
+ pyparsing_common.mixed_integer.runTests('''
+ # mixed fractions
+ 1
+ 1/2
+ -3/4
+ 1-3/4
+ ''')
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests('''
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ ''')
+ prints::
+ # any int or real number, returned as the appropriate type
+ 100
+ [100]
+
+ -100
+ [-100]
+
+ +100
+ [100]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # any int or real number, returned as float
+ 100
+ [100.0]
+
+ -100
+ [-100.0]
+
+ +100
+ [100.0]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # hex numbers
+ 100
+ [256]
+
+ FF
+ [255]
+
+ # fractions
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ # mixed fractions
+ 1
+ [1]
+
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ 1-3/4
+ [1.75]
+
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ [UUID('12345678-1234-5678-1234-567812345678')]
+ """
+
+ convertToInteger = tokenMap(int)
+ """
+ Parse action for converting parsed integers to Python int
+ """
+
+ convertToFloat = tokenMap(float)
+ """
+ Parse action for converting parsed numbers to Python float
+ """
+
+ integer = Word(nums).setName("integer").setParseAction(convertToInteger)
+ """expression that parses an unsigned integer, returns an int"""
+
+ hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
+ """expression that parses a hexadecimal integer, returns an int"""
+
+ signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
+ """expression that parses an integer with optional leading sign, returns an int"""
+
+ fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
+ """fractional expression of an integer divided by an integer, returns a float"""
+ fraction.addParseAction(lambda t: t[0]/t[-1])
+
+ mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
+ """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+ mixed_integer.addParseAction(sum)
+
+ real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
+ """expression that parses a floating point number and returns a float"""
+
+ sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
+ """expression that parses a floating point number with optional scientific notation and returns a float"""
+
+ # streamlining this expression makes the docs nicer-looking
+ number = (sci_real | real | signed_integer).streamline()
+ """any numeric expression, returns the corresponding Python type"""
+
+ fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
+ """any int or real number, returned as float"""
+
+ identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
+ """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+ ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
+ "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
+
+ _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
+ _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
+ _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
+ _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
+ _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
+ ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
+ "IPv6 address (long, short, or mixed form)"
+
+ mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
+ "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+ @staticmethod
+ def convertToDate(fmt="%Y-%m-%d"):
+ """
+ Helper to create a parse action for converting parsed date string to Python datetime.date
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
+
+ Example::
+ date_expr = pyparsing_common.iso8601_date.copy()
+ date_expr.setParseAction(pyparsing_common.convertToDate())
+ print(date_expr.parseString("1999-12-31"))
+ prints::
+ [datetime.date(1999, 12, 31)]
+ """
+ def cvt_fn(s,l,t):
+ try:
+ return datetime.strptime(t[0], fmt).date()
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+ return cvt_fn
+
+ @staticmethod
+ def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
+ """
+ Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
+
+ Example::
+ dt_expr = pyparsing_common.iso8601_datetime.copy()
+ dt_expr.setParseAction(pyparsing_common.convertToDatetime())
+ print(dt_expr.parseString("1999-12-31T23:59:59.999"))
+ prints::
+ [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+ """
+ def cvt_fn(s,l,t):
+ try:
+ return datetime.strptime(t[0], fmt)
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+ return cvt_fn
+
+ iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
+ "ISO8601 date (C{yyyy-mm-dd})"
+
+ iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
+ "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
+
+ uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
+ "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
+
+ _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
+ @staticmethod
+ def stripHTMLTags(s, l, tokens):
+ """
+ Parse action to remove HTML tags from web page HTML source
+
+ Example::
+ # strip HTML links from normal text
+ text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+ td,td_end = makeHTMLTags("TD")
+ table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
+
+ print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
+ """
+ return pyparsing_common._html_stripper.transformString(tokens[0])
+
+ _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
+ comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
+ """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+ upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
+ """Parse action to convert tokens to upper case."""
+
+ downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
+ """Parse action to convert tokens to lower case."""
+
+
+if __name__ == "__main__":
+
+ selectToken = CaselessLiteral("select")
+ fromToken = CaselessLiteral("from")
+
+ ident = Word(alphas, alphanums + "_$")
+
+ columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+ columnNameList = Group(delimitedList(columnName)).setName("columns")
+ columnSpec = ('*' | columnNameList)
+
+ tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+ tableNameList = Group(delimitedList(tableName)).setName("tables")
+
+ simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
+
+ # demo runTests method, including embedded comments in test string
+ simpleSQL.runTests("""
+ # '*' as column list and dotted table name
+ select * from SYS.XYZZY
+
+ # caseless match on "SELECT", and casts back to "select"
+ SELECT * from XYZZY, ABC
+
+ # list of column names, and mixed case SELECT keyword
+ Select AA,BB,CC from Sys.dual
+
+ # multiple tables
+ Select A, B, C from Sys.dual, Table2
+
+ # invalid SELECT keyword - should fail
+ Xelect A, B, C from Sys.dual
+
+ # incomplete command - should fail
+ Select
+
+ # invalid column name - should fail
+ Select ^^^ frox Sys.dual
+
+ """)
+
+ pyparsing_common.number.runTests("""
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ """)
+
+ # any int or real number, returned as float
+ pyparsing_common.fnumber.runTests("""
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ """)
+
+ pyparsing_common.hex_integer.runTests("""
+ 100
+ FF
+ """)
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests("""
+ 12345678-1234-5678-1234-567812345678
+ """)
diff --git a/third_party/python/setuptools/pkg_resources/extern/__init__.py b/third_party/python/setuptools/pkg_resources/extern/__init__.py
new file mode 100644
index 0000000000..4dc3beb2fa
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/extern/__init__.py
@@ -0,0 +1,66 @@
+import sys
+
+
+class VendorImporter:
+ """
+ A PEP 302 meta path importer for finding optionally-vendored
+ or otherwise naturally-installed packages from root_name.
+ """
+
+ def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+ self.root_name = root_name
+ self.vendored_names = set(vendored_names)
+ self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+ @property
+ def search_path(self):
+ """
+        Search first in the vendor package, then as a naturally-installed package.
+ """
+ yield self.vendor_pkg + '.'
+ yield ''
+
+ def find_module(self, fullname, path=None):
+ """
+ Return self when fullname starts with root_name and the
+ target module is one vendored through this importer.
+ """
+ root, base, target = fullname.partition(self.root_name + '.')
+ if root:
+ return
+ if not any(map(target.startswith, self.vendored_names)):
+ return
+ return self
+
+ def load_module(self, fullname):
+ """
+ Iterate over the search path to locate and load fullname.
+ """
+ root, base, target = fullname.partition(self.root_name + '.')
+ for prefix in self.search_path:
+ try:
+ extant = prefix + target
+ __import__(extant)
+ mod = sys.modules[extant]
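+                # alias the located module under its extern-qualified name as well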
+ sys.modules[fullname] = mod
+ return mod
+ except ImportError:
+ pass
+ else:
+ raise ImportError(
+ "The '{target}' package is required; "
+ "normally this is bundled with this package so if you get "
+ "this warning, consult the packager of your "
+ "distribution.".format(**locals())
+ )
+
+ def install(self):
+ """
+ Install this importer into sys.meta_path if not already present.
+ """
+ if self not in sys.meta_path:
+ sys.meta_path.append(self)
+
+
+names = 'packaging', 'pyparsing', 'appdirs'
+VendorImporter(__name__, names).install()
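+
+# With the importer installed, vendored packages resolve through this namespace,
+# e.g. (a minimal sketch): from pkg_resources.extern import packaging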