diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..301ffe540f0242521617353143a2ee3ad715a44e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/METADATA @@ -0,0 +1,35 @@ +Metadata-Version: 2.1 +Name: nvidia-curand-cu12 +Version: 10.3.2.106 +Summary: CURAND native runtime libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt + +CURAND native runtime libraries diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/__init__.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..955fdc48b60cc6f9bc03916039a379a741a7d93a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/__init__.py @@ -0,0 +1,3303 @@ +""" +Package resource API +-------------------- + +A resource is a logical file contained within a package, or a logical +subdirectory thereof. The package resource API expects resource names +to have their path parts separated with ``/``, *not* whatever the local +path separator is. Do not use os.path operations to manipulate resource +names being passed into the API. + +The package resource API is designed to work with normal filesystem packages, +.egg files, and unpacked .egg files. It can also work in a limited way with +.zip files and with custom PEP 302 loaders that support the ``get_data()`` +method. 
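A minimal usage sketch of the resource API this docstring describes; ``mypkg`` and the resource path are hypothetical names:

    import pkg_resources

    # Resource names always use '/' separators, independent of the platform.
    if pkg_resources.resource_exists('mypkg', 'data/defaults.cfg'):
        raw = pkg_resources.resource_string('mypkg', 'data/defaults.cfg')    # bytes
        path = pkg_resources.resource_filename('mypkg', 'data/defaults.cfg')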
+""" + +import sys +import os +import io +import time +import re +import types +import zipfile +import zipimport +import warnings +import stat +import functools +import pkgutil +import operator +import platform +import collections +import plistlib +import email.parser +import errno +import tempfile +import textwrap +import itertools +import inspect +import ntpath +import posixpath +import importlib +from pkgutil import get_importer + +try: + import _imp +except ImportError: + # Python 3.2 compatibility + import imp as _imp + +try: + FileExistsError +except NameError: + FileExistsError = OSError + +# capture these to bypass sandboxing +from os import utime +try: + from os import mkdir, rename, unlink + WRITE_SUPPORT = True +except ImportError: + # no write support, probably under GAE + WRITE_SUPPORT = False + +from os import open as os_open +from os.path import isdir, split + +try: + import importlib.machinery as importlib_machinery + # access attribute to force import under delayed import mechanisms. + importlib_machinery.__name__ +except ImportError: + importlib_machinery = None + +from pkg_resources.extern import appdirs +from pkg_resources.extern import packaging +__import__('pkg_resources.extern.packaging.version') +__import__('pkg_resources.extern.packaging.specifiers') +__import__('pkg_resources.extern.packaging.requirements') +__import__('pkg_resources.extern.packaging.markers') + +if sys.version_info < (3, 5): + raise RuntimeError("Python 3.5 or later is required") + +# declare some globals that will be defined later to +# satisfy the linters. +require = None +working_set = None +add_activation_listener = None +resources_stream = None +cleanup_resources = None +resource_dir = None +resource_stream = None +set_extraction_path = None +resource_isdir = None +resource_string = None +iter_entry_points = None +resource_listdir = None +resource_filename = None +resource_exists = None +_distribution_finders = None +_namespace_handlers = None +_namespace_packages = None + + +class PEP440Warning(RuntimeWarning): + """ + Used when there is an issue with a version or specifier not complying with + PEP 440. + """ + + +def parse_version(v): + try: + return packaging.version.Version(v) + except packaging.version.InvalidVersion: + warnings.warn( + f"{v} is an invalid version and will not be supported in " + "a future release", + PkgResourcesDeprecationWarning, + ) + return packaging.version.LegacyVersion(v) + + +_state_vars = {} + + +def _declare_state(vartype, **kw): + globals().update(kw) + _state_vars.update(dict.fromkeys(kw, vartype)) + + +def __getstate__(): + state = {} + g = globals() + for k, v in _state_vars.items(): + state[k] = g['_sget_' + v](g[k]) + return state + + +def __setstate__(state): + g = globals() + for k, v in state.items(): + g['_sset_' + _state_vars[k]](k, g[k], v) + return state + + +def _sget_dict(val): + return val.copy() + + +def _sset_dict(key, ob, state): + ob.clear() + ob.update(state) + + +def _sget_object(val): + return val.__getstate__() + + +def _sset_object(key, ob, state): + ob.__setstate__(state) + + +_sget_none = _sset_none = lambda *args: None + + +def get_supported_platform(): + """Return this platform's maximum compatible version. + + distutils.util.get_platform() normally reports the minimum version + of macOS that would be required to *use* extensions produced by + distutils. But what we want when checking compatibility is to know the + version of macOS that we are *running*. 
To allow usage of packages that + explicitly require a newer version of macOS, we must also know the + current version of the OS. + + If this condition occurs for any other platform with a version in its + platform strings, this function should be extended accordingly. + """ + plat = get_build_platform() + m = macosVersionString.match(plat) + if m is not None and sys.platform == "darwin": + try: + plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3)) + except ValueError: + # not macOS + pass + return plat + + +__all__ = [ + # Basic resource access and distribution/entry point discovery + 'require', 'run_script', 'get_provider', 'get_distribution', + 'load_entry_point', 'get_entry_map', 'get_entry_info', + 'iter_entry_points', + 'resource_string', 'resource_stream', 'resource_filename', + 'resource_listdir', 'resource_exists', 'resource_isdir', + + # Environmental control + 'declare_namespace', 'working_set', 'add_activation_listener', + 'find_distributions', 'set_extraction_path', 'cleanup_resources', + 'get_default_cache', + + # Primary implementation classes + 'Environment', 'WorkingSet', 'ResourceManager', + 'Distribution', 'Requirement', 'EntryPoint', + + # Exceptions + 'ResolutionError', 'VersionConflict', 'DistributionNotFound', + 'UnknownExtra', 'ExtractionError', + + # Warnings + 'PEP440Warning', + + # Parsing functions and string utilities + 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', + 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', + 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', + + # filesystem utilities + 'ensure_directory', 'normalize_path', + + # Distribution "precedence" constants + 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', + + # "Provider" interfaces, implementations, and registration/lookup APIs + 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', + 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', + 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', + 'register_finder', 'register_namespace_handler', 'register_loader_type', + 'fixup_namespace_packages', 'get_importer', + + # Warnings + 'PkgResourcesDeprecationWarning', + + # Deprecated/backward compatibility only + 'run_main', 'AvailableDistributions', +] + + +class ResolutionError(Exception): + """Abstract base for dependency resolution errors""" + + def __repr__(self): + return self.__class__.__name__ + repr(self.args) + + +class VersionConflict(ResolutionError): + """ + An already-installed version conflicts with the requested version. + + Should be initialized with the installed Distribution and the requested + Requirement. + """ + + _template = "{self.dist} is installed but {self.req} is required" + + @property + def dist(self): + return self.args[0] + + @property + def req(self): + return self.args[1] + + def report(self): + return self._template.format(**locals()) + + def with_context(self, required_by): + """ + If required_by is non-empty, return a version of self that is a + ContextualVersionConflict. + """ + if not required_by: + return self + args = self.args + (required_by,) + return ContextualVersionConflict(*args) + + +class ContextualVersionConflict(VersionConflict): + """ + A VersionConflict that accepts a third parameter, the set of the + requirements that required the installed Distribution. 
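A hedged sketch of how these resolution errors surface through ``require()``; ``somepkg`` is a hypothetical project name:

    import pkg_resources

    try:
        pkg_resources.require('somepkg>=2.0')
    except pkg_resources.VersionConflict as exc:
        print(exc.report())    # "<dist> is installed but <req> is required"
    except pkg_resources.DistributionNotFound as exc:
        print(exc.report())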
+ """ + + _template = VersionConflict._template + ' by {self.required_by}' + + @property + def required_by(self): + return self.args[2] + + +class DistributionNotFound(ResolutionError): + """A requested distribution was not found""" + + _template = ("The '{self.req}' distribution was not found " + "and is required by {self.requirers_str}") + + @property + def req(self): + return self.args[0] + + @property + def requirers(self): + return self.args[1] + + @property + def requirers_str(self): + if not self.requirers: + return 'the application' + return ', '.join(self.requirers) + + def report(self): + return self._template.format(**locals()) + + def __str__(self): + return self.report() + + +class UnknownExtra(ResolutionError): + """Distribution doesn't have an "extra feature" of the given name""" + + +_provider_factories = {} + +PY_MAJOR = '{}.{}'.format(*sys.version_info) +EGG_DIST = 3 +BINARY_DIST = 2 +SOURCE_DIST = 1 +CHECKOUT_DIST = 0 +DEVELOP_DIST = -1 + + +def register_loader_type(loader_type, provider_factory): + """Register `provider_factory` to make providers for `loader_type` + + `loader_type` is the type or class of a PEP 302 ``module.__loader__``, + and `provider_factory` is a function that, passed a *module* object, + returns an ``IResourceProvider`` for that module. + """ + _provider_factories[loader_type] = provider_factory + + +def get_provider(moduleOrReq): + """Return an IResourceProvider for the named module or requirement""" + if isinstance(moduleOrReq, Requirement): + return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] + try: + module = sys.modules[moduleOrReq] + except KeyError: + __import__(moduleOrReq) + module = sys.modules[moduleOrReq] + loader = getattr(module, '__loader__', None) + return _find_adapter(_provider_factories, loader)(module) + + +def _macos_vers(_cache=[]): + if not _cache: + version = platform.mac_ver()[0] + # fallback for MacPorts + if version == '': + plist = '/System/Library/CoreServices/SystemVersion.plist' + if os.path.exists(plist): + if hasattr(plistlib, 'readPlist'): + plist_content = plistlib.readPlist(plist) + if 'ProductVersion' in plist_content: + version = plist_content['ProductVersion'] + + _cache.append(version.split('.')) + return _cache[0] + + +def _macos_arch(machine): + return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) + + +def get_build_platform(): + """Return this platform's string for platform-specific distributions + + XXX Currently this is the same as ``distutils.util.get_platform()``, but it + needs some hacks for Linux and macOS. + """ + from sysconfig import get_platform + + plat = get_platform() + if sys.platform == "darwin" and not plat.startswith('macosx-'): + try: + version = _macos_vers() + machine = os.uname()[4].replace(" ", "_") + return "macosx-%d.%d-%s" % ( + int(version[0]), int(version[1]), + _macos_arch(machine), + ) + except ValueError: + # if someone is running a non-Mac darwin system, this will fall + # through to the default implementation + pass + return plat + + +macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") +darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") +# XXX backward compat +get_platform = get_build_platform + + +def compatible_platforms(provided, required): + """Can code for the `provided` platform run on the `required` platform? + + Returns true if either platform is ``None``, or the platforms are equal. + + XXX Needs compatibility checks for Linux and other unixy OSes. 
+ """ + if provided is None or required is None or provided == required: + # easy case + return True + + # macOS special cases + reqMac = macosVersionString.match(required) + if reqMac: + provMac = macosVersionString.match(provided) + + # is this a Mac package? + if not provMac: + # this is backwards compatibility for packages built before + # setuptools 0.6. All packages built after this point will + # use the new macOS designation. + provDarwin = darwinVersionString.match(provided) + if provDarwin: + dversion = int(provDarwin.group(1)) + macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) + if dversion == 7 and macosversion >= "10.3" or \ + dversion == 8 and macosversion >= "10.4": + return True + # egg isn't macOS or legacy darwin + return False + + # are they the same major version and machine type? + if provMac.group(1) != reqMac.group(1) or \ + provMac.group(3) != reqMac.group(3): + return False + + # is the required OS major update >= the provided one? + if int(provMac.group(2)) > int(reqMac.group(2)): + return False + + return True + + # XXX Linux and other platforms' special cases should go here + return False + + +def run_script(dist_spec, script_name): + """Locate distribution `dist_spec` and run its `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + require(dist_spec)[0].run_script(script_name, ns) + + +# backward compatibility +run_main = run_script + + +def get_distribution(dist): + """Return a current distribution object for a Requirement or string""" + if isinstance(dist, str): + dist = Requirement.parse(dist) + if isinstance(dist, Requirement): + dist = get_provider(dist) + if not isinstance(dist, Distribution): + raise TypeError("Expected string, Requirement, or Distribution", dist) + return dist + + +def load_entry_point(dist, group, name): + """Return `name` entry point of `group` for `dist` or raise ImportError""" + return get_distribution(dist).load_entry_point(group, name) + + +def get_entry_map(dist, group=None): + """Return the entry point map for `group`, or the full entry map""" + return get_distribution(dist).get_entry_map(group) + + +def get_entry_info(dist, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return get_distribution(dist).get_entry_info(group, name) + + +class IMetadataProvider: + def has_metadata(name): + """Does the package's distribution contain the named metadata?""" + + def get_metadata(name): + """The named metadata resource as a string""" + + def get_metadata_lines(name): + """Yield named metadata resource as list of non-blank non-comment lines + + Leading and trailing whitespace is stripped from each line, and lines + with ``#`` as the first non-blank character are omitted.""" + + def metadata_isdir(name): + """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" + + def metadata_listdir(name): + """List of metadata names in the directory (like ``os.listdir()``)""" + + def run_script(script_name, namespace): + """Execute the named script in the supplied namespace dictionary""" + + +class IResourceProvider(IMetadataProvider): + """An object that provides access to package resources""" + + def get_resource_filename(manager, resource_name): + """Return a true filesystem path for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_stream(manager, resource_name): + """Return a readable file-like object for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_string(manager, resource_name): + """Return a string containing the contents of `resource_name` + + `manager` must be an ``IResourceManager``""" + + def has_resource(resource_name): + """Does the package contain the named resource?""" + + def resource_isdir(resource_name): + """Is the named resource a directory? (like ``os.path.isdir()``)""" + + def resource_listdir(resource_name): + """List of resource names in the directory (like ``os.listdir()``)""" + + +class WorkingSet: + """A collection of active distributions on sys.path (or a similar list)""" + + def __init__(self, entries=None): + """Create working set from list of path entries (default=sys.path)""" + self.entries = [] + self.entry_keys = {} + self.by_key = {} + self.callbacks = [] + + if entries is None: + entries = sys.path + + for entry in entries: + self.add_entry(entry) + + @classmethod + def _build_master(cls): + """ + Prepare the master working set. + """ + ws = cls() + try: + from __main__ import __requires__ + except ImportError: + # The main program does not list any requirements + return ws + + # ensure the requirements are met + try: + ws.require(__requires__) + except VersionConflict: + return cls._build_from_requirements(__requires__) + + return ws + + @classmethod + def _build_from_requirements(cls, req_spec): + """ + Build a working set from a requirement spec. Rewrites sys.path. + """ + # try it without defaults already on sys.path + # by starting with an empty path + ws = cls([]) + reqs = parse_requirements(req_spec) + dists = ws.resolve(reqs, Environment()) + for dist in dists: + ws.add(dist) + + # add any missing entries from sys.path + for entry in sys.path: + if entry not in ws.entries: + ws.add_entry(entry) + + # then copy back to sys.path + sys.path[:] = ws.entries + return ws + + def add_entry(self, entry): + """Add a path item to ``.entries``, finding any distributions on it + + ``find_distributions(entry, True)`` is used to find distributions + corresponding to the path entry, and they are added. `entry` is + always appended to ``.entries``, even if it is already present. + (This is because ``sys.path`` can contain the same value more than + once, and the ``.entries`` of the ``sys.path`` WorkingSet should always + equal ``sys.path``.) + """ + self.entry_keys.setdefault(entry, []) + self.entries.append(entry) + for dist in find_distributions(entry, True): + self.add(dist, entry, False) + + def __contains__(self, dist): + """True if `dist` is the active distribution for its project""" + return self.by_key.get(dist.key) == dist + + def find(self, req): + """Find a distribution matching requirement `req` + + If there is an active distribution for the requested project, this + returns it as long as it meets the version requirement specified by + `req`. 
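A minimal sketch, assuming a hypothetical installed project:

    from pkg_resources import working_set, Requirement

    req = Requirement.parse('somepkg>=1.0')
    dist = working_set.find(req)   # the active Distribution, or None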
But, if there is an active distribution for the project and it + does *not* meet the `req` requirement, ``VersionConflict`` is raised. + If there is no active distribution for the requested project, ``None`` + is returned. + """ + dist = self.by_key.get(req.key) + if dist is not None and dist not in req: + # XXX add more info + raise VersionConflict(dist, req) + return dist + + def iter_entry_points(self, group, name=None): + """Yield entry point objects from `group` matching `name` + + If `name` is None, yields all entry points in `group` from all + distributions in the working set, otherwise only ones matching + both `group` and `name` are yielded (in distribution order). + """ + return ( + entry + for dist in self + for entry in dist.get_entry_map(group).values() + if name is None or name == entry.name + ) + + def run_script(self, requires, script_name): + """Locate distribution for `requires` and run `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + self.require(requires)[0].run_script(script_name, ns) + + def __iter__(self): + """Yield distributions for non-duplicate projects in the working set + + The yield order is the order in which the items' path entries were + added to the working set. + """ + seen = {} + for item in self.entries: + if item not in self.entry_keys: + # workaround a cache issue + continue + + for key in self.entry_keys[item]: + if key not in seen: + seen[key] = 1 + yield self.by_key[key] + + def add(self, dist, entry=None, insert=True, replace=False): + """Add `dist` to working set, associated with `entry` + + If `entry` is unspecified, it defaults to the ``.location`` of `dist`. + On exit from this routine, `entry` is added to the end of the working + set's ``.entries`` (if it wasn't already present). + + `dist` is only added to the working set if it's for a project that + doesn't already have a distribution in the set, unless `replace=True`. + If it's added, any callbacks registered with the ``subscribe()`` method + will be called. + """ + if insert: + dist.insert_on(self.entries, entry, replace=replace) + + if entry is None: + entry = dist.location + keys = self.entry_keys.setdefault(entry, []) + keys2 = self.entry_keys.setdefault(dist.location, []) + if not replace and dist.key in self.by_key: + # ignore hidden distros + return + + self.by_key[dist.key] = dist + if dist.key not in keys: + keys.append(dist.key) + if dist.key not in keys2: + keys2.append(dist.key) + self._added_new(dist) + + # FIXME: 'WorkingSet.resolve' is too complex (11) + def resolve(self, requirements, env=None, installer=None, # noqa: C901 + replace_conflicting=False, extras=None): + """List all distributions needed to (recursively) meet `requirements` + + `requirements` must be a sequence of ``Requirement`` objects. `env`, + if supplied, should be an ``Environment`` instance. If + not supplied, it defaults to all distributions available within any + entry or distribution in the working set. `installer`, if supplied, + will be invoked with each requirement that cannot be met by an + already-installed distribution; it should return a ``Distribution`` or + ``None``. + + Unless `replace_conflicting=True`, raises a VersionConflict exception + if + any requirements are found on the path that have the correct name but + the wrong version. Otherwise, if an `installer` is supplied it will be + invoked to obtain the correct version of the requirement and activate + it. 
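A sketch of the typical call pattern; this is essentially what ``require()`` does:

    from pkg_resources import working_set, parse_requirements

    reqs = parse_requirements('somepkg>=1.0')   # hypothetical requirement
    for dist in working_set.resolve(reqs):      # may raise DistributionNotFound
        working_set.add(dist)                   # or VersionConflict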
+ + `extras` is a list of the extras to be used with these requirements. + This is important because extra requirements may look like `my_req; + extra = "my_extra"`, which would otherwise be interpreted as a purely + optional requirement. Instead, we want to be able to assert that these + requirements are truly required. + """ + + # set up the stack + requirements = list(requirements)[::-1] + # set of processed requirements + processed = {} + # key -> dist + best = {} + to_activate = [] + + req_extras = _ReqExtras() + + # Mapping of requirement to set of distributions that required it; + # useful for reporting info about conflicts. + required_by = collections.defaultdict(set) + + while requirements: + # process dependencies breadth-first + req = requirements.pop(0) + if req in processed: + # Ignore cyclic or redundant dependencies + continue + + if not req_extras.markers_pass(req, extras): + continue + + dist = best.get(req.key) + if dist is None: + # Find the best distribution and add it to the map + dist = self.by_key.get(req.key) + if dist is None or (dist not in req and replace_conflicting): + ws = self + if env is None: + if dist is None: + env = Environment(self.entries) + else: + # Use an empty environment and workingset to avoid + # any further conflicts with the conflicting + # distribution + env = Environment([]) + ws = WorkingSet([]) + dist = best[req.key] = env.best_match( + req, ws, installer, + replace_conflicting=replace_conflicting + ) + if dist is None: + requirers = required_by.get(req, None) + raise DistributionNotFound(req, requirers) + to_activate.append(dist) + if dist not in req: + # Oops, the "best" so far conflicts with a dependency + dependent_req = required_by[req] + raise VersionConflict(dist, req).with_context(dependent_req) + + # push the new requirements onto the stack + new_requirements = dist.requires(req.extras)[::-1] + requirements.extend(new_requirements) + + # Register the new requirements needed by req + for new_requirement in new_requirements: + required_by[new_requirement].add(req.project_name) + req_extras[new_requirement] = req.extras + + processed[req] = True + + # return list of distros to activate + return to_activate + + def find_plugins( + self, plugin_env, full_env=None, installer=None, fallback=True): + """Find all activatable distributions in `plugin_env` + + Example usage:: + + distributions, errors = working_set.find_plugins( + Environment(plugin_dirlist) + ) + # add plugins+libs to sys.path + map(working_set.add, distributions) + # display errors + print('Could not load', errors) + + The `plugin_env` should be an ``Environment`` instance that contains + only distributions that are in the project's "plugin directory" or + directories. The `full_env`, if supplied, should be an ``Environment`` + contains all currently-available distributions. If `full_env` is not + supplied, one is created automatically from the ``WorkingSet`` this + method is called on, which will typically mean that every directory on + ``sys.path`` will be scanned for distributions. + + `installer` is a standard installer callback as used by the + ``resolve()`` method. The `fallback` flag indicates whether we should + attempt to resolve older versions of a plugin if the newest version + cannot be resolved. + + This method returns a 2-tuple: (`distributions`, `error_info`), where + `distributions` is a list of the distributions found in `plugin_env` + that were loadable, along with any other distributions that are needed + to resolve their dependencies. 
`error_info` is a dictionary mapping + unloadable plugin distributions to an exception instance describing the + error that occurred. Usually this will be a ``DistributionNotFound`` or + ``VersionConflict`` instance. + """ + + plugin_projects = list(plugin_env) + # scan project names in alphabetic order + plugin_projects.sort() + + error_info = {} + distributions = {} + + if full_env is None: + env = Environment(self.entries) + env += plugin_env + else: + env = full_env + plugin_env + + shadow_set = self.__class__([]) + # put all our entries in shadow_set + list(map(shadow_set.add, self)) + + for project_name in plugin_projects: + + for dist in plugin_env[project_name]: + + req = [dist.as_requirement()] + + try: + resolvees = shadow_set.resolve(req, env, installer) + + except ResolutionError as v: + # save error info + error_info[dist] = v + if fallback: + # try the next older version of project + continue + else: + # give up on this project, keep going + break + + else: + list(map(shadow_set.add, resolvees)) + distributions.update(dict.fromkeys(resolvees)) + + # success, no need to try any more versions of this project + break + + distributions = list(distributions) + distributions.sort() + + return distributions, error_info + + def require(self, *requirements): + """Ensure that distributions matching `requirements` are activated + + `requirements` must be a string or a (possibly-nested) sequence + thereof, specifying the distributions and versions required. The + return value is a sequence of the distributions that needed to be + activated to fulfill the requirements; all relevant distributions are + included, even if they were already activated in this working set. + """ + needed = self.resolve(parse_requirements(requirements)) + + for dist in needed: + self.add(dist) + + return needed + + def subscribe(self, callback, existing=True): + """Invoke `callback` for all distributions + + If `existing=True` (default), + call on all existing ones, as well. + """ + if callback in self.callbacks: + return + self.callbacks.append(callback) + if not existing: + return + for dist in self: + callback(dist) + + def _added_new(self, dist): + for callback in self.callbacks: + callback(dist) + + def __getstate__(self): + return ( + self.entries[:], self.entry_keys.copy(), self.by_key.copy(), + self.callbacks[:] + ) + + def __setstate__(self, e_k_b_c): + entries, keys, by_key, callbacks = e_k_b_c + self.entries = entries[:] + self.entry_keys = keys.copy() + self.by_key = by_key.copy() + self.callbacks = callbacks[:] + + +class _ReqExtras(dict): + """ + Map each requirement to the extras that demanded it. + """ + + def markers_pass(self, req, extras=None): + """ + Evaluate markers for req against each extra that + demanded it. + + Return False if the req has a marker and fails + evaluation. Otherwise, return True. + """ + extra_evals = ( + req.marker.evaluate({'extra': extra}) + for extra in self.get(req, ()) + (extras or (None,)) + ) + return not req.marker or any(extra_evals) + + +class Environment: + """Searchable snapshot of distributions on a search path""" + + def __init__( + self, search_path=None, platform=get_supported_platform(), + python=PY_MAJOR): + """Snapshot distributions available on a search path + + Any distributions found on `search_path` are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. 
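A minimal sketch; ``'plugins'`` is a hypothetical search-path entry:

    from pkg_resources import Environment

    env = Environment(['plugins'])
    for project_name in env:            # unique project names (lower-cased keys)
        newest = env[project_name][0]   # per-project lists are newest-to-oldest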
+ + `platform` is an optional string specifying the name of the platform + that platform-specific distributions must be compatible with. If + unspecified, it defaults to the current platform. `python` is an + optional string naming the desired version of Python (e.g. ``'3.6'``); + it defaults to the current version. + + You may explicitly set `platform` (and/or `python`) to ``None`` if you + wish to map *all* distributions, not just those compatible with the + running platform or Python version. + """ + self._distmap = {} + self.platform = platform + self.python = python + self.scan(search_path) + + def can_add(self, dist): + """Is distribution `dist` acceptable for this environment? + + The distribution must match the platform and python version + requirements specified when this environment was created, or False + is returned. + """ + py_compat = ( + self.python is None + or dist.py_version is None + or dist.py_version == self.python + ) + return py_compat and compatible_platforms(dist.platform, self.platform) + + def remove(self, dist): + """Remove `dist` from the environment""" + self._distmap[dist.key].remove(dist) + + def scan(self, search_path=None): + """Scan `search_path` for distributions usable in this environment + + Any distributions found are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. Only distributions conforming to + the platform/python version defined at initialization are added. + """ + if search_path is None: + search_path = sys.path + + for item in search_path: + for dist in find_distributions(item): + self.add(dist) + + def __getitem__(self, project_name): + """Return a newest-to-oldest list of distributions for `project_name` + + Uses case-insensitive `project_name` comparison, assuming all the + project's distributions use their project's name converted to all + lowercase as their key. + + """ + distribution_key = project_name.lower() + return self._distmap.get(distribution_key, []) + + def add(self, dist): + """Add `dist` if we ``can_add()`` it and it has not already been added + """ + if self.can_add(dist) and dist.has_version(): + dists = self._distmap.setdefault(dist.key, []) + if dist not in dists: + dists.append(dist) + dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) + + def best_match( + self, req, working_set, installer=None, replace_conflicting=False): + """Find distribution best matching `req` and usable on `working_set` + + This calls the ``find(req)`` method of the `working_set` to see if a + suitable distribution is already active. (This may raise + ``VersionConflict`` if an unsuitable version of the project is already + active in the specified `working_set`.) If a suitable distribution + isn't active, this method returns the newest distribution in the + environment that meets the ``Requirement`` in `req`. If no suitable + distribution is found, and `installer` is supplied, then the result of + calling the environment's ``obtain(req, installer)`` method will be + returned. + """ + try: + dist = working_set.find(req) + except VersionConflict: + if not replace_conflicting: + raise + dist = None + if dist is not None: + return dist + for dist in self[req.key]: + if dist in req: + return dist + # try to download/install + return self.obtain(req, installer) + + def obtain(self, requirement, installer=None): + """Obtain a distribution matching `requirement` (e.g. via download) + + Obtain a distro that matches requirement (e.g. via download). 
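A minimal subclassing sketch of this hook; the class name is hypothetical:

    from pkg_resources import Environment

    class DownloadingEnvironment(Environment):
        def obtain(self, requirement, installer=None):
            # try a custom acquisition strategy here, then fall back
            return super().obtain(requirement, installer)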
In the + base ``Environment`` class, this routine just returns + ``installer(requirement)``, unless `installer` is None, in which case + None is returned instead. This method is a hook that allows subclasses + to attempt other ways of obtaining a distribution before falling back + to the `installer` argument.""" + if installer is not None: + return installer(requirement) + + def __iter__(self): + """Yield the unique project names of the available distributions""" + for key in self._distmap.keys(): + if self[key]: + yield key + + def __iadd__(self, other): + """In-place addition of a distribution or environment""" + if isinstance(other, Distribution): + self.add(other) + elif isinstance(other, Environment): + for project in other: + for dist in other[project]: + self.add(dist) + else: + raise TypeError("Can't add %r to environment" % (other,)) + return self + + def __add__(self, other): + """Add an environment or distribution to an environment""" + new = self.__class__([], platform=None, python=None) + for env in self, other: + new += env + return new + + +# XXX backward compatibility +AvailableDistributions = Environment + + +class ExtractionError(RuntimeError): + """An error occurred extracting a resource + + The following attributes are available from instances of this exception: + + manager + The resource manager that raised this exception + + cache_path + The base directory for resource extraction + + original_error + The exception instance that caused extraction to fail + """ + + +class ResourceManager: + """Manage resource extraction and packages""" + extraction_path = None + + def __init__(self): + self.cached_files = {} + + def resource_exists(self, package_or_requirement, resource_name): + """Does the named resource exist?""" + return get_provider(package_or_requirement).has_resource(resource_name) + + def resource_isdir(self, package_or_requirement, resource_name): + """Is the named resource an existing directory?""" + return get_provider(package_or_requirement).resource_isdir( + resource_name + ) + + def resource_filename(self, package_or_requirement, resource_name): + """Return a true filesystem path for specified resource""" + return get_provider(package_or_requirement).get_resource_filename( + self, resource_name + ) + + def resource_stream(self, package_or_requirement, resource_name): + """Return a readable file-like object for specified resource""" + return get_provider(package_or_requirement).get_resource_stream( + self, resource_name + ) + + def resource_string(self, package_or_requirement, resource_name): + """Return specified resource as a string""" + return get_provider(package_or_requirement).get_resource_string( + self, resource_name + ) + + def resource_listdir(self, package_or_requirement, resource_name): + """List the contents of the named resource directory""" + return get_provider(package_or_requirement).resource_listdir( + resource_name + ) + + def extraction_error(self): + """Give an error message for problems extracting file(s)""" + + old_exc = sys.exc_info()[1] + cache_path = self.extraction_path or get_default_cache() + + tmpl = textwrap.dedent(""" + Can't extract file(s) to egg cache + + The following error occurred while trying to extract file(s) + to the Python egg cache: + + {old_exc} + + The Python egg cache directory is currently set to: + + {cache_path} + + Perhaps your account does not have write access to this directory? + You can change the cache directory by setting the PYTHON_EGG_CACHE + environment variable to point to an accessible directory. 
+ """).lstrip() + err = ExtractionError(tmpl.format(**locals())) + err.manager = self + err.cache_path = cache_path + err.original_error = old_exc + raise err + + def get_cache_path(self, archive_name, names=()): + """Return absolute location in cache for `archive_name` and `names` + + The parent directory of the resulting path will be created if it does + not already exist. `archive_name` should be the base filename of the + enclosing egg (which may not be the name of the enclosing zipfile!), + including its ".egg" extension. `names`, if provided, should be a + sequence of path name parts "under" the egg's extraction location. + + This method should only be called by resource providers that need to + obtain an extraction location, and only for names they intend to + extract, as it tracks the generated names for possible cleanup later. + """ + extract_path = self.extraction_path or get_default_cache() + target_path = os.path.join(extract_path, archive_name + '-tmp', *names) + try: + _bypass_ensure_directory(target_path) + except Exception: + self.extraction_error() + + self._warn_unsafe_extraction_path(extract_path) + + self.cached_files[target_path] = 1 + return target_path + + @staticmethod + def _warn_unsafe_extraction_path(path): + """ + If the default extraction path is overridden and set to an insecure + location, such as /tmp, it opens up an opportunity for an attacker to + replace an extracted file with an unauthorized payload. Warn the user + if a known insecure location is used. + + See Distribute #375 for more details. + """ + if os.name == 'nt' and not path.startswith(os.environ['windir']): + # On Windows, permissions are generally restrictive by default + # and temp directories are not writable by other users, so + # bypass the warning. + return + mode = os.stat(path).st_mode + if mode & stat.S_IWOTH or mode & stat.S_IWGRP: + msg = ( + "Extraction path is writable by group/others " + "and vulnerable to attack when " + "used with get_resource_filename ({path}). " + "Consider a more secure " + "location (set with .set_extraction_path or the " + "PYTHON_EGG_CACHE environment variable)." + ).format(**locals()) + warnings.warn(msg, UserWarning) + + def postprocess(self, tempname, filename): + """Perform any platform-specific postprocessing of `tempname` + + This is where Mac header rewrites should be done; other platforms don't + have anything special they should do. + + Resource providers should call this method ONLY after successfully + extracting a compressed resource. They must NOT call it on resources + that are already in the filesystem. + + `tempname` is the current (temporary) name of the file, and `filename` + is the name it will be renamed to by the caller after this routine + returns. + """ + + if os.name == 'posix': + # Make the resource executable + mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 + os.chmod(tempname, mode) + + def set_extraction_path(self, path): + """Set the base path where resources will be extracted to, if needed. + + If you do not call this routine before any extractions take place, the + path defaults to the return value of ``get_default_cache()``. (Which + is based on the ``PYTHON_EGG_CACHE`` environment variable, with various + platform-specific fallbacks. See that routine's documentation for more + details.) + + Resources are extracted to subdirectories of this path based upon + information given by the ``IResourceProvider``. 
You may set this to a + temporary directory, but then you must call ``cleanup_resources()`` to + delete the extracted files when done. There is no guarantee that + ``cleanup_resources()`` will be able to remove all extracted files. + + (Note: you may not change the extraction path for a given resource + manager once resources have been extracted, unless you first call + ``cleanup_resources()``.) + """ + if self.cached_files: + raise ValueError( + "Can't change extraction path, files already extracted" + ) + + self.extraction_path = path + + def cleanup_resources(self, force=False): + """ + Delete all extracted resource files and directories, returning a list + of the file and directory names that could not be successfully removed. + This function does not have any concurrency protection, so it should + generally only be called when the extraction path is a temporary + directory exclusive to a single process. This method is not + automatically called; you must call it explicitly or register it as an + ``atexit`` function if you wish to ensure cleanup of a temporary + directory used for extractions. + """ + # XXX + + +def get_default_cache(): + """ + Return the ``PYTHON_EGG_CACHE`` environment variable + or a platform-relevant user cache dir for an app + named "Python-Eggs". + """ + return ( + os.environ.get('PYTHON_EGG_CACHE') + or appdirs.user_cache_dir(appname='Python-Eggs') + ) + + +def safe_name(name): + """Convert an arbitrary string to a standard distribution name + + Any runs of non-alphanumeric/. characters are replaced with a single '-'. + """ + return re.sub('[^A-Za-z0-9.]+', '-', name) + + +def safe_version(version): + """ + Convert an arbitrary string to a standard version string + """ + try: + # normalize the version + return str(packaging.version.Version(version)) + except packaging.version.InvalidVersion: + version = version.replace(' ', '.') + return re.sub('[^A-Za-z0-9.]+', '-', version) + + +def safe_extra(extra): + """Convert an arbitrary string to a standard 'extra' name + + Any runs of non-alphanumeric characters are replaced with a single '_', + and the result is always lowercased. + """ + return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() + + +def to_filename(name): + """Convert a project or version name to its filename-escaped form + + Any '-' characters are currently replaced with '_'. + """ + return name.replace('-', '_') + + +def invalid_marker(text): + """ + Validate text as a PEP 508 environment marker; return an exception + if invalid or False otherwise. + """ + try: + evaluate_marker(text) + except SyntaxError as e: + e.filename = None + e.lineno = None + return e + return False + + +def evaluate_marker(text, extra=None): + """ + Evaluate a PEP 508 environment marker. + Return a boolean indicating the marker result in this environment. + Raise SyntaxError if marker is invalid. + + This implementation uses the 'pyparsing' module. 
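Illustrative calls:

    from pkg_resources import evaluate_marker, invalid_marker

    evaluate_marker('python_version >= "3.5"')   # True on any supported interpreter
    invalid_marker('python_version >=')          # returns the SyntaxError, not False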
+ """ + try: + marker = packaging.markers.Marker(text) + return marker.evaluate() + except packaging.markers.InvalidMarker as e: + raise SyntaxError(e) from e + + +class NullProvider: + """Try to implement resources and metadata for arbitrary PEP 302 loaders""" + + egg_name = None + egg_info = None + loader = None + + def __init__(self, module): + self.loader = getattr(module, '__loader__', None) + self.module_path = os.path.dirname(getattr(module, '__file__', '')) + + def get_resource_filename(self, manager, resource_name): + return self._fn(self.module_path, resource_name) + + def get_resource_stream(self, manager, resource_name): + return io.BytesIO(self.get_resource_string(manager, resource_name)) + + def get_resource_string(self, manager, resource_name): + return self._get(self._fn(self.module_path, resource_name)) + + def has_resource(self, resource_name): + return self._has(self._fn(self.module_path, resource_name)) + + def _get_metadata_path(self, name): + return self._fn(self.egg_info, name) + + def has_metadata(self, name): + if not self.egg_info: + return self.egg_info + + path = self._get_metadata_path(name) + return self._has(path) + + def get_metadata(self, name): + if not self.egg_info: + return "" + path = self._get_metadata_path(name) + value = self._get(path) + try: + return value.decode('utf-8') + except UnicodeDecodeError as exc: + # Include the path in the error message to simplify + # troubleshooting, and without changing the exception type. + exc.reason += ' in {} file at path: {}'.format(name, path) + raise + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + def resource_isdir(self, resource_name): + return self._isdir(self._fn(self.module_path, resource_name)) + + def metadata_isdir(self, name): + return self.egg_info and self._isdir(self._fn(self.egg_info, name)) + + def resource_listdir(self, resource_name): + return self._listdir(self._fn(self.module_path, resource_name)) + + def metadata_listdir(self, name): + if self.egg_info: + return self._listdir(self._fn(self.egg_info, name)) + return [] + + def run_script(self, script_name, namespace): + script = 'scripts/' + script_name + if not self.has_metadata(script): + raise ResolutionError( + "Script {script!r} not found in metadata at {self.egg_info!r}" + .format(**locals()), + ) + script_text = self.get_metadata(script).replace('\r\n', '\n') + script_text = script_text.replace('\r', '\n') + script_filename = self._fn(self.egg_info, script) + namespace['__file__'] = script_filename + if os.path.exists(script_filename): + with open(script_filename) as fid: + source = fid.read() + code = compile(source, script_filename, 'exec') + exec(code, namespace, namespace) + else: + from linecache import cache + cache[script_filename] = ( + len(script_text), 0, script_text.split('\n'), script_filename + ) + script_code = compile(script_text, script_filename, 'exec') + exec(script_code, namespace, namespace) + + def _has(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _isdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _listdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _fn(self, base, resource_name): + self._validate_resource_path(resource_name) + if resource_name: + return os.path.join(base, *resource_name.split('/')) + return base + + @staticmethod + def 
_validate_resource_path(path): + """ + Validate the resource paths according to the docs. + https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access + + >>> warned = getfixture('recwarn') + >>> warnings.simplefilter('always') + >>> vrp = NullProvider._validate_resource_path + >>> vrp('foo/bar.txt') + >>> bool(warned) + False + >>> vrp('../foo/bar.txt') + >>> bool(warned) + True + >>> warned.clear() + >>> vrp('/foo/bar.txt') + >>> bool(warned) + True + >>> vrp('foo/../../bar.txt') + >>> bool(warned) + True + >>> warned.clear() + >>> vrp('foo/f../bar.txt') + >>> bool(warned) + False + + Windows path separators are straight-up disallowed. + >>> vrp(r'\\foo/bar.txt') + Traceback (most recent call last): + ... + ValueError: Use of .. or absolute path in a resource path \ +is not allowed. + + >>> vrp(r'C:\\foo/bar.txt') + Traceback (most recent call last): + ... + ValueError: Use of .. or absolute path in a resource path \ +is not allowed. + + Blank values are allowed + + >>> vrp('') + >>> bool(warned) + False + + Non-string values are not. + + >>> vrp(None) + Traceback (most recent call last): + ... + AttributeError: ... + """ + invalid = ( + os.path.pardir in path.split(posixpath.sep) or + posixpath.isabs(path) or + ntpath.isabs(path) + ) + if not invalid: + return + + msg = "Use of .. or absolute path in a resource path is not allowed." + + # Aggressively disallow Windows absolute paths + if ntpath.isabs(path) and not posixpath.isabs(path): + raise ValueError(msg) + + # for compatibility, warn; in future + # raise ValueError(msg) + warnings.warn( + msg[:-1] + " and will raise exceptions in a future release.", + DeprecationWarning, + stacklevel=4, + ) + + def _get(self, path): + if hasattr(self.loader, 'get_data'): + return self.loader.get_data(path) + raise NotImplementedError( + "Can't perform this operation for loaders without 'get_data()'" + ) + + +register_loader_type(object, NullProvider) + + +def _parents(path): + """ + yield all parents of path including path + """ + last = None + while path != last: + yield path + last = path + path, _ = os.path.split(path) + + +class EggProvider(NullProvider): + """Provider based on a virtual filesystem""" + + def __init__(self, module): + NullProvider.__init__(self, module) + self._setup_prefix() + + def _setup_prefix(self): + # Assume that metadata may be nested inside a "basket" + # of multiple eggs and use module_path instead of .archive. 
+ eggs = filter(_is_egg_path, _parents(self.module_path)) + egg = next(eggs, None) + egg and self._set_egg(egg) + + def _set_egg(self, path): + self.egg_name = os.path.basename(path) + self.egg_info = os.path.join(path, 'EGG-INFO') + self.egg_root = path + + +class DefaultProvider(EggProvider): + """Provides access to package resources in the filesystem""" + + def _has(self, path): + return os.path.exists(path) + + def _isdir(self, path): + return os.path.isdir(path) + + def _listdir(self, path): + return os.listdir(path) + + def get_resource_stream(self, manager, resource_name): + return open(self._fn(self.module_path, resource_name), 'rb') + + def _get(self, path): + with open(path, 'rb') as stream: + return stream.read() + + @classmethod + def _register(cls): + loader_names = 'SourceFileLoader', 'SourcelessFileLoader', + for name in loader_names: + loader_cls = getattr(importlib_machinery, name, type(None)) + register_loader_type(loader_cls, cls) + + +DefaultProvider._register() + + +class EmptyProvider(NullProvider): + """Provider that returns nothing for all requests""" + + module_path = None + + _isdir = _has = lambda self, path: False + + def _get(self, path): + return '' + + def _listdir(self, path): + return [] + + def __init__(self): + pass + + +empty_provider = EmptyProvider() + + +class ZipManifests(dict): + """ + zip manifest builder + """ + + @classmethod + def build(cls, path): + """ + Build a dictionary similar to the zipimport directory + caches, except instead of tuples, store ZipInfo objects. + + Use a platform-specific path separator (os.sep) for the path keys + for compatibility with pypy on Windows. + """ + with zipfile.ZipFile(path) as zfile: + items = ( + ( + name.replace('/', os.sep), + zfile.getinfo(name), + ) + for name in zfile.namelist() + ) + return dict(items) + + load = build + + +class MemoizedZipManifests(ZipManifests): + """ + Memoized zipfile manifests. + """ + manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') + + def load(self, path): + """ + Load a manifest at path or return a suitable manifest already loaded. + """ + path = os.path.normpath(path) + mtime = os.stat(path).st_mtime + + if path not in self or self[path].mtime != mtime: + manifest = self.build(path) + self[path] = self.manifest_mod(manifest, mtime) + + return self[path].manifest + + +class ZipProvider(EggProvider): + """Resource support for zips and eggs""" + + eagers = None + _zip_manifests = MemoizedZipManifests() + + def __init__(self, module): + EggProvider.__init__(self, module) + self.zip_pre = self.loader.archive + os.sep + + def _zipinfo_name(self, fspath): + # Convert a virtual filename (full path to file) into a zipfile subpath + # usable with the zipimport directory cache for our target archive + fspath = fspath.rstrip(os.sep) + if fspath == self.loader.archive: + return '' + if fspath.startswith(self.zip_pre): + return fspath[len(self.zip_pre):] + raise AssertionError( + "%s is not a subpath of %s" % (fspath, self.zip_pre) + ) + + def _parts(self, zip_path): + # Convert a zipfile subpath into an egg-relative path part list. 
+ # pseudo-fs path + fspath = self.zip_pre + zip_path + if fspath.startswith(self.egg_root + os.sep): + return fspath[len(self.egg_root) + 1:].split(os.sep) + raise AssertionError( + "%s is not a subpath of %s" % (fspath, self.egg_root) + ) + + @property + def zipinfo(self): + return self._zip_manifests.load(self.loader.archive) + + def get_resource_filename(self, manager, resource_name): + if not self.egg_name: + raise NotImplementedError( + "resource_filename() only supported for .egg, not .zip" + ) + # no need to lock for extraction, since we use temp names + zip_path = self._resource_to_zip(resource_name) + eagers = self._get_eager_resources() + if '/'.join(self._parts(zip_path)) in eagers: + for name in eagers: + self._extract_resource(manager, self._eager_to_zip(name)) + return self._extract_resource(manager, zip_path) + + @staticmethod + def _get_date_and_size(zip_stat): + size = zip_stat.file_size + # ymdhms+wday, yday, dst + date_time = zip_stat.date_time + (0, 0, -1) + # 1980 offset already done + timestamp = time.mktime(date_time) + return timestamp, size + + # FIXME: 'ZipProvider._extract_resource' is too complex (12) + def _extract_resource(self, manager, zip_path): # noqa: C901 + + if zip_path in self._index(): + for name in self._index()[zip_path]: + last = self._extract_resource( + manager, os.path.join(zip_path, name) + ) + # return the extracted directory name + return os.path.dirname(last) + + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + + if not WRITE_SUPPORT: + raise IOError('"os.rename" and "os.unlink" are not supported ' + 'on this platform') + try: + + real_path = manager.get_cache_path( + self.egg_name, self._parts(zip_path) + ) + + if self._is_current(real_path, zip_path): + return real_path + + outf, tmpnam = _mkstemp( + ".$extract", + dir=os.path.dirname(real_path), + ) + os.write(outf, self.loader.get_data(zip_path)) + os.close(outf) + utime(tmpnam, (timestamp, timestamp)) + manager.postprocess(tmpnam, real_path) + + try: + rename(tmpnam, real_path) + + except os.error: + if os.path.isfile(real_path): + if self._is_current(real_path, zip_path): + # the file became current since it was checked above, + # so proceed. 
+ return real_path + # Windows, del old file and retry + elif os.name == 'nt': + unlink(real_path) + rename(tmpnam, real_path) + return real_path + raise + + except os.error: + # report a user-friendly error + manager.extraction_error() + + return real_path + + def _is_current(self, file_path, zip_path): + """ + Return True if the file_path is current for this zip_path + """ + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + if not os.path.isfile(file_path): + return False + stat = os.stat(file_path) + if stat.st_size != size or stat.st_mtime != timestamp: + return False + # check that the contents match + zip_contents = self.loader.get_data(zip_path) + with open(file_path, 'rb') as f: + file_contents = f.read() + return zip_contents == file_contents + + def _get_eager_resources(self): + if self.eagers is None: + eagers = [] + for name in ('native_libs.txt', 'eager_resources.txt'): + if self.has_metadata(name): + eagers.extend(self.get_metadata_lines(name)) + self.eagers = eagers + return self.eagers + + def _index(self): + try: + return self._dirindex + except AttributeError: + ind = {} + for path in self.zipinfo: + parts = path.split(os.sep) + while parts: + parent = os.sep.join(parts[:-1]) + if parent in ind: + ind[parent].append(parts[-1]) + break + else: + ind[parent] = [parts.pop()] + self._dirindex = ind + return ind + + def _has(self, fspath): + zip_path = self._zipinfo_name(fspath) + return zip_path in self.zipinfo or zip_path in self._index() + + def _isdir(self, fspath): + return self._zipinfo_name(fspath) in self._index() + + def _listdir(self, fspath): + return list(self._index().get(self._zipinfo_name(fspath), ())) + + def _eager_to_zip(self, resource_name): + return self._zipinfo_name(self._fn(self.egg_root, resource_name)) + + def _resource_to_zip(self, resource_name): + return self._zipinfo_name(self._fn(self.module_path, resource_name)) + + +register_loader_type(zipimport.zipimporter, ZipProvider) + + +class FileMetadata(EmptyProvider): + """Metadata handler for standalone PKG-INFO files + + Usage:: + + metadata = FileMetadata("/path/to/PKG-INFO") + + This provider rejects all data and metadata requests except for PKG-INFO, + which is treated as existing, and will be the contents of the file at + the provided location. 
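A minimal sketch; the PKG-INFO path is hypothetical:

    from pkg_resources import FileMetadata

    metadata = FileMetadata('/path/to/PKG-INFO')
    metadata.has_metadata('PKG-INFO')   # True only for this name, and only if the file exists
    pkg_info = metadata.get_metadata('PKG-INFO')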
+ """ + + def __init__(self, path): + self.path = path + + def _get_metadata_path(self, name): + return self.path + + def has_metadata(self, name): + return name == 'PKG-INFO' and os.path.isfile(self.path) + + def get_metadata(self, name): + if name != 'PKG-INFO': + raise KeyError("No metadata except PKG-INFO is available") + + with io.open(self.path, encoding='utf-8', errors="replace") as f: + metadata = f.read() + self._warn_on_replacement(metadata) + return metadata + + def _warn_on_replacement(self, metadata): + replacement_char = '�' + if replacement_char in metadata: + tmpl = "{self.path} could not be properly decoded in UTF-8" + msg = tmpl.format(**locals()) + warnings.warn(msg) + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + +class PathMetadata(DefaultProvider): + """Metadata provider for egg directories + + Usage:: + + # Development eggs: + + egg_info = "/path/to/PackageName.egg-info" + base_dir = os.path.dirname(egg_info) + metadata = PathMetadata(base_dir, egg_info) + dist_name = os.path.splitext(os.path.basename(egg_info))[0] + dist = Distribution(basedir, project_name=dist_name, metadata=metadata) + + # Unpacked egg directories: + + egg_path = "/path/to/PackageName-ver-pyver-etc.egg" + metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) + dist = Distribution.from_filename(egg_path, metadata=metadata) + """ + + def __init__(self, path, egg_info): + self.module_path = path + self.egg_info = egg_info + + +class EggMetadata(ZipProvider): + """Metadata provider for .egg files""" + + def __init__(self, importer): + """Create a metadata provider from a zipimporter""" + + self.zip_pre = importer.archive + os.sep + self.loader = importer + if importer.prefix: + self.module_path = os.path.join(importer.archive, importer.prefix) + else: + self.module_path = importer.archive + self._setup_prefix() + + +_declare_state('dict', _distribution_finders={}) + + +def register_finder(importer_type, distribution_finder): + """Register `distribution_finder` to find distributions in sys.path items + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `distribution_finder` is a callable that, passed a path + item and the importer instance, yields ``Distribution`` instances found on + that path item. See ``pkg_resources.find_on_path`` for an example.""" + _distribution_finders[importer_type] = distribution_finder + + +def find_distributions(path_item, only=False): + """Yield distributions accessible via `path_item`""" + importer = get_importer(path_item) + finder = _find_adapter(_distribution_finders, importer) + return finder(importer, path_item, only) + + +def find_eggs_in_zip(importer, path_item, only=False): + """ + Find eggs in zip files; possibly multiple nested eggs. 
+ """ + if importer.archive.endswith('.whl'): + # wheels are not supported with this finder + # they don't have PKG-INFO metadata, and won't ever contain eggs + return + metadata = EggMetadata(importer) + if metadata.has_metadata('PKG-INFO'): + yield Distribution.from_filename(path_item, metadata=metadata) + if only: + # don't yield nested distros + return + for subitem in metadata.resource_listdir(''): + if _is_egg_path(subitem): + subpath = os.path.join(path_item, subitem) + dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) + for dist in dists: + yield dist + elif subitem.lower().endswith(('.dist-info', '.egg-info')): + subpath = os.path.join(path_item, subitem) + submeta = EggMetadata(zipimport.zipimporter(subpath)) + submeta.egg_info = subpath + yield Distribution.from_location(path_item, subitem, submeta) + + +register_finder(zipimport.zipimporter, find_eggs_in_zip) + + +def find_nothing(importer, path_item, only=False): + return () + + +register_finder(object, find_nothing) + + +def _by_version_descending(names): + """ + Given a list of filenames, return them in descending order + by version number. + + >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' + >>> _by_version_descending(names) + ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'bar', 'foo'] + >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' + >>> _by_version_descending(names) + ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] + >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' + >>> _by_version_descending(names) + ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] + """ + def try_parse(name): + """ + Attempt to parse as a version or return a null version. + """ + try: + return packaging.version.Version(name) + except Exception: + return packaging.version.Version('0') + + def _by_version(name): + """ + Parse each component of the filename + """ + name, ext = os.path.splitext(name) + parts = itertools.chain(name.split('-'), [ext]) + return [try_parse(part) for part in parts] + + return sorted(names, key=_by_version, reverse=True) + + +def find_on_path(importer, path_item, only=False): + """Yield distributions accessible on a sys.path directory""" + path_item = _normalize_cached(path_item) + + if _is_unpacked_egg(path_item): + yield Distribution.from_filename( + path_item, metadata=PathMetadata( + path_item, os.path.join(path_item, 'EGG-INFO') + ) + ) + return + + entries = ( + os.path.join(path_item, child) + for child in safe_listdir(path_item) + ) + + # for performance, before sorting by version, + # screen entries for only those that will yield + # distributions + filtered = ( + entry + for entry in entries + if dist_factory(path_item, entry, only) + ) + + # scan for .egg and .egg-info in directory + path_item_entries = _by_version_descending(filtered) + for entry in path_item_entries: + fullpath = os.path.join(path_item, entry) + factory = dist_factory(path_item, entry, only) + for dist in factory(fullpath): + yield dist + + +def dist_factory(path_item, entry, only): + """Return a dist_factory for the given entry.""" + lower = entry.lower() + is_egg_info = lower.endswith('.egg-info') + is_dist_info = ( + lower.endswith('.dist-info') and + os.path.isdir(os.path.join(path_item, entry)) + ) + is_meta = is_egg_info or is_dist_info + return ( + distributions_from_metadata + if is_meta else + find_distributions + if not only and _is_egg_path(entry) else + resolve_egg_link + if not only and lower.endswith('.egg-link') else + NoDists() + ) + + +class NoDists: + """ + 
>>> bool(NoDists()) + False + + >>> list(NoDists()('anything')) + [] + """ + def __bool__(self): + return False + + def __call__(self, fullpath): + return iter(()) + + +def safe_listdir(path): + """ + Attempt to list contents of path, but suppress some exceptions. + """ + try: + return os.listdir(path) + except (PermissionError, NotADirectoryError): + pass + except OSError as e: + # Ignore the directory if does not exist, not a directory or + # permission denied + if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT): + raise + return () + + +def distributions_from_metadata(path): + root = os.path.dirname(path) + if os.path.isdir(path): + if len(os.listdir(path)) == 0: + # empty metadata dir; skip + return + metadata = PathMetadata(root, path) + else: + metadata = FileMetadata(path) + entry = os.path.basename(path) + yield Distribution.from_location( + root, entry, metadata, precedence=DEVELOP_DIST, + ) + + +def non_empty_lines(path): + """ + Yield non-empty lines from file at path + """ + with open(path) as f: + for line in f: + line = line.strip() + if line: + yield line + + +def resolve_egg_link(path): + """ + Given a path to an .egg-link, resolve distributions + present in the referenced path. + """ + referenced_paths = non_empty_lines(path) + resolved_paths = ( + os.path.join(os.path.dirname(path), ref) + for ref in referenced_paths + ) + dist_groups = map(find_distributions, resolved_paths) + return next(dist_groups, ()) + + +register_finder(pkgutil.ImpImporter, find_on_path) + +if hasattr(importlib_machinery, 'FileFinder'): + register_finder(importlib_machinery.FileFinder, find_on_path) + +_declare_state('dict', _namespace_handlers={}) +_declare_state('dict', _namespace_packages={}) + + +def register_namespace_handler(importer_type, namespace_handler): + """Register `namespace_handler` to declare namespace packages + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `namespace_handler` is a callable like this:: + + def namespace_handler(importer, path_entry, moduleName, module): + # return a path_entry to use for child packages + + Namespace handlers are only called if the importer object has already + agreed that it can handle the relevant path item, and they should only + return a subpath if the module __path__ does not already contain an + equivalent subpath. For an example namespace handler, see + ``pkg_resources.file_ns_handler``. 
+ """ + _namespace_handlers[importer_type] = namespace_handler + + +def _handle_ns(packageName, path_item): + """Ensure that named package includes a subpath of path_item (if needed)""" + + importer = get_importer(path_item) + if importer is None: + return None + + # use find_spec (PEP 451) and fall-back to find_module (PEP 302) + try: + loader = importer.find_spec(packageName).loader + except AttributeError: + # capture warnings due to #1111 + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + loader = importer.find_module(packageName) + + if loader is None: + return None + module = sys.modules.get(packageName) + if module is None: + module = sys.modules[packageName] = types.ModuleType(packageName) + module.__path__ = [] + _set_parent_ns(packageName) + elif not hasattr(module, '__path__'): + raise TypeError("Not a package:", packageName) + handler = _find_adapter(_namespace_handlers, importer) + subpath = handler(importer, path_item, packageName, module) + if subpath is not None: + path = module.__path__ + path.append(subpath) + importlib.import_module(packageName) + _rebuild_mod_path(path, packageName, module) + return subpath + + +def _rebuild_mod_path(orig_path, package_name, module): + """ + Rebuild module.__path__ ensuring that all entries are ordered + corresponding to their sys.path order + """ + sys_path = [_normalize_cached(p) for p in sys.path] + + def safe_sys_path_index(entry): + """ + Workaround for #520 and #513. + """ + try: + return sys_path.index(entry) + except ValueError: + return float('inf') + + def position_in_sys_path(path): + """ + Return the ordinal of the path based on its position in sys.path + """ + path_parts = path.split(os.sep) + module_parts = package_name.count('.') + 1 + parts = path_parts[:-module_parts] + return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) + + new_path = sorted(orig_path, key=position_in_sys_path) + new_path = [_normalize_cached(p) for p in new_path] + + if isinstance(module.__path__, list): + module.__path__[:] = new_path + else: + module.__path__ = new_path + + +def declare_namespace(packageName): + """Declare that package 'packageName' is a namespace package""" + + _imp.acquire_lock() + try: + if packageName in _namespace_packages: + return + + path = sys.path + parent, _, _ = packageName.rpartition('.') + + if parent: + declare_namespace(parent) + if parent not in _namespace_packages: + __import__(parent) + try: + path = sys.modules[parent].__path__ + except AttributeError as e: + raise TypeError("Not a package:", parent) from e + + # Track what packages are namespaces, so when new path items are added, + # they can be updated + _namespace_packages.setdefault(parent or None, []).append(packageName) + _namespace_packages.setdefault(packageName, []) + + for path_item in path: + # Ensure all the parent's path items are reflected in the child, + # if they apply + _handle_ns(packageName, path_item) + + finally: + _imp.release_lock() + + +def fixup_namespace_packages(path_item, parent=None): + """Ensure that previously-declared namespace packages include path_item""" + _imp.acquire_lock() + try: + for package in _namespace_packages.get(parent, ()): + subpath = _handle_ns(package, path_item) + if subpath: + fixup_namespace_packages(subpath, package) + finally: + _imp.release_lock() + + +def file_ns_handler(importer, path_item, packageName, module): + """Compute an ns-package subpath for a filesystem or zipfile importer""" + + subpath = os.path.join(path_item, packageName.split('.')[-1]) + normalized = 
_normalize_cached(subpath) + for item in module.__path__: + if _normalize_cached(item) == normalized: + break + else: + # Only return the path if it's not already there + return subpath + + +register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) +register_namespace_handler(zipimport.zipimporter, file_ns_handler) + +if hasattr(importlib_machinery, 'FileFinder'): + register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) + + +def null_ns_handler(importer, path_item, packageName, module): + return None + + +register_namespace_handler(object, null_ns_handler) + + +def normalize_path(filename): + """Normalize a file/dir name for comparison purposes""" + return os.path.normcase(os.path.realpath(os.path.normpath( + _cygwin_patch(filename)))) + + +def _cygwin_patch(filename): # pragma: nocover + """ + Contrary to POSIX 2008, on Cygwin, getcwd (3) contains + symlink components. Using + os.path.abspath() works around this limitation. A fix in os.getcwd() + would probably better, in Cygwin even more so, except + that this seems to be by design... + """ + return os.path.abspath(filename) if sys.platform == 'cygwin' else filename + + +def _normalize_cached(filename, _cache={}): + try: + return _cache[filename] + except KeyError: + _cache[filename] = result = normalize_path(filename) + return result + + +def _is_egg_path(path): + """ + Determine if given path appears to be an egg. + """ + return _is_zip_egg(path) or _is_unpacked_egg(path) + + +def _is_zip_egg(path): + return ( + path.lower().endswith('.egg') and + os.path.isfile(path) and + zipfile.is_zipfile(path) + ) + + +def _is_unpacked_egg(path): + """ + Determine if given path appears to be an unpacked egg. + """ + return ( + path.lower().endswith('.egg') and + os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) + ) + + +def _set_parent_ns(packageName): + parts = packageName.split('.') + name = parts.pop() + if parts: + parent = '.'.join(parts) + setattr(sys.modules[parent], name, sys.modules[packageName]) + + +def _nonblank(str): + return str and not str.startswith('#') + + +@functools.singledispatch +def yield_lines(iterable): + """Yield valid lines of a string or iterable""" + return itertools.chain.from_iterable(map(yield_lines, iterable)) + + +@yield_lines.register(str) +def _(text): + return filter(_nonblank, map(str.strip, text.splitlines())) + + +MODULE = re.compile(r"\w+(\.\w+)*$").match +EGG_NAME = re.compile( + r""" + (?P[^-]+) ( + -(?P[^-]+) ( + -py(?P[^-]+) ( + -(?P.+) + )? + )? + )? + """, + re.VERBOSE | re.IGNORECASE, +).match + + +class EntryPoint: + """Object representing an advertised importable object""" + + def __init__(self, name, module_name, attrs=(), extras=(), dist=None): + if not MODULE(module_name): + raise ValueError("Invalid module name", module_name) + self.name = name + self.module_name = module_name + self.attrs = tuple(attrs) + self.extras = tuple(extras) + self.dist = dist + + def __str__(self): + s = "%s = %s" % (self.name, self.module_name) + if self.attrs: + s += ':' + '.'.join(self.attrs) + if self.extras: + s += ' [%s]' % ','.join(self.extras) + return s + + def __repr__(self): + return "EntryPoint.parse(%r)" % str(self) + + def load(self, require=True, *args, **kwargs): + """ + Require packages for this EntryPoint, then resolve it. + """ + if not require or args or kwargs: + warnings.warn( + "Parameters to load are deprecated. 
Call .resolve and " + ".require separately.", + PkgResourcesDeprecationWarning, + stacklevel=2, + ) + if require: + self.require(*args, **kwargs) + return self.resolve() + + def resolve(self): + """ + Resolve the entry point from its module and attrs. + """ + module = __import__(self.module_name, fromlist=['__name__'], level=0) + try: + return functools.reduce(getattr, self.attrs, module) + except AttributeError as exc: + raise ImportError(str(exc)) from exc + + def require(self, env=None, installer=None): + if self.extras and not self.dist: + raise UnknownExtra("Can't require() without a distribution", self) + + # Get the requirements for this entry point with all its extras and + # then resolve them. We have to pass `extras` along when resolving so + # that the working set knows what extras we want. Otherwise, for + # dist-info distributions, the working set will assume that the + # requirements for that extra are purely optional and skip over them. + reqs = self.dist.requires(self.extras) + items = working_set.resolve(reqs, env, installer, extras=self.extras) + list(map(working_set.add, items)) + + pattern = re.compile( + r'\s*' + r'(?P.+?)\s*' + r'=\s*' + r'(?P[\w.]+)\s*' + r'(:\s*(?P[\w.]+))?\s*' + r'(?P\[.*\])?\s*$' + ) + + @classmethod + def parse(cls, src, dist=None): + """Parse a single entry point from string `src` + + Entry point syntax follows the form:: + + name = some.module:some.attr [extra1, extra2] + + The entry name and module name are required, but the ``:attrs`` and + ``[extras]`` parts are optional + """ + m = cls.pattern.match(src) + if not m: + msg = "EntryPoint must be in 'name=module:attrs [extras]' format" + raise ValueError(msg, src) + res = m.groupdict() + extras = cls._parse_extras(res['extras']) + attrs = res['attr'].split('.') if res['attr'] else () + return cls(res['name'], res['module'], attrs, extras, dist) + + @classmethod + def _parse_extras(cls, extras_spec): + if not extras_spec: + return () + req = Requirement.parse('x' + extras_spec) + if req.specs: + raise ValueError() + return req.extras + + @classmethod + def parse_group(cls, group, lines, dist=None): + """Parse an entry point group""" + if not MODULE(group): + raise ValueError("Invalid group name", group) + this = {} + for line in yield_lines(lines): + ep = cls.parse(line, dist) + if ep.name in this: + raise ValueError("Duplicate entry point", group, ep.name) + this[ep.name] = ep + return this + + @classmethod + def parse_map(cls, data, dist=None): + """Parse a map of entry point groups""" + if isinstance(data, dict): + data = data.items() + else: + data = split_sections(data) + maps = {} + for group, lines in data: + if group is None: + if not lines: + continue + raise ValueError("Entry points must be listed in groups") + group = group.strip() + if group in maps: + raise ValueError("Duplicate group name", group) + maps[group] = cls.parse_group(group, lines, dist) + return maps + + +def _version_from_file(lines): + """ + Given an iterable of lines from a Metadata file, return + the value of the Version field, if present, or None otherwise. 
+ """ + def is_version_line(line): + return line.lower().startswith('version:') + version_lines = filter(is_version_line, lines) + line = next(iter(version_lines), '') + _, _, value = line.partition(':') + return safe_version(value.strip()) or None + + +class Distribution: + """Wrap an actual or potential sys.path entry w/metadata""" + PKG_INFO = 'PKG-INFO' + + def __init__( + self, location=None, metadata=None, project_name=None, + version=None, py_version=PY_MAJOR, platform=None, + precedence=EGG_DIST): + self.project_name = safe_name(project_name or 'Unknown') + if version is not None: + self._version = safe_version(version) + self.py_version = py_version + self.platform = platform + self.location = location + self.precedence = precedence + self._provider = metadata or empty_provider + + @classmethod + def from_location(cls, location, basename, metadata=None, **kw): + project_name, version, py_version, platform = [None] * 4 + basename, ext = os.path.splitext(basename) + if ext.lower() in _distributionImpl: + cls = _distributionImpl[ext.lower()] + + match = EGG_NAME(basename) + if match: + project_name, version, py_version, platform = match.group( + 'name', 'ver', 'pyver', 'plat' + ) + return cls( + location, metadata, project_name=project_name, version=version, + py_version=py_version, platform=platform, **kw + )._reload_version() + + def _reload_version(self): + return self + + @property + def hashcmp(self): + return ( + self.parsed_version, + self.precedence, + self.key, + self.location, + self.py_version or '', + self.platform or '', + ) + + def __hash__(self): + return hash(self.hashcmp) + + def __lt__(self, other): + return self.hashcmp < other.hashcmp + + def __le__(self, other): + return self.hashcmp <= other.hashcmp + + def __gt__(self, other): + return self.hashcmp > other.hashcmp + + def __ge__(self, other): + return self.hashcmp >= other.hashcmp + + def __eq__(self, other): + if not isinstance(other, self.__class__): + # It's not a Distribution, so they are not equal + return False + return self.hashcmp == other.hashcmp + + def __ne__(self, other): + return not self == other + + # These properties have to be lazy so that we don't have to load any + # metadata until/unless it's actually needed. (i.e., some distributions + # may not know their name or version without loading PKG-INFO) + + @property + def key(self): + try: + return self._key + except AttributeError: + self._key = key = self.project_name.lower() + return key + + @property + def parsed_version(self): + if not hasattr(self, "_parsed_version"): + self._parsed_version = parse_version(self.version) + + return self._parsed_version + + def _warn_legacy_version(self): + LV = packaging.version.LegacyVersion + is_legacy = isinstance(self._parsed_version, LV) + if not is_legacy: + return + + # While an empty version is technically a legacy version and + # is not a valid PEP 440 version, it's also unlikely to + # actually come from someone and instead it is more likely that + # it comes from setuptools attempting to parse a filename and + # including it in the list. So for that we'll gate this warning + # on if the version is anything at all or not. + if not self.version: + return + + tmpl = textwrap.dedent(""" + '{project_name} ({version})' is being parsed as a legacy, + non PEP 440, + version. You may find odd behavior and sort order. + In particular it will be sorted as less than 0.0. It + is recommended to migrate to PEP 440 compatible + versions. 
+ """).strip().replace('\n', ' ') + + warnings.warn(tmpl.format(**vars(self)), PEP440Warning) + + @property + def version(self): + try: + return self._version + except AttributeError as e: + version = self._get_version() + if version is None: + path = self._get_metadata_path_for_display(self.PKG_INFO) + msg = ( + "Missing 'Version:' header and/or {} file at path: {}" + ).format(self.PKG_INFO, path) + raise ValueError(msg, self) from e + + return version + + @property + def _dep_map(self): + """ + A map of extra to its list of (direct) requirements + for this distribution, including the null extra. + """ + try: + return self.__dep_map + except AttributeError: + self.__dep_map = self._filter_extras(self._build_dep_map()) + return self.__dep_map + + @staticmethod + def _filter_extras(dm): + """ + Given a mapping of extras to dependencies, strip off + environment markers and filter out any dependencies + not matching the markers. + """ + for extra in list(filter(None, dm)): + new_extra = extra + reqs = dm.pop(extra) + new_extra, _, marker = extra.partition(':') + fails_marker = marker and ( + invalid_marker(marker) + or not evaluate_marker(marker) + ) + if fails_marker: + reqs = [] + new_extra = safe_extra(new_extra) or None + + dm.setdefault(new_extra, []).extend(reqs) + return dm + + def _build_dep_map(self): + dm = {} + for name in 'requires.txt', 'depends.txt': + for extra, reqs in split_sections(self._get_metadata(name)): + dm.setdefault(extra, []).extend(parse_requirements(reqs)) + return dm + + def requires(self, extras=()): + """List of Requirements needed for this distro if `extras` are used""" + dm = self._dep_map + deps = [] + deps.extend(dm.get(None, ())) + for ext in extras: + try: + deps.extend(dm[safe_extra(ext)]) + except KeyError as e: + raise UnknownExtra( + "%s has no such extra feature %r" % (self, ext) + ) from e + return deps + + def _get_metadata_path_for_display(self, name): + """ + Return the path to the given metadata file, if available. + """ + try: + # We need to access _get_metadata_path() on the provider object + # directly rather than through this class's __getattr__() + # since _get_metadata_path() is marked private. + path = self._provider._get_metadata_path(name) + + # Handle exceptions e.g. in case the distribution's metadata + # provider doesn't support _get_metadata_path(). 
+ except Exception: + return '[could not detect]' + + return path + + def _get_metadata(self, name): + if self.has_metadata(name): + for line in self.get_metadata_lines(name): + yield line + + def _get_version(self): + lines = self._get_metadata(self.PKG_INFO) + version = _version_from_file(lines) + + return version + + def activate(self, path=None, replace=False): + """Ensure distribution is importable on `path` (default=sys.path)""" + if path is None: + path = sys.path + self.insert_on(path, replace=replace) + if path is sys.path: + fixup_namespace_packages(self.location) + for pkg in self._get_metadata('namespace_packages.txt'): + if pkg in sys.modules: + declare_namespace(pkg) + + def egg_name(self): + """Return what this distribution's standard .egg filename should be""" + filename = "%s-%s-py%s" % ( + to_filename(self.project_name), to_filename(self.version), + self.py_version or PY_MAJOR + ) + + if self.platform: + filename += '-' + self.platform + return filename + + def __repr__(self): + if self.location: + return "%s (%s)" % (self, self.location) + else: + return str(self) + + def __str__(self): + try: + version = getattr(self, 'version', None) + except ValueError: + version = None + version = version or "[unknown version]" + return "%s %s" % (self.project_name, version) + + def __getattr__(self, attr): + """Delegate all unrecognized public attributes to .metadata provider""" + if attr.startswith('_'): + raise AttributeError(attr) + return getattr(self._provider, attr) + + def __dir__(self): + return list( + set(super(Distribution, self).__dir__()) + | set( + attr for attr in self._provider.__dir__() + if not attr.startswith('_') + ) + ) + + @classmethod + def from_filename(cls, filename, metadata=None, **kw): + return cls.from_location( + _normalize_cached(filename), os.path.basename(filename), metadata, + **kw + ) + + def as_requirement(self): + """Return a ``Requirement`` that matches this distribution exactly""" + if isinstance(self.parsed_version, packaging.version.Version): + spec = "%s==%s" % (self.project_name, self.parsed_version) + else: + spec = "%s===%s" % (self.project_name, self.parsed_version) + + return Requirement.parse(spec) + + def load_entry_point(self, group, name): + """Return the `name` entry point of `group` or raise ImportError""" + ep = self.get_entry_info(group, name) + if ep is None: + raise ImportError("Entry point %r not found" % ((group, name),)) + return ep.load() + + def get_entry_map(self, group=None): + """Return the entry point map for `group`, or the full entry map""" + try: + ep_map = self._ep_map + except AttributeError: + ep_map = self._ep_map = EntryPoint.parse_map( + self._get_metadata('entry_points.txt'), self + ) + if group is not None: + return ep_map.get(group, {}) + return ep_map + + def get_entry_info(self, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return self.get_entry_map(group).get(name) + + # FIXME: 'Distribution.insert_on' is too complex (13) + def insert_on(self, path, loc=None, replace=False): # noqa: C901 + """Ensure self.location is on path + + If replace=False (default): + - If location is already in path anywhere, do nothing. + - Else: + - If it's an egg and its parent directory is on path, + insert just ahead of the parent. + - Else: add to the end of path. + If replace=True: + - If location is already on path anywhere (not eggs) + or higher priority than its parent (eggs) + do nothing. 
+ - Else: + - If it's an egg and its parent directory is on path, + insert just ahead of the parent, + removing any lower-priority entries. + - Else: add it to the front of path. + """ + + loc = loc or self.location + if not loc: + return + + nloc = _normalize_cached(loc) + bdir = os.path.dirname(nloc) + npath = [(p and _normalize_cached(p) or p) for p in path] + + for p, item in enumerate(npath): + if item == nloc: + if replace: + break + else: + # don't modify path (even removing duplicates) if + # found and not replace + return + elif item == bdir and self.precedence == EGG_DIST: + # if it's an .egg, give it precedence over its directory + # UNLESS it's already been added to sys.path and replace=False + if (not replace) and nloc in npath[p:]: + return + if path is sys.path: + self.check_version_conflict() + path.insert(p, loc) + npath.insert(p, nloc) + break + else: + if path is sys.path: + self.check_version_conflict() + if replace: + path.insert(0, loc) + else: + path.append(loc) + return + + # p is the spot where we found or inserted loc; now remove duplicates + while True: + try: + np = npath.index(nloc, p + 1) + except ValueError: + break + else: + del npath[np], path[np] + # ha! + p = np + + return + + def check_version_conflict(self): + if self.key == 'setuptools': + # ignore the inevitable setuptools self-conflicts :( + return + + nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) + loc = normalize_path(self.location) + for modname in self._get_metadata('top_level.txt'): + if (modname not in sys.modules or modname in nsp + or modname in _namespace_packages): + continue + if modname in ('pkg_resources', 'setuptools', 'site'): + continue + fn = getattr(sys.modules[modname], '__file__', None) + if fn and (normalize_path(fn).startswith(loc) or + fn.startswith(self.location)): + continue + issue_warning( + "Module %s was already imported from %s, but %s is being added" + " to sys.path" % (modname, fn, self.location), + ) + + def has_version(self): + try: + self.version + except ValueError: + issue_warning("Unbuilt egg for " + repr(self)) + return False + return True + + def clone(self, **kw): + """Copy this distribution, substituting in any changed keyword args""" + names = 'project_name version py_version platform location precedence' + for attr in names.split(): + kw.setdefault(attr, getattr(self, attr, None)) + kw.setdefault('metadata', self._provider) + return self.__class__(**kw) + + @property + def extras(self): + return [dep for dep in self._dep_map if dep] + + +class EggInfoDistribution(Distribution): + def _reload_version(self): + """ + Packages installed by distutils (e.g. numpy or scipy), + which uses an old safe_version, and so + their version numbers can get mangled when + converted to filenames (e.g., 1.11.0.dev0+2329eae to + 1.11.0.dev0_2329eae). These distributions will not be + parsed properly + downstream by Distribution and safe_version, so + take an extra step and try to get the version number from + the metadata file itself instead of the filename. + """ + md_version = self._get_version() + if md_version: + self._version = md_version + return self + + +class DistInfoDistribution(Distribution): + """ + Wrap an actual or potential sys.path entry + w/metadata, .dist-info style. 
+ """ + PKG_INFO = 'METADATA' + EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") + + @property + def _parsed_pkg_info(self): + """Parse and cache metadata""" + try: + return self._pkg_info + except AttributeError: + metadata = self.get_metadata(self.PKG_INFO) + self._pkg_info = email.parser.Parser().parsestr(metadata) + return self._pkg_info + + @property + def _dep_map(self): + try: + return self.__dep_map + except AttributeError: + self.__dep_map = self._compute_dependencies() + return self.__dep_map + + def _compute_dependencies(self): + """Recompute this distribution's dependencies.""" + dm = self.__dep_map = {None: []} + + reqs = [] + # Including any condition expressions + for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: + reqs.extend(parse_requirements(req)) + + def reqs_for_extra(extra): + for req in reqs: + if not req.marker or req.marker.evaluate({'extra': extra}): + yield req + + common = frozenset(reqs_for_extra(None)) + dm[None].extend(common) + + for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: + s_extra = safe_extra(extra.strip()) + dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) + + return dm + + +_distributionImpl = { + '.egg': Distribution, + '.egg-info': EggInfoDistribution, + '.dist-info': DistInfoDistribution, +} + + +def issue_warning(*args, **kw): + level = 1 + g = globals() + try: + # find the first stack frame that is *not* code in + # the pkg_resources module, to use for the warning + while sys._getframe(level).f_globals is g: + level += 1 + except ValueError: + pass + warnings.warn(stacklevel=level + 1, *args, **kw) + + +def parse_requirements(strs): + """Yield ``Requirement`` objects for each specification in `strs` + + `strs` must be a string, or a (possibly-nested) iterable thereof. + """ + # create a steppable iterator, so we can handle \-continuations + lines = iter(yield_lines(strs)) + + for line in lines: + # Drop comments -- a hash without a space may be in a URL. + if ' #' in line: + line = line[:line.find(' #')] + # If there is a line continuation, drop it, and append the next line. + if line.endswith('\\'): + line = line[:-2].strip() + try: + line += next(lines) + except StopIteration: + return + yield Requirement(line) + + +class RequirementParseError(packaging.requirements.InvalidRequirement): + "Compatibility wrapper for InvalidRequirement" + + +class Requirement(packaging.requirements.Requirement): + def __init__(self, requirement_string): + """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" + super(Requirement, self).__init__(requirement_string) + self.unsafe_name = self.name + project_name = safe_name(self.name) + self.project_name, self.key = project_name, project_name.lower() + self.specs = [ + (spec.operator, spec.version) for spec in self.specifier] + self.extras = tuple(map(safe_extra, self.extras)) + self.hashCmp = ( + self.key, + self.url, + self.specifier, + frozenset(self.extras), + str(self.marker) if self.marker else None, + ) + self.__hash = hash(self.hashCmp) + + def __eq__(self, other): + return ( + isinstance(other, Requirement) and + self.hashCmp == other.hashCmp + ) + + def __ne__(self, other): + return not self == other + + def __contains__(self, item): + if isinstance(item, Distribution): + if item.key != self.key: + return False + + item = item.version + + # Allow prereleases always in order to match the previous behavior of + # this method. In the future this should be smarter and follow PEP 440 + # more accurately. 
+ return self.specifier.contains(item, prereleases=True) + + def __hash__(self): + return self.__hash + + def __repr__(self): + return "Requirement.parse(%r)" % str(self) + + @staticmethod + def parse(s): + req, = parse_requirements(s) + return req + + +def _always_object(classes): + """ + Ensure object appears in the mro even + for old-style classes. + """ + if object not in classes: + return classes + (object,) + return classes + + +def _find_adapter(registry, ob): + """Return an adapter factory for `ob` from `registry`""" + types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) + for t in types: + if t in registry: + return registry[t] + + +def ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + os.makedirs(dirname, exist_ok=True) + + +def _bypass_ensure_directory(path): + """Sandbox-bypassing version of ensure_directory()""" + if not WRITE_SUPPORT: + raise IOError('"os.mkdir" not supported on this platform.') + dirname, filename = split(path) + if dirname and filename and not isdir(dirname): + _bypass_ensure_directory(dirname) + try: + mkdir(dirname, 0o755) + except FileExistsError: + pass + + +def split_sections(s): + """Split a string or iterable thereof into (section, content) pairs + + Each ``section`` is a stripped version of the section header ("[section]") + and each ``content`` is a list of stripped lines excluding blank lines and + comment-only lines. If there are any such lines before the first section + header, they're returned in a first ``section`` of ``None``. + """ + section = None + content = [] + for line in yield_lines(s): + if line.startswith("["): + if line.endswith("]"): + if section or content: + yield section, content + section = line[1:-1].strip() + content = [] + else: + raise ValueError("Invalid section heading", line) + else: + content.append(line) + + # wrap up last segment + yield section, content + + +def _mkstemp(*args, **kw): + old_open = os.open + try: + # temporarily bypass sandboxing + os.open = os_open + return tempfile.mkstemp(*args, **kw) + finally: + # and then put it back + os.open = old_open + + +# Silence the PEP440Warning by default, so that end users don't get hit by it +# randomly just because they use pkg_resources. We want to append the rule +# because we want earlier uses of filterwarnings to take precedence over this +# one. +warnings.filterwarnings("ignore", category=PEP440Warning, append=True) + + +# from jaraco.functools 1.3 +def _call_aside(f, *args, **kwargs): + f(*args, **kwargs) + return f + + +@_call_aside +def _initialize(g=globals()): + "Set up global resource manager (deliberately not state-saved)" + manager = ResourceManager() + g['_manager'] = manager + g.update( + (name, getattr(manager, name)) + for name in dir(manager) + if not name.startswith('_') + ) + + +class PkgResourcesDeprecationWarning(Warning): + """ + Base class for warning about deprecations in ``pkg_resources`` + + This class is not derived from ``DeprecationWarning``, and as such is + visible by default. + """ + + +@_call_aside +def _initialize_master_working_set(): + """ + Prepare the master working set and make the ``require()`` + API available. + + This function has explicit effects on the global state + of pkg_resources. It is intended to be invoked once at + the initialization of this module. + + Invocation by other packages is unsupported and done + at their own risk. 
+ """ + working_set = WorkingSet._build_master() + _declare_state('object', working_set=working_set) + + require = working_set.require + iter_entry_points = working_set.iter_entry_points + add_activation_listener = working_set.subscribe + run_script = working_set.run_script + # backward compatibility + run_main = run_script + # Activate all distributions already on sys.path with replace=False and + # ensure that all distributions added to the working set in the future + # (e.g. by calling ``require()``) will get activated as well, + # with higher priority (replace=True). + tuple( + dist.activate(replace=False) + for dist in working_set + ) + add_activation_listener( + lambda dist: dist.activate(replace=True), + existing=False, + ) + working_set.entries = [] + # match order + list(map(working_set.add_entry, sys.path)) + globals().update(locals()) diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16d07c0742dbc7fa31a81de7a85f6c274d1104ea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py new file mode 100644 index 0000000000000000000000000000000000000000..ae67001af8b661373edeee2eb327b9f63e630d62 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py @@ -0,0 +1,608 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2010 ActiveState Software Inc. +# Copyright (c) 2013 Eddy Petrișor + +"""Utilities for determining application-specific dirs. + +See for details and usage. +""" +# Dev Notes: +# - MSDN on where to store app data files: +# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 +# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html +# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html + +__version_info__ = (1, 4, 3) +__version__ = '.'.join(map(str, __version_info__)) + + +import sys +import os + +PY3 = sys.version_info[0] == 3 + +if PY3: + unicode = str + +if sys.platform.startswith('java'): + import platform + os_name = platform.java_ver()[3][0] + if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. + system = 'win32' + elif os_name.startswith('Mac'): # "Mac OS X", etc. + system = 'darwin' + else: # "Linux", "SunOS", "FreeBSD", etc. + # Setting this to "linux2" is not ideal, but only Windows or Mac + # are actually checked for and the rest of the module expects + # *sys.platform* style strings. + system = 'linux2' +else: + system = sys.platform + + + +def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. 
+ "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + + for a discussion of issues. + + Typical user data directories are: + Mac OS X: ~/Library/Application Support/ + Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined + Win XP (not roaming): C:\Documents and Settings\\Application Data\\ + Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ + Win 7 (not roaming): C:\Users\\AppData\Local\\ + Win 7 (roaming): C:\Users\\AppData\Roaming\\ + + For Unix, we follow the XDG spec and support $XDG_DATA_HOME. + That means, by default "~/.local/share/". + """ + if system == "win32": + if appauthor is None: + appauthor = appname + const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" + path = os.path.normpath(_get_win_folder(const)) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + elif system == 'darwin': + path = os.path.expanduser('~/Library/Application Support/') + if appname: + path = os.path.join(path, appname) + else: + path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): + r"""Return full path to the user-shared data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "multipath" is an optional parameter only applicable to *nix + which indicates that the entire list of data dirs should be + returned. By default, the first item from XDG_DATA_DIRS is + returned, or '/usr/local/share/', + if XDG_DATA_DIRS is not set + + Typical site data directories are: + Mac OS X: /Library/Application Support/ + Unix: /usr/local/share/ or /usr/share/ + Win XP: C:\Documents and Settings\All Users\Application Data\\ + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) + Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. + + For Unix, this is using the $XDG_DATA_DIRS[0] default. + + WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
+ """ + if system == "win32": + if appauthor is None: + appauthor = appname + path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + elif system == 'darwin': + path = os.path.expanduser('/Library/Application Support') + if appname: + path = os.path.join(path, appname) + else: + # XDG default for $XDG_DATA_DIRS + # only first, if multipath is False + path = os.getenv('XDG_DATA_DIRS', + os.pathsep.join(['/usr/local/share', '/usr/share'])) + pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] + if appname: + if version: + appname = os.path.join(appname, version) + pathlist = [os.sep.join([x, appname]) for x in pathlist] + + if multipath: + path = os.pathsep.join(pathlist) + else: + path = pathlist[0] + return path + + if appname and version: + path = os.path.join(path, version) + return path + + +def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific config dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + + for a discussion of issues. + + Typical user config directories are: + Mac OS X: same as user_data_dir + Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined + Win *: same as user_data_dir + + For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. + That means, by default "~/.config/". + """ + if system in ["win32", "darwin"]: + path = user_data_dir(appname, appauthor, None, roaming) + else: + path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): + r"""Return full path to the user-shared data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "multipath" is an optional parameter only applicable to *nix + which indicates that the entire list of config dirs should be + returned. 
By default, the first item from XDG_CONFIG_DIRS is + returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set + + Typical site config directories are: + Mac OS X: same as site_data_dir + Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in + $XDG_CONFIG_DIRS + Win *: same as site_data_dir + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) + + For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False + + WARNING: Do not use this on Windows. See the Vista-Fail note above for why. + """ + if system in ["win32", "darwin"]: + path = site_data_dir(appname, appauthor) + if appname and version: + path = os.path.join(path, version) + else: + # XDG default for $XDG_CONFIG_DIRS + # only first, if multipath is False + path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') + pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] + if appname: + if version: + appname = os.path.join(appname, version) + pathlist = [os.sep.join([x, appname]) for x in pathlist] + + if multipath: + path = os.pathsep.join(pathlist) + else: + path = pathlist[0] + return path + + +def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): + r"""Return full path to the user-specific cache dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "opinion" (boolean) can be False to disable the appending of + "Cache" to the base app data dir for Windows. See + discussion below. + + Typical user cache directories are: + Mac OS X: ~/Library/Caches/ + Unix: ~/.cache/ (XDG default) + Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache + Vista: C:\Users\\AppData\Local\\\Cache + + On Windows the only suggestion in the MSDN docs is that local settings go in + the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming + app data dir (the default returned by `user_data_dir` above). Apps typically + put cache data somewhere *under* the given dir here. Some examples: + ...\Mozilla\Firefox\Profiles\\Cache + ...\Acme\SuperApp\Cache\1.0 + OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. + This can be disabled with the `opinion=False` option. + """ + if system == "win32": + if appauthor is None: + appauthor = appname + path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + if opinion: + path = os.path.join(path, "Cache") + elif system == 'darwin': + path = os.path.expanduser('~/Library/Caches') + if appname: + path = os.path.join(path, appname) + else: + path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific state dir for this application. 
+ + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + + for a discussion of issues. + + Typical user state directories are: + Mac OS X: same as user_data_dir + Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined + Win *: same as user_data_dir + + For Unix, we follow this Debian proposal + to extend the XDG spec and support $XDG_STATE_HOME. + + That means, by default "~/.local/state/". + """ + if system in ["win32", "darwin"]: + path = user_data_dir(appname, appauthor, None, roaming) + else: + path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): + r"""Return full path to the user-specific log dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "opinion" (boolean) can be False to disable the appending of + "Logs" to the base app data dir for Windows, and "log" to the + base cache dir for Unix. See discussion below. + + Typical user log directories are: + Mac OS X: ~/Library/Logs/ + Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined + Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs + Vista: C:\Users\\AppData\Local\\\Logs + + On Windows the only suggestion in the MSDN docs is that local settings + go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in + examples of what some windows apps use for a logs dir.) + + OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` + value for Windows and appends "log" to the user cache dir for Unix. + This can be disabled with the `opinion=False` option. 
+ """ + if system == "darwin": + path = os.path.join( + os.path.expanduser('~/Library/Logs'), + appname) + elif system == "win32": + path = user_data_dir(appname, appauthor, version) + version = False + if opinion: + path = os.path.join(path, "Logs") + else: + path = user_cache_dir(appname, appauthor, version) + version = False + if opinion: + path = os.path.join(path, "log") + if appname and version: + path = os.path.join(path, version) + return path + + +class AppDirs(object): + """Convenience wrapper for getting application dirs.""" + def __init__(self, appname=None, appauthor=None, version=None, + roaming=False, multipath=False): + self.appname = appname + self.appauthor = appauthor + self.version = version + self.roaming = roaming + self.multipath = multipath + + @property + def user_data_dir(self): + return user_data_dir(self.appname, self.appauthor, + version=self.version, roaming=self.roaming) + + @property + def site_data_dir(self): + return site_data_dir(self.appname, self.appauthor, + version=self.version, multipath=self.multipath) + + @property + def user_config_dir(self): + return user_config_dir(self.appname, self.appauthor, + version=self.version, roaming=self.roaming) + + @property + def site_config_dir(self): + return site_config_dir(self.appname, self.appauthor, + version=self.version, multipath=self.multipath) + + @property + def user_cache_dir(self): + return user_cache_dir(self.appname, self.appauthor, + version=self.version) + + @property + def user_state_dir(self): + return user_state_dir(self.appname, self.appauthor, + version=self.version) + + @property + def user_log_dir(self): + return user_log_dir(self.appname, self.appauthor, + version=self.version) + + +#---- internal support stuff + +def _get_win_folder_from_registry(csidl_name): + """This is a fallback technique at best. I'm not sure if using the + registry for this guarantees us the correct answer for all CSIDL_* + names. + """ + if PY3: + import winreg as _winreg + else: + import _winreg + + shell_folder_name = { + "CSIDL_APPDATA": "AppData", + "CSIDL_COMMON_APPDATA": "Common AppData", + "CSIDL_LOCAL_APPDATA": "Local AppData", + }[csidl_name] + + key = _winreg.OpenKey( + _winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" + ) + dir, type = _winreg.QueryValueEx(key, shell_folder_name) + return dir + + +def _get_win_folder_with_pywin32(csidl_name): + from win32com.shell import shellcon, shell + dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) + # Try to make this a unicode path because SHGetFolderPath does + # not return unicode strings when there is unicode data in the + # path. + try: + dir = unicode(dir) + + # Downgrade to short path name if have highbit chars. See + # . + has_high_char = False + for c in dir: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + try: + import win32api + dir = win32api.GetShortPathName(dir) + except ImportError: + pass + except UnicodeError: + pass + return dir + + +def _get_win_folder_with_ctypes(csidl_name): + import ctypes + + csidl_const = { + "CSIDL_APPDATA": 26, + "CSIDL_COMMON_APPDATA": 35, + "CSIDL_LOCAL_APPDATA": 28, + }[csidl_name] + + buf = ctypes.create_unicode_buffer(1024) + ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) + + # Downgrade to short path name if have highbit chars. See + # . 
+ has_high_char = False + for c in buf: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf2 = ctypes.create_unicode_buffer(1024) + if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): + buf = buf2 + + return buf.value + +def _get_win_folder_with_jna(csidl_name): + import array + from com.sun import jna + from com.sun.jna.platform import win32 + + buf_size = win32.WinDef.MAX_PATH * 2 + buf = array.zeros('c', buf_size) + shell = win32.Shell32.INSTANCE + shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) + dir = jna.Native.toString(buf.tostring()).rstrip("\0") + + # Downgrade to short path name if have highbit chars. See + # . + has_high_char = False + for c in dir: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf = array.zeros('c', buf_size) + kernel = win32.Kernel32.INSTANCE + if kernel.GetShortPathName(dir, buf, buf_size): + dir = jna.Native.toString(buf.tostring()).rstrip("\0") + + return dir + +if system == "win32": + try: + import win32com.shell + _get_win_folder = _get_win_folder_with_pywin32 + except ImportError: + try: + from ctypes import windll + _get_win_folder = _get_win_folder_with_ctypes + except ImportError: + try: + import com.sun.jna + _get_win_folder = _get_win_folder_with_jna + except ImportError: + _get_win_folder = _get_win_folder_from_registry + + +#---- self test code + +if __name__ == "__main__": + appname = "MyApp" + appauthor = "MyCompany" + + props = ("user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "site_data_dir", + "site_config_dir") + + print("-- app dirs %s --" % __version__) + + print("-- app dirs (with optional 'version')") + dirs = AppDirs(appname, appauthor, version="1.0") + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (without optional 'version')") + dirs = AppDirs(appname, appauthor) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (without optional 'appauthor')") + dirs = AppDirs(appname) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (with disabled 'appauthor')") + dirs = AppDirs(appname, appauthor=False) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py new file mode 100644 index 0000000000000000000000000000000000000000..c359122f97125ed630760029f7fd0689f1caefd3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py @@ -0,0 +1,26 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "21.2" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014-2019 %s" % __author__ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3c50c5dcfeeda2efed282200a5c5cc8c5f7542f7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py @@ -0,0 +1,25 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from .__about__ import ( + __author__, + __copyright__, + __email__, + __license__, + __summary__, + __title__, + __uri__, + __version__, +) + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b98a881e16da616d8038c2e66f28ebfdacc7190e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d43a6def3bc40ab8953aaeefbb408d12afdefc2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd0abf64092a7f1dfb506ca5e4a31074094bce5e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29da7d22092436cd6c2e89f9257f2b239296bd7b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d713965cf2a5c0eee363e2a507f33e2d5f1829fb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..643f3b9823f5c5ae678245b5634763b9f9a9775e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7109697236598a27c86fbbf1a7ecbfd7007f77b0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47377ef561a8e42f24e043b14c6a2c0c34d4ffdf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e387b35d2c8bedc829589fe30e9ba99b7d30d89 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py new file mode 100644 index 0000000000000000000000000000000000000000..4c379aa6f69ff56c8f19612002c6e3e939ea6012 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py @@ -0,0 +1,301 @@ +import collections +import functools +import os +import re +import struct +import sys +import warnings +from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple + + +# Python does not provide platform information at sufficient granularity to +# identify the architecture of the running executable in some cases, so we +# determine it dynamically by reading the information from the running +# process. This only applies on Linux, which uses the ELF format. +class _ELFFileHeader: + # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header + class _InvalidELFFileHeader(ValueError): + """ + An invalid ELF file header was found. 
+ """ + + ELF_MAGIC_NUMBER = 0x7F454C46 + ELFCLASS32 = 1 + ELFCLASS64 = 2 + ELFDATA2LSB = 1 + ELFDATA2MSB = 2 + EM_386 = 3 + EM_S390 = 22 + EM_ARM = 40 + EM_X86_64 = 62 + EF_ARM_ABIMASK = 0xFF000000 + EF_ARM_ABI_VER5 = 0x05000000 + EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + def __init__(self, file: IO[bytes]) -> None: + def unpack(fmt: str) -> int: + try: + data = file.read(struct.calcsize(fmt)) + result: Tuple[int, ...] = struct.unpack(fmt, data) + except struct.error: + raise _ELFFileHeader._InvalidELFFileHeader() + return result[0] + + self.e_ident_magic = unpack(">I") + if self.e_ident_magic != self.ELF_MAGIC_NUMBER: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_class = unpack("B") + if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_data = unpack("B") + if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_version = unpack("B") + self.e_ident_osabi = unpack("B") + self.e_ident_abiversion = unpack("B") + self.e_ident_pad = file.read(7) + format_h = "H" + format_i = "I" + format_q = "Q" + format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q + self.e_type = unpack(format_h) + self.e_machine = unpack(format_h) + self.e_version = unpack(format_i) + self.e_entry = unpack(format_p) + self.e_phoff = unpack(format_p) + self.e_shoff = unpack(format_p) + self.e_flags = unpack(format_i) + self.e_ehsize = unpack(format_h) + self.e_phentsize = unpack(format_h) + self.e_phnum = unpack(format_h) + self.e_shentsize = unpack(format_h) + self.e_shnum = unpack(format_h) + self.e_shstrndx = unpack(format_h) + + +def _get_elf_header() -> Optional[_ELFFileHeader]: + try: + with open(sys.executable, "rb") as f: + elf_header = _ELFFileHeader(f) + except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): + return None + return elf_header + + +def _is_linux_armhf() -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_ARM + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABIMASK + ) == elf_header.EF_ARM_ABI_VER5 + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD + ) == elf_header.EF_ARM_ABI_FLOAT_HARD + return result + + +def _is_linux_i686() -> bool: + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_386 + return result + + +def _have_compatible_abi(arch: str) -> bool: + if arch == "armv7l": + return _is_linux_armhf() + if arch == "i686": + return _is_linux_i686() + return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. +# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. 
+_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> Optional[str]: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". + version_string = os.confstr("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.split() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> Optional[str]: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +def _glibc_version_string() -> Optional[str]: + """Returns glibc version string, or None if not using glibc.""" + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _parse_glibc_version(version_str: str) -> Tuple[int, int]: + """Parse glibc version. + + We use a regexp instead of str.split because we want to discard any + random junk that might come after the minor version -- this might happen + in patched/forked versions of glibc (e.g. Linaro's version of glibc + uses version strings like "2.20-2014.11"). See gh-3588. 
+ """ + m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn( + "Expected glibc version with 2 components major.minor," + " got: %s" % version_str, + RuntimeWarning, + ) + return -1, -1 + return int(m.group("major")), int(m.group("minor")) + + +@functools.lru_cache() +def _get_glibc_version() -> Tuple[int, int]: + version_str = _glibc_version_string() + if version_str is None: + return (-1, -1) + return _parse_glibc_version(version_str) + + +# From PEP 513, PEP 600 +def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool: + sys_glibc = _get_glibc_version() + if sys_glibc < version: + return False + # Check for presence of _manylinux module. + try: + import _manylinux # noqa + except ImportError: + return True + if hasattr(_manylinux, "manylinux_compatible"): + result = _manylinux.manylinux_compatible(version[0], version[1], arch) + if result is not None: + return bool(result) + return True + if version == _GLibCVersion(2, 5): + if hasattr(_manylinux, "manylinux1_compatible"): + return bool(_manylinux.manylinux1_compatible) + if version == _GLibCVersion(2, 12): + if hasattr(_manylinux, "manylinux2010_compatible"): + return bool(_manylinux.manylinux2010_compatible) + if version == _GLibCVersion(2, 17): + if hasattr(_manylinux, "manylinux2014_compatible"): + return bool(_manylinux.manylinux2014_compatible) + return True + + +_LEGACY_MANYLINUX_MAP = { + # CentOS 7 w/ glibc 2.17 (PEP 599) + (2, 17): "manylinux2014", + # CentOS 6 w/ glibc 2.12 (PEP 571) + (2, 12): "manylinux2010", + # CentOS 5 w/ glibc 2.5 (PEP 513) + (2, 5): "manylinux1", +} + + +def platform_tags(linux: str, arch: str) -> Iterator[str]: + if not _have_compatible_abi(arch): + return + # Oldest glibc to be supported regardless of architecture is (2, 17). + too_old_glibc2 = _GLibCVersion(2, 16) + if arch in {"x86_64", "i686"}: + # On x86/i686 also oldest glibc to be supported is (2, 5). + too_old_glibc2 = _GLibCVersion(2, 4) + current_glibc = _GLibCVersion(*_get_glibc_version()) + glibc_max_list = [current_glibc] + # We can assume compatibility across glibc major versions. + # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 + # + # Build a list of maximum glibc versions so that we can + # output the canonical list of all glibc from current_glibc + # down to too_old_glibc2, including all intermediary versions. + for glibc_major in range(current_glibc.major - 1, 1, -1): + glibc_minor = _LAST_GLIBC_MINOR[glibc_major] + glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(tag, arch, glibc_version): + yield linux.replace("linux", tag) + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. 
+ if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(legacy_tag, arch, glibc_version): + yield linux.replace("linux", legacy_tag) diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py new file mode 100644 index 0000000000000000000000000000000000000000..85450fafa34733d81dd8d5c52637a464e5399efa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py @@ -0,0 +1,136 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. +""" + +import contextlib +import functools +import operator +import os +import re +import struct +import subprocess +import sys +from typing import IO, Iterator, NamedTuple, Optional, Tuple + + +def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: + return struct.unpack(fmt, f.read(struct.calcsize(fmt))) + + +def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: + """Detect musl libc location by parsing the Python executable. + + Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca + ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html + """ + f.seek(0) + try: + ident = _read_unpacked(f, "16B") + except struct.error: + return None + if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. + return None + f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. + + try: + # e_fmt: Format for program header. + # p_fmt: Format for section header. + # p_idx: Indexes to find p_type, p_offset, and p_filesz. + e_fmt, p_fmt, p_idx = { + 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. + 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. + }[ident[4]] + except KeyError: + return None + else: + p_get = operator.itemgetter(*p_idx) + + # Find the interpreter section and return its content. + try: + _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) + except struct.error: + return None + for i in range(e_phnum + 1): + f.seek(e_phoff + e_phentsize * i) + try: + p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) + except struct.error: + return None + if p_type != 3: # Not PT_INTERP. + continue + f.seek(p_offset) + interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") + if "musl" not in interpreter: + return None + return interpreter + return None + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> Optional[_MuslVersion]: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache() +def _get_musl_version(executable: str) -> Optional[_MuslVersion]: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. 
If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + with contextlib.ExitStack() as stack: + try: + f = stack.enter_context(open(executable, "rb")) + except IOError: + return None + ld = _parse_ld_musl_from_elf(f) + if not ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(arch: str) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param arch: Should be the part of platform tag after the ``linux_`` + prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a + prerequisite for the current platform to be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. + return + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..951549753afa255148c7c60d868303963f8c1813 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py @@ -0,0 +1,67 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py new file mode 100644 index 0000000000000000000000000000000000000000..18769b09a8a34f1e7d63cc61e62cd128ff5f9484 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py @@ -0,0 +1,304 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import operator +import os +import platform +import sys +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from pkg_resources.extern.pyparsing import ( # noqa: N817 + Forward, + Group, + Literal as L, + ParseException, + ParseResults, + QuotedString, + ZeroOrMore, + stringEnd, + stringStart, +) + +from .specifiers import InvalidSpecifier, Specifier + +__all__ = [ + "InvalidMarker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "Marker", + "default_environment", +] + +Operator = Callable[[str, str], bool] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. 
+ """ + + +class Node: + def __init__(self, value: Any) -> None: + self.value = value + + def __str__(self) -> str: + return str(self.value) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +VARIABLE = ( + L("implementation_version") + | L("platform_python_implementation") + | L("implementation_name") + | L("python_full_version") + | L("platform_release") + | L("platform_version") + | L("platform_machine") + | L("platform_system") + | L("python_version") + | L("sys_platform") + | L("os_name") + | L("os.name") # PEP-345 + | L("sys.platform") # PEP-345 + | L("platform.version") # PEP-345 + | L("platform.machine") # PEP-345 + | L("platform.python_implementation") # PEP-345 + | L("python_implementation") # undocumented setuptools legacy + | L("extra") # PEP-508 +) +ALIASES = { + "os.name": "os_name", + "sys.platform": "sys_platform", + "platform.version": "platform_version", + "platform.machine": "platform_machine", + "platform.python_implementation": "platform_python_implementation", + "python_implementation": "platform_python_implementation", +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker( + marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True +) -> str: + + assert isinstance(marker, (list, tuple, str)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. 
+ if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: Dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs) + + oper: Optional[Operator] = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +class Undefined: + pass + + +_undefined = Undefined() + + +def _get_env(environment: Dict[str, str], name: str) -> str: + value: Union[str, Undefined] = environment.get(name, _undefined) + + if isinstance(value, Undefined): + raise UndefinedEnvironmentName( + f"{name!r} does not exist in evaluation environment." + ) + + return value + + +def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: + groups: List[List[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + lhs_value = _get_env(environment, lhs.value) + rhs_value = rhs.value + else: + lhs_value = lhs.value + rhs_value = _get_env(environment, rhs.value) + + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info: "sys._version_info") -> str: + version = "{0.major}.{0.minor}.{0.micro}".format(info) + kind = info.releaselevel + if kind != "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment() -> Dict[str, str]: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": ".".join(platform.python_version_tuple()[:2]), + "sys_platform": sys.platform, + } + + +class Marker: + def __init__(self, marker: str) -> None: + try: + self._markers = _coerce_parse_result(MARKER.parseString(marker)) + except ParseException as e: + raise InvalidMarker( + f"Invalid marker: {marker!r}, parse error at " + f"{marker[e.loc : e.loc + 8]!r}" + ) + + def __str__(self) -> str: + return _format_marker(self._markers) + + def __repr__(self) -> str: + return f"" + + def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. 
environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. + """ + current_environment = default_environment() + if environment is not None: + current_environment.update(environment) + + return _evaluate_markers(self._markers, current_environment) diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..6af14ec4ce49e633d030611c26f0bd9beaf13e6a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py @@ -0,0 +1,146 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +import string +import urllib.parse +from typing import List, Optional as TOptional, Set + +from pkg_resources.extern.pyparsing import ( # noqa + Combine, + Literal as L, + Optional, + ParseException, + Regex, + Word, + ZeroOrMore, + originalTextFor, + stringEnd, + stringStart, +) + +from .markers import MARKER_EXPR, Marker +from .specifiers import LegacySpecifier, Specifier, SpecifierSet + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r"[^ ]+")("url") +URL = AT + URI + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine( + VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False +)("_raw_spec") +_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start : t._original_end]) +) +MARKER_SEPARATOR = SEMICOLON +MARKER = MARKER_SEPARATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd +# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see +# issue #104 +REQUIREMENT.parseString("x[]") + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. 
+ """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? + + def __init__(self, requirement_string: str) -> None: + try: + req = REQUIREMENT.parseString(requirement_string) + except ParseException as e: + raise InvalidRequirement( + f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' + ) + + self.name: str = req.name + if req.url: + parsed_url = urllib.parse.urlparse(req.url) + if parsed_url.scheme == "file": + if urllib.parse.urlunparse(parsed_url) != req.url: + raise InvalidRequirement("Invalid URL given") + elif not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc + ): + raise InvalidRequirement(f"Invalid URL: {req.url}") + self.url: TOptional[str] = req.url + else: + self.url = None + self.extras: Set[str] = set(req.extras.asList() if req.extras else []) + self.specifier: SpecifierSet = SpecifierSet(req.specifier) + self.marker: TOptional[Marker] = req.marker if req.marker else None + + def __str__(self) -> str: + parts: List[str] = [self.name] + + if self.extras: + formatted_extras = ",".join(sorted(self.extras)) + parts.append(f"[{formatted_extras}]") + + if self.specifier: + parts.append(str(self.specifier)) + + if self.url: + parts.append(f"@ {self.url}") + if self.marker: + parts.append(" ") + + if self.marker: + parts.append(f"; {self.marker}") + + return "".join(parts) + + def __repr__(self) -> str: + return f"" diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py new file mode 100644 index 0000000000000000000000000000000000000000..ce66bd4addbde1e332e9a42f6eb62adc471193e5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py @@ -0,0 +1,828 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import abc +import functools +import itertools +import re +import warnings +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Set, + Tuple, + TypeVar, + Union, +) + +from .utils import canonicalize_version +from .version import LegacyVersion, Version, parse + +ParsedVersion = Union[Version, LegacyVersion] +UnparsedVersion = Union[Version, LegacyVersion, str] +VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) +CallableOperator = Callable[[ParsedVersion, str], bool] + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. 
+ """ + + @abc.abstractmethod + def __ne__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. + """ + + @abc.abstractproperty + def prereleases(self) -> Optional[bool]: + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators: Dict[str, str] = {} + _regex: Pattern[str] + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self) -> str: + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre) + + def __str__(self) -> str: + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + return self._spec[0], canonicalize_version(self._spec[1]) + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def __ne__(self, other: object) -> bool: + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self) -> str: + return self._spec[0] + + @property + def version(self) -> str: + return self._spec[1] + + @property + def prereleases(self) -> Optional[bool]: + return self._prereleases + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __contains__(self, item: str) -> bool: + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + + # Determine if prereleases are to be allowed or not. 
+ if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + normalized_item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +class LegacySpecifier(_IndividualSpecifier): + + _regex_str = r""" + (?P(==|!=|<=|>=|<|>)) + \s* + (?P + [^,;\s)]* # Since this is a "legacy" specifier, and the version + # string can be just about anything, we match everything + # except for whitespace, a semi-colon for marker support, + # a closing paren since versions can be enclosed in + # them, and a comma since it's a version separator. 
+ ) + """ + + _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + } + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + super().__init__(spec, prereleases) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release", + DeprecationWarning, + ) + + def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion: + if not isinstance(version, LegacyVersion): + version = LegacyVersion(str(version)) + return version + + def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective == self._coerce_version(spec) + + def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective != self._coerce_version(spec) + + def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective <= self._coerce_version(spec) + + def _compare_greater_than_equal( + self, prospective: LegacyVersion, spec: str + ) -> bool: + return prospective >= self._coerce_version(spec) + + def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective < self._coerce_version(spec) + + def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective > self._coerce_version(spec) + + +def _require_version_compare( + fn: Callable[["Specifier", ParsedVersion, str], bool] +) -> Callable[["Specifier", ParsedVersion, str], bool]: + @functools.wraps(fn) + def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool: + if not isinstance(prospective, Version): + return False + return fn(self, prospective, spec) + + return wrapped + + +class Specifier(_IndividualSpecifier): + + _regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s]* # We just match everything, except for whitespace + # since we are only testing for strict identity. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + + # You cannot use a wild card and a dev or local version + # together so group them with a | and make them optional. + (?: + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + | + \.\* # Wild card syntax of .* + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? 
+ (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + @_require_version_compare + def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: + + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. + prefix = ".".join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + @_require_version_compare + def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: + + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + prospective = Version(prospective.public) + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. + split_spec = _version_split(spec[:-2]) # Remove the trailing .* + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + split_prospective = _version_split(str(prospective)) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = split_prospective[: len(split_spec)] + + # Pad out our two sides with zeros so that they both equal the same + # length. + padded_spec, padded_prospective = _pad_version( + split_spec, shortened_prospective + ) + + return padded_prospective == padded_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + @_require_version_compare + def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + @_require_version_compare + def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) <= Version(spec) + + @_require_version_compare + def _compare_greater_than_equal( + self, prospective: ParsedVersion, spec: str + ) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. 
+ return Version(prospective.public) >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self) -> bool: + + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. 
+ if parse(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version: str) -> List[str]: + result: List[str] = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) + + +class SpecifierSet(BaseSpecifier): + def __init__( + self, specifiers: str = "", prereleases: Optional[bool] = None + ) -> None: + + # Split on , to break each individual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier and falling back to a LegacySpecifier. + parsed: Set[_IndividualSpecifier] = set() + for specifier in split_specifiers: + try: + parsed.add(Specifier(specifier)) + except InvalidSpecifier: + parsed.add(LegacySpecifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + def __repr__(self) -> str: + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return "".format(str(self), pre) + + def __str__(self) -> str: + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self) -> int: + return hash(self._specs) + + def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": + if isinstance(other, str): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." 
+ ) + + return specifier + + def __eq__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __ne__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + + def __len__(self) -> int: + return len(self._specs) + + def __iter__(self) -> Iterator[_IndividualSpecifier]: + return iter(self._specs) + + @property + def prereleases(self) -> Optional[bool]: + + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __contains__(self, item: UnparsedVersion) -> bool: + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. 
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iterable
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases, and which will filter out LegacyVersion in general.
+        else:
+            filtered: List[VersionTypeVar] = []
+            found_prereleases: List[VersionTypeVar] = []
+
+            item: UnparsedVersion
+            parsed_version: Union[Version, LegacyVersion]
+
+            for item in iterable:
+                # Ensure that we have some kind of Version class for this item.
+                if not isinstance(item, (LegacyVersion, Version)):
+                    parsed_version = parse(item)
+                else:
+                    parsed_version = item
+
+                # Filter out any item which is parsed as a LegacyVersion
+                if isinstance(parsed_version, LegacyVersion):
+                    continue
+
+                # Store any item which is a pre-release for later unless we've
+                # already found a final version or we are accepting prereleases
+                if parsed_version.is_prerelease and not prereleases:
+                    if not filtered:
+                        found_prereleases.append(item)
+                else:
+                    filtered.append(item)
+
+            # If we've found no items except for pre-releases, then we'll go
+            # ahead and use the pre-releases
+            if not filtered and found_prereleases and prereleases is None:
+                return found_prereleases
+
+            return filtered
diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py
new file mode 100644
index 0000000000000000000000000000000000000000..e65890a90cd709489865750e953bf347720c75cd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py
@@ -0,0 +1,484 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import logging
+import platform
+import sys
+import sysconfig
+from importlib.machinery import EXTENSION_SUFFIXES
+from typing import (
+    Dict,
+    FrozenSet,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+    cast,
+)
+
+from . import _manylinux, _musllinux
+
+logger = logging.getLogger(__name__)
+
+PythonVersion = Sequence[int]
+MacVersion = Tuple[int, int]
+
+INTERPRETER_SHORT_NAMES: Dict[str, str] = {
+    "python": "py",  # Generic.
+    "cpython": "cp",
+    "pypy": "pp",
+    "ironpython": "ip",
+    "jython": "jy",
+}
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+class Tag:
+    """
+    A representation of the tag triple for a wheel.
+
+    Instances are considered immutable and thus are hashable. Equality checking
+    is also supported.
+    """
+
+    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
+
+    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
+        self._interpreter = interpreter.lower()
+        self._abi = abi.lower()
+        self._platform = platform.lower()
+        # The __hash__ of every single element in a Set[Tag] will be evaluated each time
+        # that a set calls its `.isdisjoint()` method, which may be called hundreds of
+        # times when scanning a page of links for packages with tags matching that
+        # Set[Tag]. Pre-computing the value here produces significant speedups for
+        # downstream consumers.
+ self._hash = hash((self._interpreter, self._abi, self._platform)) + + @property + def interpreter(self) -> str: + return self._interpreter + + @property + def abi(self) -> str: + return self._abi + + @property + def platform(self) -> str: + return self._platform + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Tag): + return NotImplemented + + return ( + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. + and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) + ) + + def __hash__(self) -> int: + return self._hash + + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" + + def __repr__(self) -> str: + return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) + + +def parse_tag(tag: str) -> FrozenSet[Tag]: + """ + Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. + + Returning a set is required due to the possibility that the tag is a + compressed tag set. + """ + tags = set() + interpreters, abis, platforms = tag.split("-") + for interpreter in interpreters.split("."): + for abi in abis.split("."): + for platform_ in platforms.split("."): + tags.add(Tag(interpreter, abi, platform_)) + return frozenset(tags) + + +def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: + value = sysconfig.get_config_var(name) + if value is None and warn: + logger.debug( + "Config variable '%s' is unset, Python ABI tag may be incorrect", name + ) + return value + + +def _normalize_string(string: str) -> str: + return string.replace(".", "_").replace("-", "_") + + +def _abi3_applies(python_version: PythonVersion) -> bool: + """ + Determine if the Python version supports abi3. + + PEP 384 was first implemented in Python 3.2. + """ + return len(python_version) > 1 and tuple(python_version) >= (3, 2) + + +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: + py_version = tuple(py_version) # To allow for version comparison. + abis = [] + version = _version_nodot(py_version[:2]) + debug = pymalloc = ucs4 = "" + with_debug = _get_config_var("Py_DEBUG", warn) + has_refcount = hasattr(sys, "gettotalrefcount") + # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled + # extension modules is the best option. + # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 + has_ext = "_d.pyd" in EXTENSION_SUFFIXES + if with_debug or (with_debug is None and (has_refcount or has_ext)): + debug = "d" + if py_version < (3, 8): + with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) + if with_pymalloc or with_pymalloc is None: + pymalloc = "m" + if py_version < (3, 3): + unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) + if unicode_size == 4 or ( + unicode_size is None and sys.maxunicode == 0x10FFFF + ): + ucs4 = "u" + elif debug: + # Debug builds can also load "normal" extension modules. + # We can also assume no UCS-4 or pymalloc requirement. + abis.append(f"cp{version}") + abis.insert( + 0, + "cp{version}{debug}{pymalloc}{ucs4}".format( + version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 + ), + ) + return abis + + +def cpython_tags( + python_version: Optional[PythonVersion] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a CPython interpreter. 
+
+    The tags consist of:
+    - cp<python_version>-<abi>-<platform>
+    - cp<python_version>-abi3-<platform>
+    - cp<python_version>-none-<platform>
+    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
+
+    If python_version only specifies a major version then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+    their normal position and not at the beginning.
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+
+    interpreter = "cp{}".format(_version_nodot(python_version[:2]))
+
+    if abis is None:
+        if len(python_version) > 1:
+            abis = _cpython_abis(python_version, warn)
+        else:
+            abis = []
+    abis = list(abis)
+    # 'abi3' and 'none' are explicitly handled later.
+    for explicit_abi in ("abi3", "none"):
+        try:
+            abis.remove(explicit_abi)
+        except ValueError:
+            pass
+
+    platforms = list(platforms or platform_tags())
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+    if _abi3_applies(python_version):
+        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
+    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
+
+    if _abi3_applies(python_version):
+        for minor_version in range(python_version[1] - 1, 1, -1):
+            for platform_ in platforms:
+                interpreter = "cp{version}".format(
+                    version=_version_nodot((python_version[0], minor_version))
+                )
+                yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi() -> Iterator[str]:
+    abi = sysconfig.get_config_var("SOABI")
+    if abi:
+        yield _normalize_string(abi)
+
+
+def generic_tags(
+    interpreter: Optional[str] = None,
+    abis: Optional[Iterable[str]] = None,
+    platforms: Optional[Iterable[str]] = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a generic interpreter.
+
+    The tags consist of:
+    - <interpreter>-<abi>-<platform>
+
+    The "none" ABI will be added if it was not explicitly provided.
+    """
+    if not interpreter:
+        interp_name = interpreter_name()
+        interp_version = interpreter_version(warn=warn)
+        interpreter = "".join([interp_name, interp_version])
+    if abis is None:
+        abis = _generic_abi()
+    platforms = list(platforms or platform_tags())
+    abis = list(abis)
+    if "none" not in abis:
+        abis.append("none")
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+    """
+    Yields Python versions in descending order.
+
+    After the latest version, the major-only version will be yielded, and then
+    all previous versions of that major version.
+    """
+    if len(py_version) > 1:
+        yield "py{version}".format(version=_version_nodot(py_version[:2]))
+    yield "py{major}".format(major=py_version[0])
+    if len(py_version) > 1:
+        for minor in range(py_version[1] - 1, -1, -1):
+            yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
+
+
+def compatible_tags(
+    python_version: Optional[PythonVersion] = None,
+    interpreter: Optional[str] = None,
+    platforms: Optional[Iterable[str]] = None,
+) -> Iterator[Tag]:
+    """
+    Yields the sequence of tags that are compatible with a specific version of Python.
+
+    The tags consist of:
+    - py*-none-<platform>
+    - <interpreter>-none-any  # ... if `interpreter` is provided.
+ - py*-none-any + """ + if not python_version: + python_version = sys.version_info[:2] + platforms = list(platforms or platform_tags()) + for version in _py_interpreter_range(python_version): + for platform_ in platforms: + yield Tag(version, "none", platform_) + if interpreter: + yield Tag(interpreter, "none", "any") + for version in _py_interpreter_range(python_version): + yield Tag(version, "none", "any") + + +def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: + if not is_32bit: + return arch + + if arch.startswith("ppc"): + return "ppc" + + return "i386" + + +def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: + formats = [cpu_arch] + if cpu_arch == "x86_64": + if version < (10, 4): + return [] + formats.extend(["intel", "fat64", "fat32"]) + + elif cpu_arch == "i386": + if version < (10, 4): + return [] + formats.extend(["intel", "fat32", "fat"]) + + elif cpu_arch == "ppc64": + # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? + if version > (10, 5) or version < (10, 4): + return [] + formats.append("fat64") + + elif cpu_arch == "ppc": + if version > (10, 6): + return [] + formats.extend(["fat32", "fat"]) + + if cpu_arch in {"arm64", "x86_64"}: + formats.append("universal2") + + if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: + formats.append("universal") + + return formats + + +def mac_platforms( + version: Optional[MacVersion] = None, arch: Optional[str] = None +) -> Iterator[str]: + """ + Yields the platform tags for a macOS system. + + The `version` parameter is a two-item tuple specifying the macOS version to + generate platform tags for. The `arch` parameter is the CPU architecture to + generate platform tags for. Both parameters default to the appropriate value + for the current system. + """ + version_str, _, cpu_arch = platform.mac_ver() + if version is None: + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + else: + version = version + if arch is None: + arch = _mac_arch(cpu_arch) + else: + arch = arch + + if (10, 0) <= version and version < (11, 0): + # Prior to Mac OS 11, each yearly release of Mac OS bumped the + # "minor" version number. The major version was always 10. + for minor_version in range(version[1], -1, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=10, minor=minor_version, binary_format=binary_format + ) + + if version >= (11, 0): + # Starting with Mac OS 11, each yearly release bumps the major version + # number. The minor versions are now the midyear updates. + for major_version in range(version[0], 10, -1): + compat_version = major_version, 0 + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=major_version, minor=0, binary_format=binary_format + ) + + if version >= (11, 0): + # Mac OS 11 on x86_64 is compatible with binaries from previous releases. + # Arm64 support was introduced in 11.0, so no Arm binaries from previous + # releases exist. + # + # However, the "universal2" binary format can have a + # macOS version earlier than 11.0 when the x86_64 part of the binary supports + # that version of macOS. 
+ if arch == "x86_64": + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + else: + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_format = "universal2" + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + + +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) + if is_32bit: + if linux == "linux_x86_64": + linux = "linux_i686" + elif linux == "linux_aarch64": + linux = "linux_armv7l" + _, arch = linux.split("_", 1) + yield from _manylinux.platform_tags(linux, arch) + yield from _musllinux.platform_tags(arch) + yield linux + + +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) + + +def platform_tags() -> Iterator[str]: + """ + Provides the platform tags for this installation. + """ + if platform.system() == "Darwin": + return mac_platforms() + elif platform.system() == "Linux": + return _linux_platforms() + else: + return _generic_platforms() + + +def interpreter_name() -> str: + """ + Returns the name of the running interpreter. + """ + name = sys.implementation.name + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def interpreter_version(*, warn: bool = False) -> str: + """ + Returns the version of the running interpreter. + """ + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version: PythonVersion) -> str: + return "".join(map(str, version)) + + +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: + """ + Returns the sequence of tag triples for the running interpreter. + + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + + interp_name = interpreter_name() + if interp_name == "cp": + yield from cpython_tags(warn=warn) + else: + yield from generic_tags() + + yield from compatible_tags() diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bab11b80c60f10a4f3bccb12eb5b17c48a449767 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py @@ -0,0 +1,136 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +from typing import FrozenSet, NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. 
+ """ + + +_canonicalize_regex = re.compile(r"[-_.]+") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str) -> NormalizedName: + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def canonicalize_version(version: Union[Version, str]) -> str: + """ + This is very similar to Version.__str__, but has one subtle difference + with the way it handles the release segment. + """ + if isinstance(version, str): + try: + parsed = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + else: + parsed = version + + parts = [] + + # Epoch + if parsed.epoch != 0: + parts.append(f"{parsed.epoch}!") + + # Release segment + # NB: This strips trailing '.0's to normalize + parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) + + # Pre-release + if parsed.pre is not None: + parts.append("".join(str(x) for x in parsed.pre)) + + # Post-release + if parsed.post is not None: + parts.append(f".post{parsed.post}") + + # Development release + if parsed.dev is not None: + parts.append(f".dev{parsed.dev}") + + # Local version segment + if parsed.local is not None: + parts.append(f"+{parsed.local}") + + return "".join(parts) + + +def parse_wheel_filename( + filename: str, +) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: + if not filename.endswith(".whl"): + raise InvalidWheelFilename( + f"Invalid wheel filename (extension must be '.whl'): {filename}" + ) + + filename = filename[:-4] + dashes = filename.count("-") + if dashes not in (4, 5): + raise InvalidWheelFilename( + f"Invalid wheel filename (wrong number of parts): {filename}" + ) + + parts = filename.split("-", dashes - 2) + name_part = parts[0] + # See PEP 427 for the rules on escaping the project name + if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: + raise InvalidWheelFilename(f"Invalid project name: {filename}") + name = canonicalize_name(name_part) + version = Version(parts[1]) + if dashes == 5: + build_part = parts[2] + build_match = _build_tag_regex.match(build_part) + if build_match is None: + raise InvalidWheelFilename( + f"Invalid build number: {build_part} in '{filename}'" + ) + build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) + else: + build = () + tags = parse_tag(parts[-1]) + return (name, version, build, tags) + + +def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: + raise InvalidSdistFilename( + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename}" + ) + + # We are requiring a PEP 440 version, which cannot contain dashes, + # so we split on the last dash. 
+ name_part, sep, version_part = file_stem.rpartition("-") + if not sep: + raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") + + name = canonicalize_name(name_part) + version = Version(version_part) + return (name, version) diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py new file mode 100644 index 0000000000000000000000000000000000000000..de9a09a4ed3b078b37e7490a6686f660ae935aca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py @@ -0,0 +1,504 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import collections +import itertools +import re +import warnings +from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] + +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = Union[ + NegativeInfinityType, + Tuple[ + Union[ + SubLocalType, + Tuple[SubLocalType, str], + Tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = Tuple[ + int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +LegacyCmpKey = Tuple[int, Tuple[str, ...]] +VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool +] + +_Version = collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version: str) -> Union["LegacyVersion", "Version"]: + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion: + _key: Union[CmpKey, LegacyCmpKey] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. 
+    def __lt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+class LegacyVersion(_BaseVersion):
+    def __init__(self, version: str) -> None:
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+        warnings.warn(
+            "Creating a LegacyVersion has been deprecated and will be "
+            "removed in the next major release",
+            DeprecationWarning,
+        )
+
+    def __str__(self) -> str:
+        return self._version
+
+    def __repr__(self) -> str:
+        return f"<LegacyVersion('{self}')>"
+
+    @property
+    def public(self) -> str:
+        return self._version
+
+    @property
+    def base_version(self) -> str:
+        return self._version
+
+    @property
+    def epoch(self) -> int:
+        return -1
+
+    @property
+    def release(self) -> None:
+        return None
+
+    @property
+    def pre(self) -> None:
+        return None
+
+    @property
+    def post(self) -> None:
+        return None
+
+    @property
+    def dev(self) -> None:
+        return None
+
+    @property
+    def local(self) -> None:
+        return None
+
+    @property
+    def is_prerelease(self) -> bool:
+        return False
+
+    @property
+    def is_postrelease(self) -> bool:
+        return False
+
+    @property
+    def is_devrelease(self) -> bool:
+        return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+    "pre": "c",
+    "preview": "c",
+    "-": "final-",
+    "rc": "c",
+    "dev": "@",
+}
+
+
+def _parse_version_parts(s: str) -> Iterator[str]:
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version: str) -> LegacyCmpKey:
+
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively sort the LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version in setuptools prior
+    # to its adoption of the packaging library.
+    parts: List[str] = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+
+    return epoch, tuple(parts)
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
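+# Editor's illustration (not part of the vendored module): what the named
+# groups in the pattern above capture, using only the standard-library `re`
+# module. This is equivalent to the `Version._regex` compiled below.
+#
+#     >>> import re
+#     >>> rx = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+#     >>> m = rx.search("1!2.0rc1.post3.dev4+ubuntu.7")
+#     >>> m.group("epoch"), m.group("release"), m.group("pre_l"), m.group("local")
+#     ('1', '2.0', 'rc', 'ubuntu.7')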
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f""
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
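+    # Editor's note (illustrative, not in the upstream source): because
+    # __str__ is rebuilt from the parsed fields, it also normalizes alternate
+    # spellings, e.g.
+    #
+    #     >>> str(Version("1.0.0-ALPHA1"))
+    #     '1.0.0a1'
+    #     >>> str(Version("1.0-post2"))
+    #     '1.0.post2'
+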
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
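+    # Editor's illustration (not upstream code): public strips only the local
+    # segment, while base_version additionally drops pre/post/dev segments:
+    #
+    #     >>> v = Version("1!2.1rc1+local.3")
+    #     >>> v.public, v.base_version
+    #     ('1!2.1rc1', '1!2.1')
+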
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
+
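+# Editor's illustration (not upstream code): the release-component properties
+# above fall back to 0 when a segment is absent:
+#
+#     >>> Version("3.9").major, Version("3.9").minor, Version("3.9").micro
+#     (3, 9, 0)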
+
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
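+# Editor's illustration (not upstream code) of the normalization performed by
+# _parse_letter_version:
+#
+#     >>> _parse_letter_version("beta", None)
+#     ('b', 0)
+#     >>> _parse_letter_version("rev", "5")
+#     ('post', 5)
+#     >>> _parse_letter_version(None, "1")   # implicit post release, e.g. "1.0-1"
+#     ('post', 1)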
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
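+# Editor's illustration (not upstream code): numeric parts become ints so that
+# local segments compare numerically where possible:
+#
+#     >>> _parse_local_version("ubuntu.16.04_1")
+#     ('ubuntu', 16, 4, 1)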
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all of the now
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # rest back into the correct order, and use the resulting tuple as our
+    # sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
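+
+
+# Editor's illustration (not upstream code): the infinity sentinels above give
+# the PEP 440 ordering one would expect, e.g.
+#
+#     >>> sorted(Version(v) for v in ["1.0.post1", "1.0", "1.0a1", "1.0.dev0"])
+#     [<Version('1.0.dev0')>, <Version('1.0a1')>, <Version('1.0')>, <Version('1.0.post1')>]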
diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf75e1e5fcbfe7eac41d2a9e446c5c980741087b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing.py
@@ -0,0 +1,5742 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2018  Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form 
+C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements 
+(L{'+'} operator gives L{And} expressions, strings are auto-converted to
+L{Literal} expressions)::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+ - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
+ - construct character word-group expressions using the L{Word} class
+ - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
+ - use L{'+'}, L{'|'}, L{'^'}, and L{'&'} operators to combine simple expressions into more complex ones
+ - associate names with your parsed results using L{ParserElement.setResultsName}
+ - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
+ - find more useful common expressions in the L{pyparsing_common} namespace class
+"""
+
+__version__ = "2.2.1"
+__versionTime__ = "18 Sep 2018 00:49 UTC"
+__author__ = "Paul McGuire "
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+    from _thread import RLock
+except ImportError:
+    from threading import RLock
+
+try:
+    # Python 3
+    from collections.abc import Iterable
+    from collections.abc import MutableMapping
+except ImportError:
+    # Python 2.7
+    from collections import Iterable
+    from collections import MutableMapping
+
+try:
+    from collections import OrderedDict as _OrderedDict
+except ImportError:
+    try:
+        from ordereddict import OrderedDict as _OrderedDict
+    except ImportError:
+        _OrderedDict = None
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+    _MAX_INT = sys.maxsize
+    basestring = str
+    unichr = chr
+    _ustr = str
+
+    # build list of single arg builtins, that can be used as parse actions
+    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+    _MAX_INT = sys.maxint
+    range = xrange
+
+    def _ustr(obj):
+        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
+           then < returns the unicode object | encodes it with the default encoding | ... >.
+        """
+        if isinstance(obj,unicode):
+            return obj
+
+        try:
+            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+            # it won't break any existing code.
+            return str(obj)
+
+        except UnicodeEncodeError:
+            # Else encode it
+            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+            xmlcharref = Regex(r'&#\d+;')
+            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+            return xmlcharref.transformString(ret)
+
+    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+    singleArgBuiltins = []
+    import __builtin__
+    for fname in "sum len sorted reversed list tuple set any all min max".split():
+        try:
+            singleArgBuiltins.append(getattr(__builtin__,fname))
+        except AttributeError:
+            continue
+            
+_generatorType = type((y for y in range(1)))
+ 
+def _xml_escape(data):
+    """Escape &, <, >, ", ', etc. in a string of data."""
+
+    # ampersand must be replaced first
+    from_symbols = '&><"\''
+    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+    for from_,to_ in zip(from_symbols, to_symbols):
+        data = data.replace(from_, to_)
+    return data
+
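+# Editor's illustration (not part of the original module):
+#
+#     >>> _xml_escape('a < b & "c"')
+#     'a &lt; b &amp; &quot;c&quot;'
+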
+class _Constants(object):
+    pass
+
+alphas     = string.ascii_uppercase + string.ascii_lowercase
+nums       = "0123456789"
+hexnums    = nums + "ABCDEFabcdef"
+alphanums  = alphas + nums
+_bslash    = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+    """base exception class for all parsing runtime exceptions"""
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, pstr, loc=0, msg=None, elem=None ):
+        self.loc = loc
+        if msg is None:
+            self.msg = pstr
+            self.pstr = ""
+        else:
+            self.msg = msg
+            self.pstr = pstr
+        self.parserElement = elem
+        self.args = (pstr, loc, msg)
+
+    @classmethod
+    def _from_exception(cls, pe):
+        """
+        internal factory method to simplify creating one type of ParseException 
+        from another - avoids having __init__ signature conflicts among subclasses
+        """
+        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+    def __getattr__( self, aname ):
+        """supported attributes by name are:
+            - lineno - returns the line number of the exception text
+            - col - returns the column number of the exception text
+            - line - returns the line containing the exception text
+        """
+        if( aname == "lineno" ):
+            return lineno( self.loc, self.pstr )
+        elif( aname in ("col", "column") ):
+            return col( self.loc, self.pstr )
+        elif( aname == "line" ):
+            return line( self.loc, self.pstr )
+        else:
+            raise AttributeError(aname)
+
+    def __str__( self ):
+        return "%s (at char %d), (line:%d, col:%d)" % \
+                ( self.msg, self.loc, self.lineno, self.column )
+    def __repr__( self ):
+        return _ustr(self)
+    def markInputline( self, markerString = ">!<" ):
+        """Extracts the exception line from the input string, and marks
+           the location of the exception with a special symbol.
+        """
+        line_str = self.line
+        line_column = self.column - 1
+        if markerString:
+            line_str = "".join((line_str[:line_column],
+                                markerString, line_str[line_column:]))
+        return line_str.strip()
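+
+    # Editor's note (illustrative, not upstream code): for input "12 ab" with a
+    # parse failure at column 4, markInputline() returns '12 >!<ab', inserting
+    # the marker just before the offending character.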
+    def __dir__(self):
+        return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+    """
+    Exception thrown when a parse expression doesn't match the input string;
+    supported attributes by name are:
+     - lineno - returns the line number of the exception text
+     - col - returns the column number of the exception text
+     - line - returns the line containing the exception text
+        
+    Example::
+        try:
+            Word(nums).setName("integer").parseString("ABC")
+        except ParseException as pe:
+            print(pe)
+            print("column: {}".format(pe.col))
+            
+    prints::
+       Expected integer (at char 0), (line:1, col:1)
+        column: 1
+    """
+    pass
+
+class ParseFatalException(ParseBaseException):
+    """user-throwable exception thrown when inconsistent parse content
+       is found; stops all parsing immediately"""
+    pass
+
+class ParseSyntaxException(ParseFatalException):
+    """just like L{ParseFatalException}, but thrown internally when an
+       L{ErrorStop} ('-' operator) indicates that parsing is to stop 
+       immediately because an unbacktrackable syntax error has been found"""
+    pass
+
+#~ class ReparseException(ParseBaseException):
+    #~ """Experimental class - parse actions can raise this exception to cause
+       #~ pyparsing to reparse the input string:
+        #~ - with a modified input string, and/or
+        #~ - with a modified start location
+       #~ Set the values of the ReparseException in the constructor, and raise the
+       #~ exception in a parse action to cause pyparsing to use the new string/location.
+       #~ Setting the values as None causes no change to be made.
+       #~ """
+    #~ def __init_( self, newstring, restartLoc ):
+        #~ self.newParseText = newstring
+        #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
+    def __init__( self, parseElementList ):
+        self.parseElementTrace = parseElementList
+
+    def __str__( self ):
+        return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+    def __init__(self,p1,p2):
+        self.tup = (p1,p2)
+    def __getitem__(self,i):
+        return self.tup[i]
+    def __repr__(self):
+        return repr(self.tup[0])
+    def setOffset(self,i):
+        self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+    """
+    Structured parse results, to provide multiple means of access to the parsed data:
+       - as a list (C{len(results)})
+       - by list index (C{results[0], results[1]}, etc.)
+       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
+
+    Example::
+        integer = Word(nums)
+        date_str = (integer.setResultsName("year") + '/' 
+                        + integer.setResultsName("month") + '/' 
+                        + integer.setResultsName("day"))
+        # equivalent form:
+        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+        # parseString returns a ParseResults object
+        result = date_str.parseString("1999/12/31")
+
+        def test(s, fn=repr):
+            print("%s -> %s" % (s, fn(eval(s))))
+        test("list(result)")
+        test("result[0]")
+        test("result['month']")
+        test("result.day")
+        test("'month' in result")
+        test("'minutes' in result")
+        test("result.dump()", str)
+    prints::
+        list(result) -> ['1999', '/', '12', '/', '31']
+        result[0] -> '1999'
+        result['month'] -> '12'
+        result.day -> '31'
+        'month' in result -> True
+        'minutes' in result -> False
+        result.dump() -> ['1999', '/', '12', '/', '31']
+        - day: 31
+        - month: 12
+        - year: 1999
+    """
+    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+        if isinstance(toklist, cls):
+            return toklist
+        retobj = object.__new__(cls)
+        retobj.__doinit = True
+        return retobj
+
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+        if self.__doinit:
+            self.__doinit = False
+            self.__name = None
+            self.__parent = None
+            self.__accumNames = {}
+            self.__asList = asList
+            self.__modal = modal
+            if toklist is None:
+                toklist = []
+            if isinstance(toklist, list):
+                self.__toklist = toklist[:]
+            elif isinstance(toklist, _generatorType):
+                self.__toklist = list(toklist)
+            else:
+                self.__toklist = [toklist]
+            self.__tokdict = dict()
+
+        if name is not None and name:
+            if not modal:
+                self.__accumNames[name] = 0
+            if isinstance(name,int):
+                name = _ustr(name) # will always return a str, but use _ustr for consistency
+            self.__name = name
+            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+                if isinstance(toklist,basestring):
+                    toklist = [ toklist ]
+                if asList:
+                    if isinstance(toklist,ParseResults):
+                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+                    else:
+                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+                    self[name].__name = name
+                else:
+                    try:
+                        self[name] = toklist[0]
+                    except (KeyError,TypeError,IndexError):
+                        self[name] = toklist
+
+    def __getitem__( self, i ):
+        if isinstance( i, (int,slice) ):
+            return self.__toklist[i]
+        else:
+            if i not in self.__accumNames:
+                return self.__tokdict[i][-1][0]
+            else:
+                return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+    def __setitem__( self, k, v, isinstance=isinstance ):
+        if isinstance(v,_ParseResultsWithOffset):
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+            sub = v[0]
+        elif isinstance(k,(int,slice)):
+            self.__toklist[k] = v
+            sub = v
+        else:
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+            sub = v
+        if isinstance(sub,ParseResults):
+            sub.__parent = wkref(self)
+
+    def __delitem__( self, i ):
+        if isinstance(i,(int,slice)):
+            mylen = len( self.__toklist )
+            del self.__toklist[i]
+
+            # convert int to slice
+            if isinstance(i, int):
+                if i < 0:
+                    i += mylen
+                i = slice(i, i+1)
+            # get removed indices
+            removed = list(range(*i.indices(mylen)))
+            removed.reverse()
+            # fixup indices in token dictionary
+            for name,occurrences in self.__tokdict.items():
+                for j in removed:
+                    for k, (value, position) in enumerate(occurrences):
+                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+        else:
+            del self.__tokdict[i]
+
+    def __contains__( self, k ):
+        return k in self.__tokdict
+
+    def __len__( self ): return len( self.__toklist )
+    def __bool__(self): return ( not not self.__toklist )
+    __nonzero__ = __bool__
+    def __iter__( self ): return iter( self.__toklist )
+    def __reversed__( self ): return iter( self.__toklist[::-1] )
+    def _iterkeys( self ):
+        if hasattr(self.__tokdict, "iterkeys"):
+            return self.__tokdict.iterkeys()
+        else:
+            return iter(self.__tokdict)
+
+    def _itervalues( self ):
+        return (self[k] for k in self._iterkeys())
+            
+    def _iteritems( self ):
+        return ((k, self[k]) for k in self._iterkeys())
+
+    if PY_3:
+        keys = _iterkeys       
+        """Returns an iterator of all named result keys (Python 3.x only)."""
+
+        values = _itervalues
+        """Returns an iterator of all named result values (Python 3.x only)."""
+
+        items = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
+
+    else:
+        iterkeys = _iterkeys
+        """Returns an iterator of all named result keys (Python 2.x only)."""
+
+        itervalues = _itervalues
+        """Returns an iterator of all named result values (Python 2.x only)."""
+
+        iteritems = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+        def keys( self ):
+            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iterkeys())
+
+        def values( self ):
+            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.itervalues())
+                
+        def items( self ):
+            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iteritems())
+
+    def haskeys( self ):
+        """Since keys() returns an iterator, this method is helpful in bypassing
+           code that looks for the existence of any defined results names."""
+        return bool(self.__tokdict)
+        
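+    # haskeys sketch (hypothetical grammar): distinguishes "has tokens" from
+    # "has named results":
+    #   result = (Word(nums)("value")).parseString("42")
+    #   bool(result)      # -> True, tokens were matched
+    #   result.haskeys()  # -> True, a results name was assigned
+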
+    def pop( self, *args, **kwargs):
+        """
+        Removes and returns item at specified index (default=C{last}).
+        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
+        argument or an integer argument, it will use C{list} semantics
+        and pop tokens from the list of parsed tokens. If passed a 
+        non-integer argument (most likely a string), it will use C{dict}
+        semantics and pop the corresponding value from any defined 
+        results names. A second default return value argument is 
+        supported, just as in C{dict.pop()}.
+
+        Example::
+            def remove_first(tokens):
+                tokens.pop(0)
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+            label = Word(alphas)
+            patt = label("LABEL") + OneOrMore(Word(nums))
+            print(patt.parseString("AAB 123 321").dump())
+
+            # Use pop() in a parse action to remove named result (note that corresponding value is not
+            # removed from list form of results)
+            def remove_LABEL(tokens):
+                tokens.pop("LABEL")
+                return tokens
+            patt.addParseAction(remove_LABEL)
+            print(patt.parseString("AAB 123 321").dump())
+        prints::
+            ['AAB', '123', '321']
+            - LABEL: AAB
+
+            ['AAB', '123', '321']
+        """
+        if not args:
+            args = [-1]
+        for k,v in kwargs.items():
+            if k == 'default':
+                args = (args[0], v)
+            else:
+                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+        if (isinstance(args[0], int) or 
+                        len(args) == 1 or 
+                        args[0] in self):
+            index = args[0]
+            ret = self[index]
+            del self[index]
+            return ret
+        else:
+            defaultvalue = args[1]
+            return defaultvalue
+
+    def get(self, key, defaultValue=None):
+        """
+        Returns named result matching the given key, or if there is no
+        such name, then returns the given C{defaultValue} or C{None} if no
+        C{defaultValue} is specified.
+
+        Similar to C{dict.get()}.
+        
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            result = date_str.parseString("1999/12/31")
+            print(result.get("year")) # -> '1999'
+            print(result.get("hour", "not specified")) # -> 'not specified'
+            print(result.get("hour")) # -> None
+        """
+        if key in self:
+            return self[key]
+        else:
+            return defaultValue
+
+    def insert( self, index, insStr ):
+        """
+        Inserts new element at location index in the list of parsed tokens.
+        
+        Similar to C{list.insert()}.
+
+        Example::
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+            # use a parse action to insert the parse location in the front of the parsed results
+            def insert_locn(locn, tokens):
+                tokens.insert(0, locn)
+            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+        """
+        self.__toklist.insert(index, insStr)
+        # fixup indices in token dictionary
+        for name,occurrences in self.__tokdict.items():
+            for k, (value, position) in enumerate(occurrences):
+                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+    def append( self, item ):
+        """
+        Add single element to end of ParseResults list of elements.
+
+        Example::
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            
+            # use a parse action to compute the sum of the parsed integers, and add it to the end
+            def append_sum(tokens):
+                tokens.append(sum(map(int, tokens)))
+            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+        """
+        self.__toklist.append(item)
+
+    def extend( self, itemseq ):
+        """
+        Add sequence of elements to end of ParseResults list of elements.
+
+        Example::
+            patt = OneOrMore(Word(alphas))
+            
+            # use a parse action to append the reverse of the matched strings, to make a palindrome
+            def make_palindrome(tokens):
+                tokens.extend(reversed([t[::-1] for t in tokens]))
+                return ''.join(tokens)
+            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+        """
+        if isinstance(itemseq, ParseResults):
+            self += itemseq
+        else:
+            self.__toklist.extend(itemseq)
+
+    def clear( self ):
+        """
+        Clear all elements and results names.
+        """
+        del self.__toklist[:]
+        self.__tokdict.clear()
+
+    def __getattr__( self, name ):
+        try:
+            return self[name]
+        except KeyError:
+            return ""
+
+    def __add__( self, other ):
+        ret = self.copy()
+        ret += other
+        return ret
+
+    def __iadd__( self, other ):
+        if other.__tokdict:
+            offset = len(self.__toklist)
+            addoffset = lambda a: offset if a<0 else a+offset
+            otheritems = other.__tokdict.items()
+            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+                                for (k,vlist) in otheritems for v in vlist]
+            for k,v in otherdictitems:
+                self[k] = v
+                if isinstance(v[0],ParseResults):
+                    v[0].__parent = wkref(self)
+            
+        self.__toklist += other.__toklist
+        self.__accumNames.update( other.__accumNames )
+        return self
+
+    def __radd__(self, other):
+        if isinstance(other,int) and other == 0:
+            # useful for merging many ParseResults using sum() builtin
+            return self.copy()
+        else:
+            # this may raise a TypeError - so be it
+            return other + self
+        
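+    # Addition sketch (hypothetical inputs): '+' concatenates token lists and
+    # re-bases the named-result offsets of the right operand:
+    #   a = Word(nums)("n").parseString("1")
+    #   b = Word(alphas)("w").parseString("x")
+    #   (a + b).asList()  # -> ['1', 'x'], names 'n' and 'w' both preserved
+    # sum([a, b]) also works, via __radd__'s handling of sum()'s integer 0 seed.
+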
+    def __repr__( self ):
+        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+    def __str__( self ):
+        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+    def _asStringList( self, sep='' ):
+        out = []
+        for item in self.__toklist:
+            if out and sep:
+                out.append(sep)
+            if isinstance( item, ParseResults ):
+                out += item._asStringList()
+            else:
+                out.append( _ustr(item) )
+        return out
+
+    def asList( self ):
+        """
+        Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+        Example::
+            patt = OneOrMore(Word(alphas))
+            result = patt.parseString("sldkj lsdkj sldkj")
+            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+            
+            # Use asList() to create an actual list
+            result_list = result.asList()
+            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+        """
+        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+    def asDict( self ):
+        """
+        Returns the named parse results as a nested dictionary.
+
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+            
+            result = date_str.parseString('12/31/1999')
+            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+            
+            result_dict = result.asDict()
+            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
+            import json
+            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+        """
+        if PY_3:
+            item_fn = self.items
+        else:
+            item_fn = self.iteritems
+            
+        def toItem(obj):
+            if isinstance(obj, ParseResults):
+                if obj.haskeys():
+                    return obj.asDict()
+                else:
+                    return [toItem(v) for v in obj]
+            else:
+                return obj
+                
+        return dict((k,toItem(v)) for k,v in item_fn())
+
+    def copy( self ):
+        """
+        Returns a new copy of a C{ParseResults} object.
+        """
+        ret = ParseResults( self.__toklist )
+        ret.__tokdict = self.__tokdict.copy()
+        ret.__parent = self.__parent
+        ret.__accumNames.update( self.__accumNames )
+        ret.__name = self.__name
+        return ret
+
+    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+        """
+        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+        """
+        nl = "\n"
+        out = []
+        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+                                                            for v in vlist)
+        nextLevelIndent = indent + "  "
+
+        # collapse out indents if formatting is not desired
+        if not formatted:
+            indent = ""
+            nextLevelIndent = ""
+            nl = ""
+
+        selfTag = None
+        if doctag is not None:
+            selfTag = doctag
+        else:
+            if self.__name:
+                selfTag = self.__name
+
+        if not selfTag:
+            if namedItemsOnly:
+                return ""
+            else:
+                selfTag = "ITEM"
+
+        out += [ nl, indent, "<", selfTag, ">" ]
+
+        for i,res in enumerate(self.__toklist):
+            if isinstance(res,ParseResults):
+                if i in namedItems:
+                    out += [ res.asXML(namedItems[i],
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+                else:
+                    out += [ res.asXML(None,
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+            else:
+                # individual token, see if there is a name for it
+                resTag = None
+                if i in namedItems:
+                    resTag = namedItems[i]
+                if not resTag:
+                    if namedItemsOnly:
+                        continue
+                    else:
+                        resTag = "ITEM"
+                xmlBodyText = _xml_escape(_ustr(res))
+                out += [ nl, nextLevelIndent, "<", resTag, ">",
+                                                xmlBodyText,
+                                                "</", resTag, ">" ]
+
+        out += [ nl, indent, "</", selfTag, ">" ]
+        return "".join(out)
+
+    def __lookup(self,sub):
+        for k,vlist in self.__tokdict.items():
+            for v,loc in vlist:
+                if sub is v:
+                    return k
+        return None
+
+    def getName(self):
+        r"""
+        Returns the results name for this token expression. Useful when several 
+        different expressions might match at a particular location.
+
+        Example::
+            integer = Word(nums)
+            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+            house_number_expr = Suppress('#') + Word(nums, alphanums)
+            user_data = (Group(house_number_expr)("house_number") 
+                        | Group(ssn_expr)("ssn")
+                        | Group(integer)("age"))
+            user_info = OneOrMore(user_data)
+            
+            result = user_info.parseString("22 111-22-3333 #221B")
+            for item in result:
+                print(item.getName(), ':', item[0])
+        prints::
+            age : 22
+            ssn : 111-22-3333
+            house_number : 221B
+        """
+        if self.__name:
+            return self.__name
+        elif self.__parent:
+            par = self.__parent()
+            if par:
+                return par.__lookup(self)
+            else:
+                return None
+        elif (len(self) == 1 and
+               len(self.__tokdict) == 1 and
+               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+            return next(iter(self.__tokdict.keys()))
+        else:
+            return None
+
+    def dump(self, indent='', depth=0, full=True):
+        """
+        Diagnostic method for listing out the contents of a C{ParseResults}.
+        Accepts an optional C{indent} argument so that this string can be embedded
+        in a nested display of other data.
+
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+            
+            result = date_str.parseString('12/31/1999')
+            print(result.dump())
+        prints::
+            ['12', '/', '31', '/', '1999']
+            - day: 1999
+            - month: 31
+            - year: 12
+        """
+        out = []
+        NL = '\n'
+        out.append( indent+_ustr(self.asList()) )
+        if full:
+            if self.haskeys():
+                items = sorted((str(k), v) for k,v in self.items())
+                for k,v in items:
+                    if out:
+                        out.append(NL)
+                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
+                    if isinstance(v,ParseResults):
+                        if v:
+                            out.append( v.dump(indent,depth+1) )
+                        else:
+                            out.append(_ustr(v))
+                    else:
+                        out.append(repr(v))
+            elif any(isinstance(vv,ParseResults) for vv in self):
+                v = self
+                for i,vv in enumerate(v):
+                    if isinstance(vv,ParseResults):
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
+                    else:
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
+            
+        return "".join(out)
+
+    def pprint(self, *args, **kwargs):
+        """
+        Pretty-printer for parsed results as a list, using the C{pprint} module.
+        Accepts additional positional or keyword args as defined for the 
+        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
+
+        Example::
+            ident = Word(alphas, alphanums)
+            num = Word(nums)
+            func = Forward()
+            term = ident | num | Group('(' + func + ')')
+            func <<= ident + Group(Optional(delimitedList(term)))
+            result = func.parseString("fna a,b,(fnb c,d,200),100")
+            result.pprint(width=40)
+        prints::
+            ['fna',
+             ['a',
+              'b',
+              ['(', 'fnb', ['c', 'd', '200'], ')'],
+              '100']]
+        """
+        pprint.pprint(self.asList(), *args, **kwargs)
+
+    # add support for pickle protocol
+    def __getstate__(self):
+        return ( self.__toklist,
+                 ( self.__tokdict.copy(),
+                   self.__parent is not None and self.__parent() or None,
+                   self.__accumNames,
+                   self.__name ) )
+
+    def __setstate__(self,state):
+        self.__toklist = state[0]
+        (self.__tokdict,
+         par,
+         inAccumNames,
+         self.__name) = state[1]
+        self.__accumNames = {}
+        self.__accumNames.update(inAccumNames)
+        if par is not None:
+            self.__parent = wkref(par)
+        else:
+            self.__parent = None
+
+    def __getnewargs__(self):
+        return self.__toklist, self.__name, self.__asList, self.__modal
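+
+    # Pickle round-trip sketch (hypothetical data): __getstate__ flattens the weak
+    # parent reference, and __setstate__ rebuilds it on load:
+    #   import pickle
+    #   result = Word(nums)("n").parseString("123")
+    #   pickle.loads(pickle.dumps(result))["n"]  # -> '123'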
+
+    def __dir__(self):
+        return (dir(type(self)) + list(self.keys()))
+
+MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+    """Returns current column within a string, counting newlines as line separators.
+   The first column is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    s = strg
+    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+    """Returns current line number within a string, counting newlines as line separators.
+   The first line is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+    """Returns the line of text containing loc within a string, counting newlines as line separators.
+       """
+    lastCR = strg.rfind("\n", 0, loc)
+    nextCR = strg.find("\n", loc)
+    if nextCR >= 0:
+        return strg[lastCR+1:nextCR]
+    else:
+        return strg[lastCR+1:]
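+
+# Location-helper sketch (hypothetical input): with loc pointing at the 'd' below,
+#   s = "abc\ndef"; loc = s.index('d')   # loc == 4
+#   lineno(loc, s)  # -> 2
+#   col(loc, s)     # -> 1
+#   line(loc, s)    # -> 'def'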
+
+def _defaultStartDebugAction( instring, loc, expr ):
+    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+    print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+    """'Do-nothing' debug action, to suppress debugging output during parsing."""
+    pass
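+
+# These three functions are the defaults installed by ParserElement.setDebug(True).
+# Quick sketch (hypothetical grammar):
+#   Word(nums).setName("integer").setDebug().parseString("42")
+# prints a "Match integer ..." / "Matched integer -> ['42']" pair via
+# _defaultStartDebugAction and _defaultSuccessDebugAction.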
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+    #~ if func in singleArgBuiltins:
+        #~ return lambda s,l,t: func(t)
+    #~ limit = 0
+    #~ foundArity = False
+    #~ def wrapper(*args):
+        #~ nonlocal limit,foundArity
+        #~ while 1:
+            #~ try:
+                #~ ret = func(*args[limit:])
+                #~ foundArity = True
+                #~ return ret
+            #~ except TypeError:
+                #~ if limit == maxargs or foundArity:
+                    #~ raise
+                #~ limit += 1
+                #~ continue
+    #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+'decorator to trim function calls to match the arity of the target'
+def _trim_arity(func, maxargs=2):
+    if func in singleArgBuiltins:
+        return lambda s,l,t: func(t)
+    limit = [0]
+    foundArity = [False]
+    
+    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+    if system_version[:2] >= (3,5):
+        def extract_stack(limit=0):
+            # special handling for Python 3.5.0 - extra deep call stack by 1
+            offset = -3 if system_version == (3,5,0) else -2
+            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+            return [frame_summary[:2]]
+        def extract_tb(tb, limit=0):
+            frames = traceback.extract_tb(tb, limit=limit)
+            frame_summary = frames[-1]
+            return [frame_summary[:2]]
+    else:
+        extract_stack = traceback.extract_stack
+        extract_tb = traceback.extract_tb
+    
+    # synthesize what would be returned by traceback.extract_stack at the call to 
+    # user's parse action 'func', so that we don't incur call penalty at parse time
+    
+    LINE_DIFF = 6
+    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
+    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+    this_line = extract_stack(limit=2)[-1]
+    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+    def wrapper(*args):
+        while 1:
+            try:
+                ret = func(*args[limit[0]:])
+                foundArity[0] = True
+                return ret
+            except TypeError:
+                # re-raise TypeErrors if they did not come from our arity testing
+                if foundArity[0]:
+                    raise
+                else:
+                    try:
+                        tb = sys.exc_info()[-1]
+                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+                            raise
+                    finally:
+                        del tb
+
+                if limit[0] <= maxargs:
+                    limit[0] += 1
+                    continue
+                raise
+
+    # copy func name to wrapper for sensible debug output
+    func_name = ""
+    try:
+        func_name = getattr(func, '__name__', 
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    wrapper.__name__ = func_name
+
+    return wrapper
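+
+# _trim_arity sketch (hypothetical action): a parse action written as fn(toks) is
+# probed with progressively fewer leading arguments until the call succeeds:
+#   def to_int(toks): return int(toks[0])
+#   wrapped = _trim_arity(to_int)
+#   wrapped("99", 0, ParseResults(["99"]))  # -> 99, after trimming s and loc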
+
+class ParserElement(object):
+    """Abstract base level parser element class."""
+    DEFAULT_WHITE_CHARS = " \n\t\r"
+    verbose_stacktrace = False
+
+    @staticmethod
+    def setDefaultWhitespaceChars( chars ):
+        r"""
+        Overrides the default whitespace chars
+
+        Example::
+            # default whitespace chars are space, <TAB> and newline
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
+            
+            # change to just treat newline as significant
+            ParserElement.setDefaultWhitespaceChars(" \t")
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
+        """
+        ParserElement.DEFAULT_WHITE_CHARS = chars
+
+    @staticmethod
+    def inlineLiteralsUsing(cls):
+        """
+        Set class to be used for inclusion of string literals into a parser.
+        
+        Example::
+            # default literal class used is Literal
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+
+            # change to Suppress
+            ParserElement.inlineLiteralsUsing(Suppress)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
+        """
+        ParserElement._literalStringClass = cls
+
+    def __init__( self, savelist=False ):
+        self.parseAction = list()
+        self.failAction = None
+        #~ self.name = ""  # don't define self.name, let subclasses try/except upcall
+        self.strRepr = None
+        self.resultsName = None
+        self.saveAsList = savelist
+        self.skipWhitespace = True
+        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        self.copyDefaultWhiteChars = True
+        self.mayReturnEmpty = False # used when checking for left-recursion
+        self.keepTabs = False
+        self.ignoreExprs = list()
+        self.debug = False
+        self.streamlined = False
+        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+        self.errmsg = ""
+        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+        self.debugActions = ( None, None, None ) #custom debug actions
+        self.re = None
+        self.callPreparse = True # used to avoid redundant calls to preParse
+        self.callDuringTry = False
+
+    def copy( self ):
+        """
+        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
+        for the same parsing pattern, using copies of the original parse element.
+        
+        Example::
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+            
+            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+        prints::
+            [5120, 100, 655360, 268435456]
+        Equivalent form of C{expr.copy()} is just C{expr()}::
+            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+        """
+        cpy = copy.copy( self )
+        cpy.parseAction = self.parseAction[:]
+        cpy.ignoreExprs = self.ignoreExprs[:]
+        if self.copyDefaultWhiteChars:
+            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        return cpy
+
+    def setName( self, name ):
+        """
+        Define name for this expression, makes debugging and exception messages clearer.
+        
+        Example::
+            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
+        """
+        self.name = name
+        self.errmsg = "Expected " + self.name
+        if hasattr(self,"exception"):
+            self.exception.msg = self.errmsg
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        """
+        Define name for referencing matching tokens as a nested attribute
+        of the returned parse results.
+        NOTE: this returns a *copy* of the original C{ParserElement} object;
+        this is so that the client can define a basic element, such as an
+        integer, and reference it in multiple places with different names.
+
+        You can also set results names using the abbreviated syntax,
+        C{expr("name")} in place of C{expr.setResultsName("name")} - 
+        see L{I{__call__}<__call__>}.
+
+        Example::
+            date_str = (integer.setResultsName("year") + '/' 
+                        + integer.setResultsName("month") + '/' 
+                        + integer.setResultsName("day"))
+
+            # equivalent form:
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+        """
+        newself = self.copy()
+        if name.endswith("*"):
+            name = name[:-1]
+            listAllMatches=True
+        newself.resultsName = name
+        newself.modalResults = not listAllMatches
+        return newself
+
+    def setBreak(self,breakFlag = True):
+        """Method to invoke the Python pdb debugger when this element is
+           about to be parsed. Set C{breakFlag} to True to enable, False to
+           disable.
+        """
+        if breakFlag:
+            _parseMethod = self._parse
+            def breaker(instring, loc, doActions=True, callPreParse=True):
+                import pdb
+                pdb.set_trace()
+                return _parseMethod( instring, loc, doActions, callPreParse )
+            breaker._originalParseMethod = _parseMethod
+            self._parse = breaker
+        else:
+            if hasattr(self._parse,"_originalParseMethod"):
+                self._parse = self._parse._originalParseMethod
+        return self
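+
+    # setBreak sketch (illustrative): wraps _parse so pdb opens just before this
+    # element attempts a match; setBreak(False) restores the original method.
+    #   integer = Word(nums).setBreak()
+    #   integer.parseString("42")   # drops into pdb before matching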
+
+    def setParseAction( self, *fns, **kwargs ):
+        """
+        Define one or more actions to perform when successfully matching parse element definition.
+        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+         - s   = the original string being parsed (see note below)
+         - loc = the location of the matching substring
+         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
+        If the functions in fns modify the tokens, they can return them as the return
+        value from fn, and the modified list of tokens will replace the original.
+        Otherwise, fn does not need to return any value.
+
+        Optional keyword arguments:
+         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
+
+        Note: the default parsing behavior is to expand tabs in the input string
+        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
+        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+        consistent view of the parsed string, the parse location, and line and column
+        positions within the parsed string.
+        
+        Example::
+            integer = Word(nums)
+            date_str = integer + '/' + integer + '/' + integer
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+            # use parse action to convert to ints at parse time
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            date_str = integer + '/' + integer + '/' + integer
+
+            # note that integer fields are now ints, not strings
+            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
+        """
+        self.parseAction = list(map(_trim_arity, list(fns)))
+        self.callDuringTry = kwargs.get("callDuringTry", False)
+        return self
+
+    def addParseAction( self, *fns, **kwargs ):
+        """
+        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
+        
+        See examples in L{I{copy}<copy>}.
+        """
+        self.parseAction += list(map(_trim_arity, list(fns)))
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def addCondition(self, *fns, **kwargs):
+        """Add a boolean predicate function to expression's list of parse actions. See 
+        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
+        functions passed to C{addCondition} need to return boolean success/fail of the condition.
+
+        Optional keyword arguments:
+         - message = define a custom message to be used in the raised exception
+         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+         
+        Example::
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            year_int = integer.copy()
+            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+            date_str = year_int + '/' + integer + '/' + integer
+
+            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+        """
+        msg = kwargs.get("message", "failed user-defined condition")
+        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+        for fn in fns:
+            def pa(s,l,t):
+                if not bool(_trim_arity(fn)(s,l,t)):
+                    raise exc_type(s,l,msg)
+            self.parseAction.append(pa)
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def setFailAction( self, fn ):
+        """Define action to perform if parsing fails at this expression.
+           Fail action fn is a callable function that takes the arguments
+           C{fn(s,loc,expr,err)} where:
+            - s = string being parsed
+            - loc = location where expression match was attempted and failed
+            - expr = the parse expression that failed
+            - err = the exception thrown
+           The function returns no value.  It may throw C{L{ParseFatalException}}
+           if it is desired to stop parsing immediately."""
+        self.failAction = fn
+        return self
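+
+    # setFailAction sketch (hypothetical handler): report where the match was
+    # attempted and failed; raising ParseFatalException inside fn would abort
+    # the parse outright.
+    #   def report_failure(s, loc, expr, err):
+    #       print("failed at %d: %s" % (loc, err))
+    #   expr = (Word(alphas) + Word(nums)).setFailAction(report_failure)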
+
+    def _skipIgnorables( self, instring, loc ):
+        exprsFound = True
+        while exprsFound:
+            exprsFound = False
+            for e in self.ignoreExprs:
+                try:
+                    while 1:
+                        loc,dummy = e._parse( instring, loc )
+                        exprsFound = True
+                except ParseException:
+                    pass
+        return loc
+
+    def preParse( self, instring, loc ):
+        if self.ignoreExprs:
+            loc = self._skipIgnorables( instring, loc )
+
+        if self.skipWhitespace:
+            wt = self.whiteChars
+            instrlen = len(instring)
+            while loc < instrlen and instring[loc] in wt:
+                loc += 1
+
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        return loc, []
+
+    def postParse( self, instring, loc, tokenlist ):
+        return tokenlist
+
+    #~ @profile
+    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+        debugging = ( self.debug ) #and doActions )
+
+        if debugging or self.failAction:
+            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+            if (self.debugActions[0] ):
+                self.debugActions[0]( instring, loc, self )
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            try:
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            except ParseBaseException as err:
+                #~ print ("Exception raised:", err)
+                if self.debugActions[2]:
+                    self.debugActions[2]( instring, tokensStart, self, err )
+                if self.failAction:
+                    self.failAction( instring, tokensStart, self, err )
+                raise
+        else:
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            if self.mayIndexError or preloc >= len(instring):
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            else:
+                loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+        tokens = self.postParse( instring, loc, tokens )
+
+        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+        if self.parseAction and (doActions or self.callDuringTry):
+            if debugging:
+                try:
+                    for fn in self.parseAction:
+                        tokens = fn( instring, tokensStart, retTokens )
+                        if tokens is not None:
+                            retTokens = ParseResults( tokens,
+                                                      self.resultsName,
+                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                      modal=self.modalResults )
+                except ParseBaseException as err:
+                    #~ print "Exception raised in user parse action:", err
+                    if (self.debugActions[2] ):
+                        self.debugActions[2]( instring, tokensStart, self, err )
+                    raise
+            else:
+                for fn in self.parseAction:
+                    tokens = fn( instring, tokensStart, retTokens )
+                    if tokens is not None:
+                        retTokens = ParseResults( tokens,
+                                                  self.resultsName,
+                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                  modal=self.modalResults )
+        if debugging:
+            #~ print ("Matched",self,"->",retTokens.asList())
+            if (self.debugActions[1] ):
+                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+        return loc, retTokens
+
+    def tryParse( self, instring, loc ):
+        try:
+            return self._parse( instring, loc, doActions=False )[0]
+        except ParseFatalException:
+            raise ParseException( instring, loc, self.errmsg, self)
+    
+    def canParseNext(self, instring, loc):
+        try:
+            self.tryParse(instring, loc)
+        except (ParseException, IndexError):
+            return False
+        else:
+            return True
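+
+    # canParseNext sketch (hypothetical values): a non-destructive lookahead probe.
+    #   Word(nums).canParseNext("abc123", 3)  # -> True
+    #   Word(nums).canParseNext("abc123", 0)  # -> False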
+
+    class _UnboundedCache(object):
+        def __init__(self):
+            cache = {}
+            self.not_in_cache = not_in_cache = object()
+
+            def get(self, key):
+                return cache.get(key, not_in_cache)
+
+            def set(self, key, value):
+                cache[key] = value
+
+            def clear(self):
+                cache.clear()
+                
+            def cache_len(self):
+                return len(cache)
+
+            self.get = types.MethodType(get, self)
+            self.set = types.MethodType(set, self)
+            self.clear = types.MethodType(clear, self)
+            self.__len__ = types.MethodType(cache_len, self)
+
+    if _OrderedDict is not None:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = _OrderedDict()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    while len(cache) > size:
+                        try:
+                            cache.popitem(False)
+                        except KeyError:
+                            pass
+
+                def clear(self):
+                    cache.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
+
+    else:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = {}
+                key_fifo = collections.deque([], size)
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    while len(key_fifo) > size:
+                        cache.pop(key_fifo.popleft(), None)
+                    key_fifo.append(key)
+
+                def clear(self):
+                    cache.clear()
+                    key_fifo.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
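+
+    # Cache sketch (illustrative): both cache variants expose the same tiny
+    # get/set/clear interface consumed by _parseCache below, e.g.:
+    #   cache = ParserElement._FifoCache(2)
+    #   cache.set("a", 1); cache.set("b", 2); cache.set("c", 3)
+    #   cache.get("a") is cache.not_in_cache  # -> True on the OrderedDict variant:
+    #                                         #    the oldest entry was evicted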
+
+    # argument cache for optimizing repeated calls when backtracking through recursive expressions
+    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+    packrat_cache_lock = RLock()
+    packrat_cache_stats = [0, 0]
+
+    # this method gets repeatedly called during backtracking with the same arguments -
+    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+        HIT, MISS = 0, 1
+        lookup = (self, instring, loc, callPreParse, doActions)
+        with ParserElement.packrat_cache_lock:
+            cache = ParserElement.packrat_cache
+            value = cache.get(lookup)
+            if value is cache.not_in_cache:
+                ParserElement.packrat_cache_stats[MISS] += 1
+                try:
+                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
+                except ParseBaseException as pe:
+                    # cache a copy of the exception, without the traceback
+                    cache.set(lookup, pe.__class__(*pe.args))
+                    raise
+                else:
+                    cache.set(lookup, (value[0], value[1].copy()))
+                    return value
+            else:
+                ParserElement.packrat_cache_stats[HIT] += 1
+                if isinstance(value, Exception):
+                    raise value
+                return (value[0], value[1].copy())
+
+    _parse = _parseNoCache
+
+    @staticmethod
+    def resetCache():
+        ParserElement.packrat_cache.clear()
+        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+    _packratEnabled = False
+    @staticmethod
+    def enablePackrat(cache_size_limit=128):
+        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+           Repeated parse attempts at the same string location (which happens
+           often in many complex grammars) can immediately return a cached value,
+           instead of re-executing parsing/validating code.  Memoizing is done for
+           both valid results and parsing exceptions.
+           
+           Parameters:
+            - cache_size_limit - (default=C{128}) - if an integer value is provided
+              will limit the size of the packrat cache; if None is passed, then
+              the cache size will be unbounded; if 0 is passed, the cache will
+              be effectively disabled.
+            
+           This speedup may break existing programs that use parse actions that
+           have side-effects.  For this reason, packrat parsing is disabled when
+           you first import pyparsing.  To activate the packrat feature, your
+           program must call the class method C{ParserElement.enablePackrat()}.  If
+           your program uses C{psyco} to "compile as you go", you must call
+           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
+           Python will crash.  For best results, call C{enablePackrat()} immediately
+           after importing pyparsing.
+           
+           Example::
+               import pyparsing
+               pyparsing.ParserElement.enablePackrat()
+        """
+        if not ParserElement._packratEnabled:
+            ParserElement._packratEnabled = True
+            if cache_size_limit is None:
+                ParserElement.packrat_cache = ParserElement._UnboundedCache()
+            else:
+                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+            ParserElement._parse = ParserElement._parseCache
+
+    def parseString( self, instring, parseAll=False ):
+        """
+        Execute the parse expression with the given string.
+        This is the main interface to the client code, once the complete
+        expression has been built.
+
+        If you want the grammar to require that the entire input string be
+        successfully parsed, then set C{parseAll} to True (equivalent to ending
+        the grammar with C{L{StringEnd()}}).
+
+        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+        in order to report proper column numbers in parse actions.
+        If the input string contains tabs and
+        the grammar uses parse actions that use the C{loc} argument to index into the
+        string being parsed, you can ensure you have a consistent view of the input
+        string by:
+         - calling C{parseWithTabs} on your grammar before calling C{parseString}
+           (see L{I{parseWithTabs}<parseWithTabs>})
+         - define your parse action using the full C{(s,loc,toks)} signature, and
+           reference the input string using the parse action's C{s} argument
+         - explicitly expand the tabs in your input string before calling
+           C{parseString}
+        
+        Example::
+            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
+            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
+        """
+        ParserElement.resetCache()
+        if not self.streamlined:
+            self.streamline()
+            #~ self.saveAsList = True
+        for e in self.ignoreExprs:
+            e.streamline()
+        if not self.keepTabs:
+            instring = instring.expandtabs()
+        try:
+            loc, tokens = self._parse( instring, 0 )
+            if parseAll:
+                loc = self.preParse( instring, loc )
+                se = Empty() + StringEnd()
+                se._parse( instring, loc )
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+        else:
+            return tokens
+
+    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+        """
+        Scan the input string for expression matches.  Each match will return the
+        matching tokens, start location, and end location.  May be called with optional
+        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
+        C{overlap} is specified, then overlapping matches will be reported.
+
+        Note that the start and end locations are reported relative to the string
+        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
+        strings with embedded tabs.
+
+        Example::
+            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+            print(source)
+            for tokens,start,end in Word(alphas).scanString(source):
+                print(' '*start + '^'*(end-start))
+                print(' '*start + tokens[0])
+        
+        prints::
+        
+            sldjf123lsdjjkf345sldkjf879lkjsfd987
+            ^^^^^
+            sldjf
+                    ^^^^^^^
+                    lsdjjkf
+                              ^^^^^^
+                              sldkjf
+                                       ^^^^^^
+                                       lkjsfd
+        """
+        if not self.streamlined:
+            self.streamline()
+        for e in self.ignoreExprs:
+            e.streamline()
+
+        if not self.keepTabs:
+            instring = _ustr(instring).expandtabs()
+        instrlen = len(instring)
+        loc = 0
+        preparseFn = self.preParse
+        parseFn = self._parse
+        ParserElement.resetCache()
+        matches = 0
+        try:
+            while loc <= instrlen and matches < maxMatches:
+                try:
+                    preloc = preparseFn( instring, loc )
+                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+                except ParseException:
+                    loc = preloc+1
+                else:
+                    if nextLoc > loc:
+                        matches += 1
+                        yield tokens, preloc, nextLoc
+                        if overlap:
+                            nextloc = preparseFn( instring, loc )
+                            if nextloc > loc:
+                                loc = nextLoc
+                            else:
+                                loc += 1
+                        else:
+                            loc = nextLoc
+                    else:
+                        loc = preloc+1
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def transformString( self, instring ):
+        """
+        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
+        be returned from a parse action.  To use C{transformString}, define a grammar and
+        attach a parse action to it that modifies the returned token list.
+        Invoking C{transformString()} on a target string will then scan for matches,
+        and replace the matched text patterns according to the logic in the parse
+        action.  C{transformString()} returns the resulting transformed string.
+        
+        Example::
+            wd = Word(alphas)
+            wd.setParseAction(lambda toks: toks[0].title())
+            
+            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+        Prints::
+            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+        """
+        out = []
+        lastE = 0
+        # force preservation of s, to minimize unwanted transformation of string, and to
+        # keep string locs straight between transformString and scanString
+        self.keepTabs = True
+        try:
+            for t,s,e in self.scanString( instring ):
+                out.append( instring[lastE:s] )
+                if t:
+                    if isinstance(t,ParseResults):
+                        out += t.asList()
+                    elif isinstance(t,list):
+                        out += t
+                    else:
+                        out.append(t)
+                lastE = e
+            out.append(instring[lastE:])
+            out = [o for o in out if o]
+            return "".join(map(_ustr,_flatten(out)))
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def searchString( self, instring, maxMatches=_MAX_INT ):
+        """
+        Another extension to C{L{scanString}}, simplifying the access to the tokens found
+        to match the given parse expression.  May be called with optional
+        C{maxMatches} argument, to clip searching after 'n' matches are found.
+        
+        Example::
+            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+            cap_word = Word(alphas.upper(), alphas.lower())
+            
+            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+            # the sum() builtin can be used to merge results into a single ParseResults object
+            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+        prints::
+            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+        """
+        try:
+            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+        """
+        Generator method to split a string using the given expression as a separator.
+        May be called with optional C{maxsplit} argument, to limit the number of splits;
+        and the optional C{includeSeparators} argument (default=C{False}), if the separating
+        matching text should be included in the split results.
+        
+        Example::        
+            punc = oneOf(list(".,;:/-!?"))
+            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+        prints::
+            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+        """
+        splits = 0
+        last = 0
+        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+            yield instring[last:s]
+            if includeSeparators:
+                yield t[0]
+            last = e
+        yield instring[last:]
+
+    def __add__(self, other ):
+        """
+        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
+        converts them to L{Literal}s by default.
+        
+        Example::
+            greet = Word(alphas) + "," + Word(alphas) + "!"
+            hello = "Hello, World!"
+            print (hello, "->", greet.parseString(hello))
+        Prints::
+            Hello, World! -> ['Hello', ',', 'World', '!']
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """
+        Implementation of + operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """
+        Implementation of - operator, returns C{L{And}} with error stop
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return self + And._ErrorStop() + other
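+
+    # Illustrative sketch (not part of the original source): once the left-hand
+    # side of '-' has matched, a failure on the right-hand side raises an
+    # immediate ParseSyntaxException instead of backtracking to try other
+    # alternatives:
+    #
+    #     >>> port = Literal("port") - Word(nums)
+    #     >>> port.parseString("port 80").asList()
+    #     ['port', '80']
+    #     >>> port.parseString("port abc")    # raises ParseSyntaxException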
+
+    def __rsub__(self, other ):
+        """
+        Implementation of - operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """
+        Implementation of * operator, allows use of C{expr * 3} in place of
+        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
+        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
+        may also include C{None} as in:
+         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+              to C{expr*n + L{ZeroOrMore}(expr)}
+              (read as "at least n instances of C{expr}")
+         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+              (read as "0 to n instances of C{expr}")
+         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+        Note that C{expr*(None,n)} does not raise an exception if
+        more than n exprs exist in the input stream; that is,
+        C{expr*(None,n)} does not enforce a maximum number of expr
+        occurrences.  If this behavior is desired, then write
+        C{expr*(None,n) + ~expr}
+        """
+        if isinstance(other,int):
+            minElements, optElements = other,0
+        elif isinstance(other,tuple):
+            other = (other + (None, None))[:2]
+            if other[0] is None:
+                other = (0, other[1])
+            if isinstance(other[0],int) and other[1] is None:
+                if other[0] == 0:
+                    return ZeroOrMore(self)
+                if other[0] == 1:
+                    return OneOrMore(self)
+                else:
+                    return self*other[0] + ZeroOrMore(self)
+            elif isinstance(other[0],int) and isinstance(other[1],int):
+                minElements, optElements = other
+                optElements -= minElements
+            else:
+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
+        else:
+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
+
+        if minElements < 0:
+            raise ValueError("cannot multiply ParserElement by negative value")
+        if optElements < 0:
+            raise ValueError("second tuple value must be greater or equal to first tuple value")
+        if minElements == optElements == 0:
+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+        if (optElements):
+            def makeOptionalList(n):
+                if n>1:
+                    return Optional(self + makeOptionalList(n-1))
+                else:
+                    return Optional(self)
+            if minElements:
+                if minElements == 1:
+                    ret = self + makeOptionalList(optElements)
+                else:
+                    ret = And([self]*minElements) + makeOptionalList(optElements)
+            else:
+                ret = makeOptionalList(optElements)
+        else:
+            if minElements == 1:
+                ret = self
+            else:
+                ret = And([self]*minElements)
+        return ret
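+
+    # Illustrative sketch (not part of the original source): a (min,max) tuple
+    # expands into nested Optionals, so expr*(2,4) is equivalent to
+    # expr + expr + Optional(expr + Optional(expr)):
+    #
+    #     >>> digit = Word(nums, exact=1)
+    #     >>> (digit*(2,4)).parseString("123").asList()
+    #     ['1', '2', '3']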
+
+    def __rmul__(self, other):
+        return self.__mul__(other)
+
+    def __or__(self, other ):
+        """
+        Implementation of | operator - returns C{L{MatchFirst}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return MatchFirst( [ self, other ] )
+
+    def __ror__(self, other ):
+        """
+        Implementation of | operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other | self
+
+    def __xor__(self, other ):
+        """
+        Implementation of ^ operator - returns C{L{Or}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Or( [ self, other ] )
+
+    def __rxor__(self, other ):
+        """
+        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other ^ self
+
+    def __and__(self, other ):
+        """
+        Implementation of & operator - returns C{L{Each}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Each( [ self, other ] )
+
+    def __rand__(self, other ):
+        """
+        Implementation of & operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other & self
+
+    def __invert__( self ):
+        """
+        Implementation of ~ operator - returns C{L{NotAny}}
+        """
+        return NotAny( self )
+
+    def __call__(self, name=None):
+        """
+        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
+        
+        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+        passed as C{True}.
+           
+        If C{name} is omitted, same as calling C{L{copy}}.
+
+        Example::
+            # these are equivalent
+            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
+        """
+        if name is not None:
+            return self.setResultsName(name)
+        else:
+            return self.copy()
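+
+    # Illustrative sketch (not part of the original source): a results name
+    # ending in '*' sets listAllMatches=True, so repeated matches accumulate
+    # under the name instead of the last match overwriting earlier ones:
+    #
+    #     >>> word = Word(alphas)
+    #     >>> OneOrMore(word("item*")).parseString("a b c")["item"].asList()
+    #     ['a', 'b', 'c']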
+
+    def suppress( self ):
+        """
+        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+        cluttering up returned output.
+        """
+        return Suppress( self )
+
+    def leaveWhitespace( self ):
+        """
+        Disables the skipping of whitespace before matching the characters in the
+        C{ParserElement}'s defined pattern.  This is normally only used internally by
+        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+        """
+        self.skipWhitespace = False
+        return self
+
+    def setWhitespaceChars( self, chars ):
+        """
+        Overrides the default whitespace chars
+        """
+        self.skipWhitespace = True
+        self.whiteChars = chars
+        self.copyDefaultWhiteChars = False
+        return self
+
+    def parseWithTabs( self ):
+        """
+        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
+        Must be called before C{parseString} when the input grammar contains elements that
+        match C{<TAB>} characters.
+        """
+        self.keepTabs = True
+        return self
+
+    def ignore( self, other ):
+        """
+        Define expression to be ignored (e.g., comments) while doing pattern
+        matching; may be called repeatedly, to define multiple comment or other
+        ignorable patterns.
+        
+        Example::
+            patt = OneOrMore(Word(alphas))
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+            
+            patt.ignore(cStyleComment)
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+        """
+        if isinstance(other, basestring):
+            other = Suppress(other)
+
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                self.ignoreExprs.append(other)
+        else:
+            self.ignoreExprs.append( Suppress( other.copy() ) )
+        return self
+
+    def setDebugActions( self, startAction, successAction, exceptionAction ):
+        """
+        Enable display of debugging messages while doing pattern matching.
+        """
+        self.debugActions = (startAction or _defaultStartDebugAction,
+                             successAction or _defaultSuccessDebugAction,
+                             exceptionAction or _defaultExceptionDebugAction)
+        self.debug = True
+        return self
+
+    def setDebug( self, flag=True ):
+        """
+        Enable display of debugging messages while doing pattern matching.
+        Set C{flag} to True to enable, False to disable.
+
+        Example::
+            wd = Word(alphas).setName("alphaword")
+            integer = Word(nums).setName("numword")
+            term = wd | integer
+            
+            # turn on debugging for wd
+            wd.setDebug()
+
+            OneOrMore(term).parseString("abc 123 xyz 890")
+        
+        prints::
+            Match alphaword at loc 0(1,1)
+            Matched alphaword -> ['abc']
+            Match alphaword at loc 3(1,4)
+            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+            Match alphaword at loc 7(1,8)
+            Matched alphaword -> ['xyz']
+            Match alphaword at loc 11(1,12)
+            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+            Match alphaword at loc 15(1,16)
+            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+        The output shown is that produced by the default debug actions - custom debug actions can be
+        specified using L{setDebugActions}. Prior to attempting
+        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
+        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
+        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
+        which makes debugging and exception messages easier to understand - for instance, the default
+        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
+        """
+        if flag:
+            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+        else:
+            self.debug = False
+        return self
+
+    def __str__( self ):
+        return self.name
+
+    def __repr__( self ):
+        return _ustr(self)
+
+    def streamline( self ):
+        self.streamlined = True
+        self.strRepr = None
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        pass
+
+    def validate( self, validateTrace=[] ):
+        """
+        Check defined expressions for valid structure, check for infinite recursive definitions.
+        """
+        self.checkRecursion( [] )
+
+    def parseFile( self, file_or_filename, parseAll=False ):
+        """
+        Execute the parse expression on the given file or filename.
+        If a filename is specified (instead of a file object),
+        the entire file is opened, read, and closed before parsing.
+        """
+        try:
+            file_contents = file_or_filename.read()
+        except AttributeError:
+            with open(file_or_filename, "r") as f:
+                file_contents = f.read()
+        try:
+            return self.parseString(file_contents, parseAll)
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def __eq__(self,other):
+        if isinstance(other, ParserElement):
+            return self is other or vars(self) == vars(other)
+        elif isinstance(other, basestring):
+            return self.matches(other)
+        else:
+            return super(ParserElement,self)==other
+
+    def __ne__(self,other):
+        return not (self == other)
+
+    def __hash__(self):
+        return hash(id(self))
+
+    def __req__(self,other):
+        return self == other
+
+    def __rne__(self,other):
+        return not (self == other)
+
+    def matches(self, testString, parseAll=True):
+        """
+        Method for quick testing of a parser against a test string. Good for simple
+        inline microtests of sub-expressions while building up a larger parser.
+           
+        Parameters:
+         - testString - to test against this expression for a match
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+            
+        Example::
+            expr = Word(nums)
+            assert expr.matches("100")
+        """
+        try:
+            self.parseString(_ustr(testString), parseAll=parseAll)
+            return True
+        except ParseBaseException:
+            return False
+                
+    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
+        """
+        Execute the parse expression on a series of test strings, showing each
+        test, the parsed results or where the parse failed. Quick and easy way to
+        run a parse expression against a list of sample strings.
+           
+        Parameters:
+         - tests - a list of separate test strings, or a multiline string of test strings
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
+         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
+              string; pass None to disable comment filtering
+         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
+              if False, only dump nested list
+         - printResults - (default=C{True}) prints test output to stdout
+         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
+
+        Returns: a (success, results) tuple, where success indicates that all tests succeeded
+        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
+        test's output
+        
+        Example::
+            number_expr = pyparsing_common.number.copy()
+
+            result = number_expr.runTests('''
+                # unsigned integer
+                100
+                # negative integer
+                -100
+                # float with scientific notation
+                6.02e23
+                # integer with scientific notation
+                1e-12
+                ''')
+            print("Success" if result[0] else "Failed!")
+
+            result = number_expr.runTests('''
+                # stray character
+                100Z
+                # missing leading digit before '.'
+                -.100
+                # too many '.'
+                3.14.159
+                ''', failureTests=True)
+            print("Success" if result[0] else "Failed!")
+        prints::
+            # unsigned integer
+            100
+            [100]
+
+            # negative integer
+            -100
+            [-100]
+
+            # float with scientific notation
+            6.02e23
+            [6.02e+23]
+
+            # integer with scientific notation
+            1e-12
+            [1e-12]
+
+            Success
+            
+            # stray character
+            100Z
+               ^
+            FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+            # missing leading digit before '.'
+            -.100
+            ^
+            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+            # too many '.'
+            3.14.159
+                ^
+            FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+            Success
+
+        Each test string must be on a single line. If you want to test a string that spans multiple
+        lines, create a test like this::
+
+            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
+        
+        (Note that this is a raw string literal, you must include the leading 'r'.)
+        """
+        if isinstance(tests, basestring):
+            tests = list(map(str.strip, tests.rstrip().splitlines()))
+        if isinstance(comment, basestring):
+            comment = Literal(comment)
+        allResults = []
+        comments = []
+        success = True
+        for t in tests:
+            if comment is not None and comment.matches(t, False) or comments and not t:
+                comments.append(t)
+                continue
+            if not t:
+                continue
+            out = ['\n'.join(comments), t]
+            comments = []
+            try:
+                t = t.replace(r'\n','\n')
+                result = self.parseString(t, parseAll=parseAll)
+                out.append(result.dump(full=fullDump))
+                success = success and not failureTests
+            except ParseBaseException as pe:
+                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+                if '\n' in t:
+                    out.append(line(pe.loc, t))
+                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
+                else:
+                    out.append(' '*pe.loc + '^' + fatal)
+                out.append("FAIL: " + str(pe))
+                success = success and failureTests
+                result = pe
+            except Exception as exc:
+                out.append("FAIL-EXCEPTION: " + str(exc))
+                success = success and failureTests
+                result = exc
+
+            if printResults:
+                if fullDump:
+                    out.append('')
+                print('\n'.join(out))
+
+            allResults.append((t, result))
+        
+        return success, allResults
+
+        
+class Token(ParserElement):
+    """
+    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
+    """
+    def __init__( self ):
+        super(Token,self).__init__( savelist=False )
+
+
+class Empty(Token):
+    """
+    An empty token, will always match.
+    """
+    def __init__( self ):
+        super(Empty,self).__init__()
+        self.name = "Empty"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+
+class NoMatch(Token):
+    """
+    A token that will never match.
+    """
+    def __init__( self ):
+        super(NoMatch,self).__init__()
+        self.name = "NoMatch"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.errmsg = "Unmatchable token"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+    """
+    Token to exactly match a specified string.
+    
+    Example::
+        Literal('blah').parseString('blah')  # -> ['blah']
+        Literal('blah').parseString('blahfooblah')  # -> ['blah']
+        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
+    
+    For case-insensitive matching, use L{CaselessLiteral}.
+    
+    For keyword matching (force word break before and after the matched string),
+    use L{Keyword} or L{CaselessKeyword}.
+    """
+    def __init__( self, matchString ):
+        super(Literal,self).__init__()
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Literal; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+            self.__class__ = Empty
+        self.name = '"%s"' % _ustr(self.match)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+
+    # Performance tuning: this routine gets called a *lot*
+    # if this is a single character match string  and the first character matches,
+    # short-circuit as quickly as possible, and avoid calling startswith
+    #~ @profile
+    def parseImpl( self, instring, loc, doActions=True ):
+        if (instring[loc] == self.firstMatchChar and
+            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+    """
+    Token to exactly match a specified string as a keyword, that is, it must be
+    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
+     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
+     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
+    Accepts two optional constructor arguments in addition to the keyword string:
+     - C{identChars} is a string of characters that would be valid identifier characters,
+          defaulting to all alphanumerics + "_" and "$"
+     - C{caseless} allows case-insensitive matching, default is C{False}.
+       
+    Example::
+        Keyword("start").parseString("start")  # -> ['start']
+        Keyword("start").parseString("starting")  # -> Exception
+
+    For case-insensitive matching, use L{CaselessKeyword}.
+    """
+    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+    def __init__( self, matchString, identChars=None, caseless=False ):
+        super(Keyword,self).__init__()
+        if identChars is None:
+            identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Keyword; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+        self.name = '"%s"' % self.match
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+        self.caseless = caseless
+        if caseless:
+            self.caselessmatch = matchString.upper()
+            identChars = identChars.upper()
+        self.identChars = set(identChars)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.caseless:
+            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        else:
+            if (instring[loc] == self.firstMatchChar and
+                (self.matchLen==1 or instring.startswith(self.match,loc)) and
+                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+                (loc == 0 or instring[loc-1] not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+    def copy(self):
+        c = super(Keyword,self).copy()
+        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        return c
+
+    @staticmethod
+    def setDefaultKeywordChars( chars ):
+        """Overrides the default Keyword chars
+        """
+        Keyword.DEFAULT_KEYWORD_CHARS = chars
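+
+# Illustrative sketch (not part of the original source): Keyword requires a
+# word boundary where Literal does not:
+#
+#     >>> Literal("if").searchString("ifAndOnlyIf").asList()
+#     [['if']]
+#     >>> Keyword("if").searchString("ifAndOnlyIf").asList()
+#     []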
+
+class CaselessLiteral(Literal):
+    """
+    Token to match a specified string, ignoring case of letters.
+    Note: the matched results will always be in the case of the given
+    match string, NOT the case of the input text.
+
+    Example::
+        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+        
+    (Contrast with example for L{CaselessKeyword}.)
+    """
+    def __init__( self, matchString ):
+        super(CaselessLiteral,self).__init__( matchString.upper() )
+        # Preserve the defining literal.
+        self.returnString = matchString
+        self.name = "'%s'" % self.returnString
+        self.errmsg = "Expected " + self.name
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[ loc:loc+self.matchLen ].upper() == self.match:
+            return loc+self.matchLen, self.returnString
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+    """
+    Caseless version of L{Keyword}.
+
+    Example::
+        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+        
+    (Contrast with example for L{CaselessLiteral}.)
+    """
+    def __init__( self, matchString, identChars=None ):
+        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CloseMatch(Token):
+    """
+    A variation on L{Literal} which matches "close" matches, that is, 
+    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
+     - C{match_string} - string to be matched
+     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
+    
+    The results from a successful parse will contain the matched text from the input string and the following named results:
+     - C{mismatches} - a list of the positions within the match_string where mismatches were found
+     - C{original} - the original match_string used to compare against the input string
+    
+    If C{mismatches} is an empty list, then the match was an exact match.
+    
+    Example::
+        patt = CloseMatch("ATCATCGAATGGA")
+        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+        # exact match
+        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+        # close match allowing up to 2 mismatches
+        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+    """
+    def __init__(self, match_string, maxMismatches=1):
+        super(CloseMatch,self).__init__()
+        self.name = match_string
+        self.match_string = match_string
+        self.maxMismatches = maxMismatches
+        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+        self.mayIndexError = False
+        self.mayReturnEmpty = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        start = loc
+        instrlen = len(instring)
+        maxloc = start + len(self.match_string)
+
+        if maxloc <= instrlen:
+            match_string = self.match_string
+            match_stringloc = 0
+            mismatches = []
+            maxMismatches = self.maxMismatches
+
+            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
+                src,mat = s_m
+                if src != mat:
+                    mismatches.append(match_stringloc)
+                    if len(mismatches) > maxMismatches:
+                        break
+            else:
+                loc = start + match_stringloc + 1
+                results = ParseResults([instring[start:loc]])
+                results['original'] = self.match_string
+                results['mismatches'] = mismatches
+                return loc, results
+
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+    """
+    Token for matching words composed of allowed character sets.
+    Defined with string containing all allowed initial characters,
+    an optional string containing allowed body characters (if omitted,
+    defaults to the initial character set), and an optional minimum,
+    maximum, and/or exact length.  The default value for C{min} is 1 (a
+    minimum value < 1 is not valid); the default values for C{max} and C{exact}
+    are 0, meaning no maximum or exact length restriction. An optional
+    C{excludeChars} parameter can list characters that might be found in 
+    the input C{bodyChars} string; useful to define a word of all printables
+    except for one or two characters, for instance.
+    
+    L{srange} is useful for defining custom character set strings for defining 
+    C{Word} expressions, using range notation from regular expression character sets.
+    
+    A common mistake is to use C{Word} to match a specific literal string, as in 
+    C{Word("Address")}. Remember that C{Word} uses the string argument to define
+    I{sets} of matchable characters. This expression would match "Add", "AAA",
+    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
+    To match an exact literal string, use L{Literal} or L{Keyword}.
+
+    pyparsing includes helper strings for building Words:
+     - L{alphas}
+     - L{nums}
+     - L{alphanums}
+     - L{hexnums}
+     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
+     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
+     - L{printables} (any non-whitespace character)
+
+    Example::
+        # a word composed of digits
+        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+        
+        # a word with a leading capital, and zero or more lowercase
+        capital_word = Word(alphas.upper(), alphas.lower())
+
+        # hostnames are alphanumeric, with leading alpha, and '-'
+        hostname = Word(alphas, alphanums+'-')
+        
+        # roman numeral (not a strict parser, accepts invalid mix of characters)
+        roman = Word("IVXLCDM")
+        
+        # any string of non-whitespace characters, except for ','
+        csv_value = Word(printables, excludeChars=",")
+    """
+    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+        super(Word,self).__init__()
+        if excludeChars:
+            initChars = ''.join(c for c in initChars if c not in excludeChars)
+            if bodyChars:
+                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+        self.initCharsOrig = initChars
+        self.initChars = set(initChars)
+        if bodyChars :
+            self.bodyCharsOrig = bodyChars
+            self.bodyChars = set(bodyChars)
+        else:
+            self.bodyCharsOrig = initChars
+            self.bodyChars = set(initChars)
+
+        self.maxSpecified = max > 0
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.asKeyword = asKeyword
+
+        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+            if self.bodyCharsOrig == self.initCharsOrig:
+                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+            elif len(self.initCharsOrig) == 1:
+                self.reString = "%s[%s]*" % \
+                                      (re.escape(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            else:
+                self.reString = "[%s][%s]*" % \
+                                      (_escapeRegexRangeChars(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            if self.asKeyword:
+                self.reString = r"\b"+self.reString+r"\b"
+            try:
+                self.re = re.compile( self.reString )
+            except Exception:
+                self.re = None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.re:
+            result = self.re.match(instring,loc)
+            if not result:
+                raise ParseException(instring, loc, self.errmsg, self)
+
+            loc = result.end()
+            return loc, result.group()
+
+        if not(instring[ loc ] in self.initChars):
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        instrlen = len(instring)
+        bodychars = self.bodyChars
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, instrlen )
+        while loc < maxloc and instring[loc] in bodychars:
+            loc += 1
+
+        throwException = False
+        if loc - start < self.minLen:
+            throwException = True
+        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+            throwException = True
+        if self.asKeyword:
+            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+                throwException = True
+
+        if throwException:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(Word,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+
+            def charsAsStr(s):
+                if len(s)>4:
+                    return s[:4]+"..."
+                else:
+                    return s
+
+            if ( self.initCharsOrig != self.bodyCharsOrig ):
+                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+            else:
+                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+        return self.strRepr
+
+
+class Regex(Token):
+    r"""
+    Token for matching strings that match a given regular expression.
+    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
+    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
+    named parse results.
+
+    Example::
+        realnum = Regex(r"[+-]?\d+\.\d*")
+        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+    """
+    compiledREtype = type(re.compile("[A-Z]"))
+    def __init__( self, pattern, flags=0):
+        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
+        super(Regex,self).__init__()
+
+        if isinstance(pattern, basestring):
+            if not pattern:
+                warnings.warn("null string passed to Regex; use Empty() instead",
+                        SyntaxWarning, stacklevel=2)
+
+            self.pattern = pattern
+            self.flags = flags
+
+            try:
+                self.re = re.compile(self.pattern, self.flags)
+                self.reString = self.pattern
+            except sre_constants.error:
+                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+                    SyntaxWarning, stacklevel=2)
+                raise
+
+        elif isinstance(pattern, Regex.compiledREtype):
+            self.re = pattern
+            self.pattern = \
+            self.reString = str(pattern)
+            self.flags = flags
+            
+        else:
+            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = self.re.match(instring,loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        d = result.groupdict()
+        ret = ParseResults(result.group())
+        if d:
+            for k in d:
+                ret[k] = d[k]
+        return loc,ret
+
+    def __str__( self ):
+        try:
+            return super(Regex,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+        return self.strRepr
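+
+# Illustrative sketch (not part of the original source): named groups in the
+# pattern are copied into the ParseResults as named results:
+#
+#     >>> date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+#     >>> r = date.parseString("2016-07-04")
+#     >>> (r["year"], r["month"], r["day"])
+#     ('2016', '07', '04')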
+
+
+class QuotedString(Token):
+    r"""
+    Token for matching strings that are delimited by quoting characters.
+    
+    Defined with the following parameters:
+        - quoteChar - string of one or more characters defining the quote delimiting string
+        - escChar - character to escape quotes, typically backslash (default=C{None})
+        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+    Example::
+        qs = QuotedString('"')
+        print(qs.searchString('lsjdf "This is the quote" sldjf'))
+        complex_qs = QuotedString('{{', endQuoteChar='}}')
+        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+        sql_qs = QuotedString('"', escQuote='""')
+        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+    prints::
+        [['This is the quote']]
+        [['This is the "quote"']]
+        [['This is the quote with "embedded" quotes']]
+    """
+    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+        super(QuotedString,self).__init__()
+
+        # remove white space from quote chars - won't work anyway
+        quoteChar = quoteChar.strip()
+        if not quoteChar:
+            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+            raise SyntaxError()
+
+        if endQuoteChar is None:
+            endQuoteChar = quoteChar
+        else:
+            endQuoteChar = endQuoteChar.strip()
+            if not endQuoteChar:
+                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+                raise SyntaxError()
+
+        self.quoteChar = quoteChar
+        self.quoteCharLen = len(quoteChar)
+        self.firstQuoteChar = quoteChar[0]
+        self.endQuoteChar = endQuoteChar
+        self.endQuoteCharLen = len(endQuoteChar)
+        self.escChar = escChar
+        self.escQuote = escQuote
+        self.unquoteResults = unquoteResults
+        self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+        if multiline:
+            self.flags = re.MULTILINE | re.DOTALL
+            self.pattern = r'%s(?:[^%s%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        else:
+            self.flags = 0
+            self.pattern = r'%s(?:[^%s\n\r%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        if len(self.endQuoteChar) > 1:
+            self.pattern += (
+                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
+                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+                )
+        if escQuote:
+            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+        if escChar:
+            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+        try:
+            self.re = re.compile(self.pattern, self.flags)
+            self.reString = self.pattern
+        except sre_constants.error:
+            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+                SyntaxWarning, stacklevel=2)
+            raise
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result.group()
+
+        if self.unquoteResults:
+
+            # strip off quotes
+            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+            if isinstance(ret,basestring):
+                # replace escaped whitespace
+                if '\\' in ret and self.convertWhitespaceEscapes:
+                    ws_map = {
+                        r'\t' : '\t',
+                        r'\n' : '\n',
+                        r'\f' : '\f',
+                        r'\r' : '\r',
+                    }
+                    for wslit,wschar in ws_map.items():
+                        ret = ret.replace(wslit, wschar)
+
+                # replace escaped characters
+                if self.escChar:
+                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+                # replace escaped quotes
+                if self.escQuote:
+                    ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+        return loc, ret
+
+    def __str__( self ):
+        try:
+            return super(QuotedString,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+        return self.strRepr
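+
+# Illustrative sketch (not part of the original source): with escChar set and
+# unquoteResults left True, escaped quotes are unescaped in the returned text:
+#
+#     >>> qs = QuotedString('"', escChar='\\')
+#     >>> qs.parseString(r'"say \"hi\""').asList()
+#     ['say "hi"']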
+
+
+class CharsNotIn(Token):
+    """
+    Token for matching words composed of characters I{not} in a given set (will
+    include whitespace in matched characters if not listed in the provided exclusion set - see example).
+    Defined with string containing all disallowed characters, and an optional
+    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
+    minimum value < 1 is not valid); the default values for C{max} and C{exact}
+    are 0, meaning no maximum or exact length restriction.
+
+    Example::
+        # define a comma-separated-value as anything that is not a ','
+        csv_value = CharsNotIn(',')
+        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+    prints::
+        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+    """
+    def __init__( self, notChars, min=1, max=0, exact=0 ):
+        super(CharsNotIn,self).__init__()
+        self.skipWhitespace = False
+        self.notChars = notChars
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = ( self.minLen == 0 )
+        self.mayIndexError = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[loc] in self.notChars:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        notchars = self.notChars
+        maxlen = min( start+self.maxLen, len(instring) )
+        while loc < maxlen and \
+              (instring[loc] not in notchars):
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(CharsNotIn, self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            if len(self.notChars) > 4:
+                self.strRepr = "!W:(%s...)" % self.notChars[:4]
+            else:
+                self.strRepr = "!W:(%s)" % self.notChars
+
+        return self.strRepr
+
+class White(Token):
+    """
+    Special matching class for matching whitespace.  Normally, whitespace is ignored
+    by pyparsing grammars.  This class is included when some whitespace structures
+    are significant.  Define with a string containing the whitespace characters to be
+    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
+    as defined for the C{L{Word}} class.
+    """
+    whiteStrs = {
+        " " : "<SPC>",
+        "\t": "<TAB>",
+        "\n": "<LF>",
+        "\r": "<CR>",
+        "\f": "<FF>",
+        }
+    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+        super(White,self).__init__()
+        self.matchWhite = ws
+        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
+        #~ self.leaveWhitespace()
+        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+        self.mayReturnEmpty = True
+        self.errmsg = "Expected " + self.name
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if not(instring[ loc ] in self.matchWhite):
+            raise ParseException(instring, loc, self.errmsg, self)
+        start = loc
+        loc += 1
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, len(instring) )
+        while loc < maxloc and instring[loc] in self.matchWhite:
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
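+
+# Illustrative sketch (not part of the original source): White turns whitespace
+# into something that can be matched explicitly instead of silently skipped:
+#
+#     >>> pair = Word(alphas) + White(" ").suppress() + Word(alphas)
+#     >>> pair.parseString("a b").asList()
+#     ['a', 'b']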
+
+
+class _PositionToken(Token):
+    def __init__( self ):
+        super(_PositionToken,self).__init__()
+        self.name=self.__class__.__name__
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+    """
+    Token to advance to a specific column of input text; useful for tabular report scraping.
+    """
+    def __init__( self, colno ):
+        super(GoToColumn,self).__init__()
+        self.col = colno
+
+    def preParse( self, instring, loc ):
+        if col(loc,instring) != self.col:
+            instrlen = len(instring)
+            if self.ignoreExprs:
+                loc = self._skipIgnorables( instring, loc )
+            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+                loc += 1
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        thiscol = col( loc, instring )
+        if thiscol > self.col:
+            raise ParseException( instring, loc, "Text not in expected column", self )
+        newloc = loc + self.col - thiscol
+        ret = instring[ loc: newloc ]
+        return newloc, ret
+
+
+class LineStart(_PositionToken):
+    """
+    Matches if current position is at the beginning of a line within the parse string
+    
+    Example::
+    
+        test = '''\
+        AAA this line
+        AAA and this line
+          AAA but not this one
+        B AAA and definitely not this one
+        '''
+
+        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+            print(t)
+    
+    Prints::
+        ['AAA', ' this line']
+        ['AAA', ' and this line']    
+
+    """
+    def __init__( self ):
+        super(LineStart,self).__init__()
+        self.errmsg = "Expected start of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if col(loc, instring) == 1:
+            return loc, []
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+    """
+    Matches if current position is at the end of a line within the parse string
+    """
+    def __init__( self ):
+        super(LineEnd,self).__init__()
+        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+        self.errmsg = "Expected end of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc<len(instring):
+            if instring[loc] == "\n":
+                return loc+1, "\n"
+            else:
+                raise ParseException(instring, loc, self.errmsg, self)
+        elif loc == len(instring):
+            return loc+1, []
+        else:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+class WordStart(_PositionToken):
+    """
+    Matches if the current position is at the beginning of a Word, and
+    is not preceded by any character in a given set of C{wordChars}
+    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
+    the string being parsed, or at the beginning of a line.
+    """
+    def __init__(self, wordChars = printables):
+        super(WordStart,self).__init__()
+        self.wordChars = set(wordChars)
+        self.errmsg = "Not at the start of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        if loc != 0:
+            if (instring[loc-1] in self.wordChars or
+                instring[loc] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
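+
+# Illustrative sketch (not part of the original source): WordStart(alphanums)
+# emulates a leading regex \b, rejecting matches embedded inside a larger word:
+#
+#     >>> expr = WordStart(alphanums) + Literal("cat")
+#     >>> expr.searchString("cat concatenate bobcat").asList()
+#     [['cat']]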
+
+class WordEnd(_PositionToken):
+    """
+    Matches if the current position is at the end of a Word, and
+    is not followed by any character in a given set of C{wordChars}
+    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
+    the string being parsed, or at the end of a line.
+    """
+    def __init__(self, wordChars = printables):
+        super(WordEnd,self).__init__()
+        self.wordChars = set(wordChars)
+        self.skipWhitespace = False
+        self.errmsg = "Not at the end of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        instrlen = len(instring)
+        if instrlen>0 and loc<instrlen:
+            if (instring[loc] in self.wordChars or
+                instring[loc-1] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+
+class Or(ParseExpression):
+    """
+    Requires that at least one C{ParseExpression} is found.
+    If two expressions match, the expression that matches the longest string will be used.
+    May be constructed using the C{'^'} operator.
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(Or,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        matches = []
+        for e in self.exprs:
+            try:
+                loc2 = e.tryParse( instring, loc )
+            except ParseException as err:
+                err.__traceback__ = None
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+            else:
+                # save match among all matches, to retry longest to shortest
+                matches.append((loc2, e))
+
+        if matches:
+            matches.sort(key=lambda x: -x[0])
+            for _,e in matches:
+                try:
+                    return e._parse( instring, loc, doActions )
+                except ParseException as err:
+                    err.__traceback__ = None
+                    if err.loc > maxExcLoc:
+                        maxException = err
+                        maxExcLoc = err.loc
+
+        if maxException is not None:
+            maxException.msg = self.errmsg
+            raise maxException
+        else:
+            raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+
+    def __ixor__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #Or( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
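+
+# Illustrative sketch (not part of the original source): '^' (Or) evaluates all
+# alternatives and commits to the longest match, while '|' (MatchFirst, below)
+# commits to the first alternative that matches:
+#
+#     >>> number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+#     >>> number.searchString("123 3.1416 789").asList()
+#     [['123'], ['3.1416'], ['789']]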
+
+
+class MatchFirst(ParseExpression):
+    """
+    Requires that at least one C{ParseExpression} is found.
+    If two expressions match, the first one listed is the one that will match.
+    May be constructed using the C{'|'} operator.
+
+    Example::
+        # construct MatchFirst using '|' operator
+        
+        # watch the order of expressions to match
+        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+        # put more selective expression first
+        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(MatchFirst,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        for e in self.exprs:
+            try:
+                ret = e._parse( instring, loc, doActions )
+                return ret
+            except ParseException as err:
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+
+        # only got here if no expression matched, raise exception for match that made it the furthest
+        else:
+            if maxException is not None:
+                maxException.msg = self.errmsg
+                raise maxException
+            else:
+                raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+    def __ior__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #MatchFirst( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class Each(ParseExpression):
+    """
+    Requires all given C{ParseExpression}s to be found, but in any order.
+    Expressions may be separated by whitespace.
+    May be constructed using the C{'&'} operator.
+
+    Example::
+        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+        integer = Word(nums)
+        shape_attr = "shape:" + shape_type("shape")
+        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+        color_attr = "color:" + color("color")
+        size_attr = "size:" + integer("size")
+
+        # use Each (using operator '&') to accept attributes in any order 
+        # (shape and posn are required, color and size are optional)
+        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+        shape_spec.runTests('''
+            shape: SQUARE color: BLACK posn: 100, 120
+            shape: CIRCLE size: 50 color: BLUE posn: 50,80
+            color:GREEN size:20 shape:TRIANGLE posn:20,40
+            '''
+            )
+    prints::
+        shape: SQUARE color: BLACK posn: 100, 120
+        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+        - color: BLACK
+        - posn: ['100', ',', '120']
+          - x: 100
+          - y: 120
+        - shape: SQUARE
+
+
+        shape: CIRCLE size: 50 color: BLUE posn: 50,80
+        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+        - color: BLUE
+        - posn: ['50', ',', '80']
+          - x: 50
+          - y: 80
+        - shape: CIRCLE
+        - size: 50
+
+
+        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+        - color: GREEN
+        - posn: ['20', ',', '40']
+          - x: 20
+          - y: 40
+        - shape: TRIANGLE
+        - size: 20
+    """
+    def __init__( self, exprs, savelist = True ):
+        super(Each,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        self.skipWhitespace = True
+        self.initExprGroups = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.initExprGroups:
+            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
+            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
+            self.optionals = opt1 + opt2
+            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+            self.required += self.multirequired
+            self.initExprGroups = False
+        tmpLoc = loc
+        tmpReqd = self.required[:]
+        tmpOpt  = self.optionals[:]
+        matchOrder = []
+
+        keepMatching = True
+        while keepMatching:
+            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+            failed = []
+            for e in tmpExprs:
+                try:
+                    tmpLoc = e.tryParse( instring, tmpLoc )
+                except ParseException:
+                    failed.append(e)
+                else:
+                    matchOrder.append(self.opt1map.get(id(e),e))
+                    if e in tmpReqd:
+                        tmpReqd.remove(e)
+                    elif e in tmpOpt:
+                        tmpOpt.remove(e)
+            if len(failed) == len(tmpExprs):
+                keepMatching = False
+
+        if tmpReqd:
+            missing = ", ".join(_ustr(e) for e in tmpReqd)
+            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+        # add any unmatched Optionals, in case they have default values defined
+        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+        resultlist = []
+        for e in matchOrder:
+            loc,results = e._parse(instring,loc,doActions)
+            resultlist.append(results)
+
+        finalResults = sum(resultlist, ParseResults([]))
+        return loc, finalResults
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class ParseElementEnhance(ParserElement):
+    """
+    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(ParseElementEnhance,self).__init__(savelist)
+        if isinstance( expr, basestring ):
+            if issubclass(ParserElement._literalStringClass, Token):
+                expr = ParserElement._literalStringClass(expr)
+            else:
+                expr = ParserElement._literalStringClass(Literal(expr))
+        self.expr = expr
+        self.strRepr = None
+        if expr is not None:
+            self.mayIndexError = expr.mayIndexError
+            self.mayReturnEmpty = expr.mayReturnEmpty
+            self.setWhitespaceChars( expr.whiteChars )
+            self.skipWhitespace = expr.skipWhitespace
+            self.saveAsList = expr.saveAsList
+            self.callPreparse = expr.callPreparse
+            self.ignoreExprs.extend(expr.ignoreExprs)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr is not None:
+            return self.expr._parse( instring, loc, doActions, callPreParse=False )
+        else:
+            raise ParseException("",loc,self.errmsg,self)
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        self.expr = self.expr.copy()
+        if self.expr is not None:
+            self.expr.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseElementEnhance, self).ignore( other )
+                if self.expr is not None:
+                    self.expr.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseElementEnhance, self).ignore( other )
+            if self.expr is not None:
+                self.expr.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def streamline( self ):
+        super(ParseElementEnhance,self).streamline()
+        if self.expr is not None:
+            self.expr.streamline()
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        if self in parseElementList:
+            raise RecursiveGrammarException( parseElementList+[self] )
+        subRecCheckList = parseElementList[:] + [ self ]
+        if self.expr is not None:
+            self.expr.checkRecursion( subRecCheckList )
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        if self.expr is not None:
+            self.expr.validate(tmp)
+        self.checkRecursion( [] )
+
+    def __str__( self ):
+        try:
+            return super(ParseElementEnhance,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None and self.expr is not None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+        return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+    """
+    Lookahead matching of the given parse expression.  C{FollowedBy}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression matches at the current
+    position.  C{FollowedBy} always returns a null token list.
+
+    Example::
+        # use FollowedBy to match a label only if it is followed by a ':'
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        
+        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+    prints::
+        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+    """
+    def __init__( self, expr ):
+        super(FollowedBy,self).__init__(expr)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self.expr.tryParse( instring, loc )
+        return loc, []
+
+
+class NotAny(ParseElementEnhance):
+    """
+    Lookahead to disallow matching with the given parse expression.  C{NotAny}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression does I{not} match at the current
+    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
+    always returns a null token list.  May be constructed using the '~' operator.
+
+    Example::
+        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
+
+        # take care not to mistake keywords for identifiers
+        ident = ~(AND | OR | NOT) + Word(alphas)
+    """
+    def __init__( self, expr ):
+        super(NotAny,self).__init__(expr)
+        #~ self.leaveWhitespace()
+        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+        self.mayReturnEmpty = True
+        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr.canParseNext(instring, loc):
+            raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+        return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+    def __init__( self, expr, stopOn=None):
+        super(_MultipleMatch, self).__init__(expr)
+        self.saveAsList = True
+        ender = stopOn
+        if isinstance(ender, basestring):
+            ender = ParserElement._literalStringClass(ender)
+        self.not_ender = ~ender if ender is not None else None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self_expr_parse = self.expr._parse
+        self_skip_ignorables = self._skipIgnorables
+        check_ender = self.not_ender is not None
+        if check_ender:
+            try_not_ender = self.not_ender.tryParse
+        
+        # must be at least one (but first see if we are the stopOn sentinel;
+        # if so, fail)
+        if check_ender:
+            try_not_ender(instring, loc)
+        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+        try:
+            hasIgnoreExprs = (not not self.ignoreExprs)
+            while 1:
+                if check_ender:
+                    try_not_ender(instring, loc)
+                if hasIgnoreExprs:
+                    preloc = self_skip_ignorables( instring, loc )
+                else:
+                    preloc = loc
+                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+                if tmptokens or tmptokens.haskeys():
+                    tokens += tmptokens
+        except (ParseException,IndexError):
+            pass
+
+        return loc, tokens
+        
+class OneOrMore(_MultipleMatch):
+    """
+    Repetition of one or more of the given expression.
+    
+    Parameters:
+     - expr - expression that must match one or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition 
+          expression)          
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: BLACK"
+        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+        
+        # could also be written as
+        (attr_expr * (1,)).parseString(text).pprint()
+    """
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+        return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+    """
+    Optional repetition of zero or more of the given expression.
+    
+    Parameters:
+     - expr - expression that must match zero or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition 
+          expression)          
+
+    Example: similar to L{OneOrMore}
+    """
+    def __init__( self, expr, stopOn=None):
+        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
+        self.mayReturnEmpty = True
+        
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+        except (ParseException,IndexError):
+            return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+        return self.strRepr
+
+class _NullToken(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+    def __str__(self):
+        return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+    """
+    Optional matching of the given expression.
+
+    Parameters:
+     - expr - expression that must match zero or more times
+     - default (optional) - value to be returned if the optional expression is not found.
+
+    Example::
+        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+        zip.runTests('''
+            # traditional ZIP code
+            12345
+            
+            # ZIP+4 form
+            12101-0001
+            
+            # invalid ZIP
+            98765-
+            ''')
+    prints::
+        # traditional ZIP code
+        12345
+        ['12345']
+
+        # ZIP+4 form
+        12101-0001
+        ['12101-0001']
+
+        # invalid ZIP
+        98765-
+             ^
+        FAIL: Expected end of text (at char 5), (line:1, col:6)
+    """
+    def __init__( self, expr, default=_optionalNotMatched ):
+        super(Optional,self).__init__( expr, savelist=False )
+        self.saveAsList = self.expr.saveAsList
+        self.defaultValue = default
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+        except (ParseException,IndexError):
+            if self.defaultValue is not _optionalNotMatched:
+                if self.expr.resultsName:
+                    tokens = ParseResults([ self.defaultValue ])
+                    tokens[self.expr.resultsName] = self.defaultValue
+                else:
+                    tokens = [ self.defaultValue ]
+            else:
+                tokens = []
+        return loc, tokens
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]"
+
+        return self.strRepr
+
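+# A minimal illustrative sketch (hypothetical helper, not part of the library)
+# of Optional's default parameter: when the optional expression is absent, the
+# supplied default value is inserted into the results.
+def _demo_optional_default():
+    zip_code = Word(nums, exact=5) + Optional('-' + Word(nums, exact=4), default='-0000')
+    # the default fills in when the ZIP+4 qualifier is missing
+    assert zip_code.parseString("12345").asList() == ['12345', '-0000']
+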
+class SkipTo(ParseElementEnhance):
+    """
+    Token for skipping over all undefined text until the matched expression is found.
+
+    Parameters:
+     - expr - target expression marking the end of the data to be skipped
+     - include - (default=C{False}) if True, the target expression is also parsed 
+          (the skipped text and target expression are returned as a 2-element list).
+     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
+          comments) that might contain false matches to the target expression
+     - failOn - (default=C{None}) define expressions that are not allowed to be 
+          included in the skipped test; if found before the target expression is found, 
+          the SkipTo is not a match
+
+    Example::
+        report = '''
+            Outstanding Issues Report - 1 Jan 2000
+
+               # | Severity | Description                               |  Days Open
+            -----+----------+-------------------------------------------+-----------
+             101 | Critical | Intermittent system crash                 |          6
+              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
+              79 | Minor    | System slow when running too many reports |         47
+            '''
+        integer = Word(nums)
+        SEP = Suppress('|')
+        # use SkipTo to simply match everything up until the next SEP
+        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+        # - parse action will call token.strip() for each matched token, i.e., the description body
+        string_data = SkipTo(SEP, ignore=quotedString)
+        string_data.setParseAction(tokenMap(str.strip))
+        ticket_expr = (integer("issue_num") + SEP 
+                      + string_data("sev") + SEP 
+                      + string_data("desc") + SEP 
+                      + integer("days_open"))
+        
+        for tkt in ticket_expr.searchString(report):
+            print(tkt.dump())
+    prints::
+        ['101', 'Critical', 'Intermittent system crash', '6']
+        - days_open: 6
+        - desc: Intermittent system crash
+        - issue_num: 101
+        - sev: Critical
+        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+        - days_open: 14
+        - desc: Spelling error on Login ('log|n')
+        - issue_num: 94
+        - sev: Cosmetic
+        ['79', 'Minor', 'System slow when running too many reports', '47']
+        - days_open: 47
+        - desc: System slow when running too many reports
+        - issue_num: 79
+        - sev: Minor
+    """
+    def __init__( self, other, include=False, ignore=None, failOn=None ):
+        super( SkipTo, self ).__init__( other )
+        self.ignoreExpr = ignore
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.includeMatch = include
+        self.asList = False
+        if isinstance(failOn, basestring):
+            self.failOn = ParserElement._literalStringClass(failOn)
+        else:
+            self.failOn = failOn
+        self.errmsg = "No match found for "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        startloc = loc
+        instrlen = len(instring)
+        expr = self.expr
+        expr_parse = self.expr._parse
+        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+        
+        tmploc = loc
+        while tmploc <= instrlen:
+            if self_failOn_canParseNext is not None:
+                # break if failOn expression matches
+                if self_failOn_canParseNext(instring, tmploc):
+                    break
+                    
+            if self_ignoreExpr_tryParse is not None:
+                # advance past ignore expressions
+                while 1:
+                    try:
+                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+                    except ParseBaseException:
+                        break
+            
+            try:
+                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+            except (ParseException, IndexError):
+                # no match, advance loc in string
+                tmploc += 1
+            else:
+                # matched skipto expr, done
+                break
+
+        else:
+            # ran off the end of the input string without matching skipto expr, fail
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        # build up return values
+        loc = tmploc
+        skiptext = instring[startloc:loc]
+        skipresult = ParseResults(skiptext)
+        
+        if self.includeMatch:
+            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+            skipresult += mat
+
+        return loc, skipresult
+
+class Forward(ParseElementEnhance):
+    """
+    Forward declaration of an expression to be defined later -
+    used for recursive grammars, such as algebraic infix notation.
+    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+    Specifically, '|' has a lower precedence than '<<', so that::
+        fwdExpr << a | b | c
+    will actually be evaluated as::
+        (fwdExpr << a) | b | c
+    thereby leaving b and c out as parseable alternatives.  It is recommended that you
+    explicitly group the values inserted into the C{Forward}::
+        fwdExpr << (a | b | c)
+    Converting to use the '<<=' operator instead will avoid this problem.
+
+    See L{ParseResults.pprint} for an example of a recursive parser created using
+    C{Forward}.
+    """
+    def __init__( self, other=None ):
+        super(Forward,self).__init__( other, savelist=False )
+
+    def __lshift__( self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass(other)
+        self.expr = other
+        self.strRepr = None
+        self.mayIndexError = self.expr.mayIndexError
+        self.mayReturnEmpty = self.expr.mayReturnEmpty
+        self.setWhitespaceChars( self.expr.whiteChars )
+        self.skipWhitespace = self.expr.skipWhitespace
+        self.saveAsList = self.expr.saveAsList
+        self.ignoreExprs.extend(self.expr.ignoreExprs)
+        return self
+        
+    def __ilshift__(self, other):
+        return self << other
+    
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        return self
+
+    def streamline( self ):
+        if not self.streamlined:
+            self.streamlined = True
+            if self.expr is not None:
+                self.expr.streamline()
+        return self
+
+    def validate( self, validateTrace=[] ):
+        if self not in validateTrace:
+            tmp = validateTrace[:]+[self]
+            if self.expr is not None:
+                self.expr.validate(tmp)
+        self.checkRecursion([])
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+        return self.__class__.__name__ + ": ..."
+
+        # stubbed out for now - creates awful memory and perf issues
+        self._revertClass = self.__class__
+        self.__class__ = _ForwardNoRecurse
+        try:
+            if self.expr is not None:
+                retString = _ustr(self.expr)
+            else:
+                retString = "None"
+        finally:
+            self.__class__ = self._revertClass
+        return self.__class__.__name__ + ": " + retString
+
+    def copy(self):
+        if self.expr is not None:
+            return super(Forward,self).copy()
+        else:
+            ret = Forward()
+            ret <<= self
+            return ret
+
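+# A minimal illustrative sketch (hypothetical helper, not part of the library)
+# of a recursive grammar built on Forward, assigned with '<<=' to sidestep the
+# '<<' operator-precedence pitfall described in the class docstring.
+def _demo_forward_recursion():
+    LPAR, RPAR = map(Suppress, "()")
+    expr = Forward()
+    atom = Word(nums) | Group(LPAR + expr + RPAR)
+    expr <<= atom + ZeroOrMore('+' + atom)
+    # parenthesized sub-expressions recurse through the Forward
+    assert expr.parseString("(1+2)+3").asList() == [['1', '+', '2'], '+', '3']
+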
+class _ForwardNoRecurse(Forward):
+    def __str__( self ):
+        return "..."
+
+class TokenConverter(ParseElementEnhance):
+    """
+    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(TokenConverter,self).__init__( expr )#, savelist )
+        self.saveAsList = False
+
+class Combine(TokenConverter):
+    """
+    Converter to concatenate all matching tokens to a single string.
+    By default, the matching patterns must also be contiguous in the input string;
+    this can be disabled by specifying C{'adjacent=False'} in the constructor.
+
+    Example::
+        real = Word(nums) + '.' + Word(nums)
+        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+        # will also erroneously match the following
+        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+        real = Combine(Word(nums) + '.' + Word(nums))
+        print(real.parseString('3.1416')) # -> ['3.1416']
+        # no match when there are internal spaces
+        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and retToks.haskeys():
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """
+    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+    Example::
+        ident = Word(alphas)
+        num = Word(nums)
+        term = ident | num
+        func = ident + Optional(delimitedList(term))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
+
+        func = ident + Group(Optional(delimitedList(term)))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
+    """
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """
+    Converter to return a repetitive expression as a list, but also as a dictionary.
+    Each element can also be referenced using the first token in the expression as its key.
+    Useful for tabular report scraping when the first column can be used as a item key.
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        
+        # print attributes as plain groups
+        print(OneOrMore(attr_expr).parseString(text).dump())
+        
+        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        print(result.dump())
+        
+        # access named fields as dict entries, or output as dict
+        print(result['shape'])        
+        print(result.asDict())
+    prints::
+        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+    See more examples at L{ParseResults} of accessing fields by results name.
+    """
+    def __init__( self, expr ):
+        super(Dict,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        for i,tok in enumerate(tokenlist):
+            if len(tok) == 0:
+                continue
+            ikey = tok[0]
+            if isinstance(ikey,int):
+                ikey = _ustr(tok[0]).strip()
+            if len(tok)==1:
+                tokenlist[ikey] = _ParseResultsWithOffset("",i)
+            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+            else:
+                dictvalue = tok.copy() #ParseResults(i)
+                del dictvalue[0]
+                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+                else:
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+        if self.resultsName:
+            return [ tokenlist ]
+        else:
+            return tokenlist
+
+
+class Suppress(TokenConverter):
+    """
+    Converter for ignoring the results of a parsed expression.
+
+    Example::
+        source = "a, b, c,d"
+        wd = Word(alphas)
+        wd_list1 = wd + ZeroOrMore(',' + wd)
+        print(wd_list1.parseString(source))
+
+        # often, delimiters that are useful during parsing are just in the
+        # way afterward - use Suppress to keep them out of the parsed output
+        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+        print(wd_list2.parseString(source))
+    prints::
+        ['a', ',', 'b', ',', 'c', ',', 'd']
+        ['a', 'b', 'c', 'd']
+    (See also L{delimitedList}.)
+    """
+    def postParse( self, instring, loc, tokenlist ):
+        return []
+
+    def suppress( self ):
+        return self
+
+
+class OnlyOnce(object):
+    """
+    Wrapper for parse actions, to ensure they are only called once.
+    """
+    def __init__(self, methodCall):
+        self.callable = _trim_arity(methodCall)
+        self.called = False
+    def __call__(self,s,l,t):
+        if not self.called:
+            results = self.callable(s,l,t)
+            self.called = True
+            return results
+        raise ParseException(s,l,"")
+    def reset(self):
+        self.called = False
+
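+# A minimal illustrative sketch (hypothetical helper, not part of the library):
+# OnlyOnce lets a parse action fire a single time; later matches fail with a
+# ParseException until reset() is called.
+def _demo_only_once():
+    mark = OnlyOnce(lambda s,l,t: ['seen'])
+    wd = Word(alphas).setParseAction(mark)
+    assert wd.parseString("abc").asList() == ['seen']
+    try:
+        wd.parseString("xyz")
+    except ParseException:
+        mark.reset()          # re-arm the wrapped parse action
+    assert wd.parseString("xyz").asList() == ['seen']
+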
+def traceParseAction(f):
+    """
+    Decorator for debugging parse actions. 
+    
+    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
+    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
+
+    Example::
+        wd = Word(alphas)
+
+        @traceParseAction
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+    prints::
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s,l,t = paArgs[-3:]
+        if len(paArgs)>3:
+            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+            raise
+        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
+        return ret
+    try:
+        z.__name__ = f.__name__
+    except AttributeError:
+        pass
+    return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+    """
+    Helper to define a delimited list of expressions - the delimiter defaults to ','.
+    By default, the list elements and delimiters can have intervening whitespace, and
+    comments, but this can be overridden by passing C{combine=True} in the constructor.
+    If C{combine} is set to C{True}, the matching tokens are returned as a single token
+    string, with the delimiters included; otherwise, the matching tokens are returned
+    as a list of tokens, with the delimiters suppressed.
+
+    Example::
+        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+    """
+    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+    if combine:
+        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+    else:
+        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
+
+def countedArray( expr, intExpr=None ):
+    """
+    Helper to define a counted list of expressions.
+    This helper defines a pattern of the form::
+        integer expr expr expr...
+    where the leading integer tells how many expr expressions follow.
+    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
+    
+    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
+
+    Example::
+        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
+
+        # in this parser, the leading integer value is given in binary,
+        # '10' indicating that 2 values are in the array
+        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
+    """
+    arrayExpr = Forward()
+    def countFieldParseAction(s,l,t):
+        n = t[0]
+        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+        return []
+    if intExpr is None:
+        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+    else:
+        intExpr = intExpr.copy()
+    intExpr.setName("arrayLen")
+    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+    ret = []
+    for i in L:
+        if isinstance(i,list):
+            ret.extend(_flatten(i))
+        else:
+            ret.append(i)
+    return ret
+
+def matchPreviousLiteral(expr):
+    """
+    Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks
+    for a 'repeat' of a previous expression.  For example::
+        first = Word(nums)
+        second = matchPreviousLiteral(first)
+        matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
+    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
+    If this is not desired, use C{matchPreviousExpr}.
+    Do I{not} use with packrat parsing enabled.
+    """
+    rep = Forward()
+    def copyTokenToRepeater(s,l,t):
+        if t:
+            if len(t) == 1:
+                rep << t[0]
+            else:
+                # flatten t tokens
+                tflat = _flatten(t.asList())
+                rep << And(Literal(tt) for tt in tflat)
+        else:
+            rep << Empty()
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
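+# A minimal illustrative sketch (hypothetical helper, not part of the library):
+# the second field must repeat the literal text matched by the first.
+def _demo_match_previous_literal():
+    first = Word(nums)
+    pair = first + ":" + matchPreviousLiteral(first)
+    assert pair.parseString("12:12").asList() == ['12', ':', '12']
+    # "12:13" would raise a ParseException, since '13' does not repeat '12'
+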
+def matchPreviousExpr(expr):
+    """
+    Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks
+    for a 'repeat' of a previous expression.  For example::
+        first = Word(nums)
+        second = matchPreviousExpr(first)
+        matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
+    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
+    the expressions are evaluated first, and then compared, so
+    C{"1"} is compared with C{"10"}.
+    Do I{not} use with packrat parsing enabled.
+    """
+    rep = Forward()
+    e2 = expr.copy()
+    rep <<= e2
+    def copyTokenToRepeater(s,l,t):
+        matchTokens = _flatten(t.asList())
+        def mustMatchTheseTokens(s,l,t):
+            theseTokens = _flatten(t.asList())
+            if  theseTokens != matchTokens:
+                raise ParseException("",0,"")
+        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def _escapeRegexRangeChars(s):
+    #~  escape these chars: ^-]
+    for c in r"\^-]":
+        s = s.replace(c,_bslash+c)
+    s = s.replace("\n",r"\n")
+    s = s.replace("\t",r"\t")
+    return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+    """
+    Helper to quickly define a set of alternative Literals, and makes sure to do
+    longest-first testing when there is a conflict, regardless of the input order,
+    but returns a C{L{MatchFirst}} for best performance.
+
+    Parameters:
+     - strs - a string of space-delimited literals, or a collection of string literals
+     - caseless - (default=C{False}) - treat all literals as caseless
+     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
+          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
+          if creating a C{Regex} raises an exception)
+
+    Example::
+        comp_oper = oneOf("< = > <= >= !=")
+        var = Word(alphas)
+        number = Word(nums)
+        term = var | number
+        comparison_expr = term + comp_oper + term
+        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
+    prints::
+        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+    """
+    if caseless:
+        isequal = ( lambda a,b: a.upper() == b.upper() )
+        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+        parseElementClass = CaselessLiteral
+    else:
+        isequal = ( lambda a,b: a == b )
+        masks = ( lambda a,b: b.startswith(a) )
+        parseElementClass = Literal
+
+    symbols = []
+    if isinstance(strs,basestring):
+        symbols = strs.split()
+    elif isinstance(strs, Iterable):
+        symbols = list(strs)
+    else:
+        warnings.warn("Invalid argument to oneOf, expected string or iterable",
+                SyntaxWarning, stacklevel=2)
+    if not symbols:
+        return NoMatch()
+
+    i = 0
+    while i < len(symbols)-1:
+        cur = symbols[i]
+        for j,other in enumerate(symbols[i+1:]):
+            if ( isequal(other, cur) ):
+                del symbols[i+j+1]
+                break
+            elif ( masks(cur, other) ):
+                del symbols[i+j+1]
+                symbols.insert(i,other)
+                cur = other
+                break
+        else:
+            i += 1
+
+    if not caseless and useRegex:
+        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+        try:
+            if len(symbols)==len("".join(symbols)):
+                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
+            else:
+                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
+        except Exception:
+            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+                    SyntaxWarning, stacklevel=2)
+
+
+    # last resort, just use MatchFirst
+    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf( key, value ):
+    """
+    Helper to easily and clearly define a dictionary by specifying the respective patterns
+    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
+    in the proper order.  The key pattern can include delimiting markers or punctuation,
+    as long as they are suppressed, thereby leaving the significant key text.  The value
+    pattern can include named results, so that the C{Dict} results can include named token
+    fields.
+
+    Example::
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        print(OneOrMore(attr_expr).parseString(text).dump())
+        
+        attr_label = label
+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+        # similar to Dict, but simpler call format
+        result = dictOf(attr_label, attr_value).parseString(text)
+        print(result.dump())
+        print(result['shape'])
+        print(result.shape)  # object attribute access works too
+        print(result.asDict())
+    prints::
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        SQUARE
+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+    """
+    return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+    """
+    Helper to return the original, untokenized text for a given expression.  Useful to
+    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+    revert separate tokens with intervening whitespace back to the original matching
+    input text. By default, returns a string containing the original parsed text.  
+       
+    If the optional C{asString} argument is passed as C{False}, then the return value is a 
+    C{L{ParseResults}} containing any results names that were originally matched, and a 
+    single token containing the original matched text from the input string.  So if 
+    the expression passed to C{L{originalTextFor}} contains expressions with defined
+    results names, you must set C{asString} to C{False} if you want to preserve those
+    results name values.
+
+    Example::
+        src = "this is test  bold text  normal text "
+        for tag in ("b","i"):
+            opener,closer = makeHTMLTags(tag)
+            patt = originalTextFor(opener + SkipTo(closer) + closer)
+            print(patt.searchString(src)[0])
+    prints::
+        ['<b> bold <i>text</i> </b>']
+        ['<i>text</i>']
+    """
+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+    endlocMarker = locMarker.copy()
+    endlocMarker.callPreparse = False
+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+    if asString:
+        extractText = lambda s,l,t: s[t._original_start:t._original_end]
+    else:
+        def extractText(s,l,t):
+            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+    matchExpr.setParseAction(extractText)
+    matchExpr.ignoreExprs = expr.ignoreExprs
+    return matchExpr
+
+def ungroup(expr): 
+    """
+    Helper to undo pyparsing's default grouping of And expressions, even
+    if all but one are non-empty.
+    """
+    return TokenConverter(expr).setParseAction(lambda t:t[0])
+
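+# A minimal illustrative sketch (hypothetical helper, not part of the library):
+# ungroup strips one level of nesting, returning a Group-ed result as its
+# bare token list.
+def _demo_ungroup():
+    grouped = Group(Word(alphas) + Word(nums))
+    assert grouped.parseString("ab 12").asList() == [['ab', '12']]
+    assert ungroup(grouped).parseString("ab 12").asList() == ['ab', '12']
+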
+def locatedExpr(expr):
+    """
+    Helper to decorate a returned token with its starting and ending locations in the input string.
+    This helper adds the following results names:
+     - locn_start = location where matched expression begins
+     - locn_end = location where matched expression ends
+     - value = the actual parsed results
+
+    Be careful if the input text contains C{<TAB>} characters, you may want to call
+    C{L{ParserElement.parseWithTabs}}
+
+    Example::
+        wd = Word(alphas)
+        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+            print(match)
+    prints::
+        [[0, 'ljsdf', 5]]
+        [[8, 'lksdjjf', 15]]
+        [[18, 'lkkjj', 23]]
+    """
+    locator = Empty().setParseAction(lambda s,l,t: l)
+    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty       = Empty().setName("empty")
+lineStart   = LineStart().setName("lineStart")
+lineEnd     = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd   = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+def srange(s):
+    r"""
+    Helper to easily define string ranges for use in Word construction.  Borrows
+    syntax from regexp '[]' string range definitions::
+        srange("[0-9]")   -> "0123456789"
+        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+    The input string must be enclosed in []'s, and the returned string is the expanded
+    character set joined into a single string.
+    The values enclosed in the []'s may be:
+     - a single character
+     - an escaped character with a leading backslash (such as C{\-} or C{\]})
+     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
+         (C{\0x##} is also supported for backwards compatibility) 
+     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
+     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
+     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
+    """
+    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
+    try:
+        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+    except Exception:
+        return ""
+
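+# A minimal illustrative sketch (hypothetical helper, not part of the library):
+# srange expands a regex-style character class into the literal character set,
+# which can then be handed to Word.
+def _demo_srange():
+    assert srange("[a-d]") == "abcd"
+    hex_word = Word(srange("[0-9a-fA-F]"))
+    assert hex_word.parseString("deadBEEF").asList() == ['deadBEEF']
+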
+def matchOnlyAtCol(n):
+    """
+    Helper method for defining parse actions that require matching at a specific
+    column in the input text.
+    """
+    def verifyCol(strg,locn,toks):
+        if col(locn,strg) != n:
+            raise ParseException(strg,locn,"matched token not at column %d" % n)
+    return verifyCol
+
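+# A minimal illustrative sketch (hypothetical helper, not part of the library):
+# matchOnlyAtCol builds a parse action that rejects a match unless it starts
+# at the given (1-based) column.
+def _demo_match_only_at_col():
+    col1_word = Word(alphas).setParseAction(matchOnlyAtCol(1))
+    assert col1_word.parseString("abc").asList() == ['abc']
+    # parsing "  abc" with leading whitespace would fail, since the match
+    # would begin at column 3
+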
+def replaceWith(replStr):
+    """
+    Helper method for common parse actions that simply return a literal value.  Especially
+    useful when used with C{L{transformString}()}.
+
+    Example::
+        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+        term = na | num
+        
+        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+    """
+    return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+    """
+    Helper parse action for removing quotation marks from parsed quoted strings.
+
+    Example::
+        # by default, quotation marks are included in parsed results
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use removeQuotes to strip quotation marks from parsed results
+        quotedString.setParseAction(removeQuotes)
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+def tokenMap(func, *args):
+    """
+    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional 
+    args are passed, they are forwarded to the given function as additional arguments after
+    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
+    parsed data to an integer using base 16.
+
+    Example (compare the last example to the one in L{ParserElement.transformString})::
+        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+        hex_ints.runTests('''
+            00 11 22 aa FF 0a 0d 1a
+            ''')
+        
+        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+        OneOrMore(upperword).runTests('''
+            my kingdom for a horse
+            ''')
+
+        wd = Word(alphas).setParseAction(tokenMap(str.title))
+        OneOrMore(wd).setParseAction(' '.join).runTests('''
+            now is the winter of our discontent made glorious summer by this sun of york
+            ''')
+    prints::
+        00 11 22 aa FF 0a 0d 1a
+        [0, 17, 34, 170, 255, 10, 13, 26]
+
+        my kingdom for a horse
+        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+        now is the winter of our discontent made glorious summer by this sun of york
+        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+    """
+    def pa(s,l,t):
+        return [func(tokn, *args) for tokn in t]
+
+    try:
+        func_name = getattr(func, '__name__', 
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    pa.__name__ = func_name
+
+    return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
+    
+def _makeTags(tagStr, xml):
+    """Internal helper to construct opening and closing tag expressions, given a tag name"""
+    if isinstance(tagStr,basestring):
+        resname = tagStr
+        tagStr = Keyword(tagStr, caseless=not xml)
+    else:
+        resname = tagStr.name
+
+    tagAttrName = Word(alphas,alphanums+"_-:")
+    if (xml):
+        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    else:
+        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
+        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
+                Optional( Suppress("=") + tagAttrValue ) ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    closeTag = Combine(_L("</") + tagStr + ">")
+
+    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
+    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
+    openTag.tag = resname
+    closeTag.tag = resname
+    return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+    """
+    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
+    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
+
+    Example::
+        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
+        a,a_end = makeHTMLTags("A")
+        link_expr = a + SkipTo(a_end)("link_text") + a_end
+        
+        for link in link_expr.searchString(text):
+            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
+            print(link.link_text, '->', link.href)
+    prints::
+        pyparsing -> http://pyparsing.wikispaces.com
+    """
+    return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+    """
+    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
+    tags only in the given upper/lower case.
+
+    Example: similar to L{makeHTMLTags}
+    """
+    return _makeTags( tagStr, True )
+
+def withAttribute(*args,**attrDict):
+    """
+    Helper to create a validating parse action to be used with start tags created
+    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
+    with a required attribute value, to avoid false matches on common tags such as
+    C{} or C{
}. + + Call C{withAttribute} with a series of attribute names and values. Specify the list + of filter attributes names and values as: + - keyword arguments, as in C{(align="right")}, or + - as an explicit dict with C{**} operator, when an attribute name is also a Python + reserved word, as in C{**{"class":"Customer", "align":"right"}} + - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) + For attribute names with a namespace prefix, you must use the second form. Attribute + names are matched insensitive to upper/lower case. + + If just testing for C{class} (with or without a namespace), use C{L{withClass}}. + + To verify that the attribute exists, but without specifying a value, pass + C{withAttribute.ANY_VALUE} as the value. + + Example:: + html = ''' +
+ Some text +
1 4 0 1 0
+
1,3 2,3 1,1
+
this has no type
+
+ + ''' + div,div_end = makeHTMLTags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().setParseAction(withAttribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attrDict.items() + attrs = [(k,v) for k,v in attrs] + def pa(s,l,tokens): + for attrName,attrValue in attrs: + if attrName not in tokens: + raise ParseException(s,l,"no matching attribute " + attrName) + if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % + (attrName, tokens[attrName], attrValue)) + return pa +withAttribute.ANY_VALUE = object() + +def withClass(classname, namespace=''): + """ + Simplified version of C{L{withAttribute}} when matching on a div class - made + difficult because C{class} is a reserved word in Python. + + Example:: + html = ''' +
+ Some text +
1 4 0 1 0
+
1,3 2,3 1,1
+
this <div> has no class
+
+ + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "%s:class" % namespace if namespace else "class" + return withAttribute(**{classattr : classname}) + +opAssoc = _Constants() +opAssoc.LEFT = object() +opAssoc.RIGHT = object() + +def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): + """ + Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary or + binary, left- or right-associative. Parse actions can also be attached + to operator expressions. The generated parser will also recognize the use + of parentheses to override operator precedences (see example below). + + Note: if you define a deep operator list, you may see performance issues + when using infixNotation. See L{ParserElement.enablePackrat} for a + mechanism to potentially improve your parser performance. + + Parameters: + - baseExpr - expression representing the most basic element for the nested + - opList - list of tuples, one for each operator precedence level in the + expression grammar; each tuple is of the form + (opExpr, numTerms, rightLeftAssoc, parseAction), where: + - opExpr is the pyparsing expression for the operator; + may also be a string, which will be converted to a Literal; + if numTerms is 3, opExpr is a tuple of two expressions, for the + two operators separating the 3 terms + - numTerms is the number of terms for this operator (must + be 1, 2, or 3) + - rightLeftAssoc is the indicator whether the operator is + right or left associative, using the pyparsing-defined + constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
+ - parseAction is the parse action to be associated with + expressions matching this operator expression (the + parse action tuple member may be omitted); if the parse action + is passed a tuple or list of functions, this is equivalent to + calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) + - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) + - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) + + Example:: + # simple example of four-function arithmetic with ints and variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infixNotation(integer | varname, + [ + ('-', 1, opAssoc.RIGHT), + (oneOf('* /'), 2, opAssoc.LEFT), + (oneOf('+ -'), 2, opAssoc.LEFT), + ]) + + arith_expr.runTests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', fullDump=False) + prints:: + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + ret = Forward() + lastExpr = baseExpr | ( lpar + ret + rpar ) + for i,operDef in enumerate(opList): + opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr + if arity == 3: + if opExpr is None or len(opExpr) != 2: + raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") + opExpr1, opExpr2 = opExpr + thisExpr = Forward().setName(termName) + if rightLeftAssoc == opAssoc.LEFT: + if arity == 1: + matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + else: + matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ + Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + elif rightLeftAssoc == opAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Optional): + opExpr = Optional(opExpr) + matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + else: + matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ + Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + else: + raise ValueError("operator must indicate right or left associativity") + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.setParseAction(*pa) + else: + matchExpr.setParseAction(pa) + thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) + lastExpr = thisExpr + ret <<= lastExpr + return ret + +operatorPrecedence = infixNotation +"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" + +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") 
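+# A minimal usage sketch of the quoted-string helpers above (illustrative only;
+# assumes this module is importable as ``pyparsing``):
+#
+#   from pyparsing import dblQuotedString, sglQuotedString
+#   print(dblQuotedString.parseString('"hello"'))   # -> ['"hello"']
+#   print(sglQuotedString.parseString("'world'"))   # -> ["'world'"]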
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") + +def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): + """ + Helper method for defining nested lists enclosed in opening and closing + delimiters ("(" and ")" are the default). + + Parameters: + - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression + - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression + - content - expression for items within the nested lists (default=C{None}) + - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) + + If an expression is not provided for the content argument, the nested + expression will capture all whitespace-delimited content between delimiters + as a list of separate values. + + Use the C{ignoreExpr} argument to define expressions that may contain + opening or closing characters that should not be treated as opening + or closing characters for nesting, such as quotedString or a comment + expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. + The default is L{quotedString}, but if no expressions are to be ignored, + then pass C{None} for this argument. + + Example:: + data_type = oneOf("void int short long char float double") + decl_data_type = Combine(data_type + Optional(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR,RPAR = map(Suppress, "()") + + code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(cStyleComment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.searchString(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + prints:: + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener,basestring) and isinstance(closer,basestring): + if len(opener) == 1 and len(closer)==1: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS + ).setParseAction(lambda t:t[0].strip())) + else: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + ~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + raise ValueError("opening and closing arguments must be strings if no content expression is given") + ret = 
Forward() + if ignoreExpr is not None: + ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) + else: + ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) + ret.setName('nested %s%s expression' % (opener,closer)) + return ret + +def indentedBlock(blockStatementExpr, indentStack, indent=True): + """ + Helper method for defining space-delimited indentation blocks, such as + those used to define block statements in Python source code. + + Parameters: + - blockStatementExpr - expression defining syntax of statement that + is repeated within the indented block + - indentStack - list created by caller to manage indentation stack + (multiple statementWithIndentedBlock expressions within a single grammar + should share a common indentStack) + - indent - boolean indicating whether block must be indented beyond the + the current level; set to False for block of left-most statements + (default=C{True}) + + A valid block must contain at least one C{blockStatement}. + + Example:: + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group( funcDecl + func_body ) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << ( funcDef | assignment | identifier ) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + prints:: + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + def checkPeerIndent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseFatalException(s,l,"illegal nesting") + raise ParseException(s,l,"not a peer entry") + + def checkSubIndent(s,l,t): + curCol = col(l,s) + if curCol > indentStack[-1]: + indentStack.append( curCol ) + else: + raise ParseException(s,l,"not a subentry") + + def checkUnindent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): + raise ParseException(s,l,"not an unindent") + indentStack.pop() + + NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') + if indent: + smExpr = Group( Optional(NL) + + #~ FollowedBy(blockStatementExpr) + + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) + else: + smExpr = Group( Optional(NL) + + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.setName('indented block') + +alphas8bit = 
srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form C{/* ... */}" + +htmlComment = Regex(r"").setName("HTML comment") +"Comment of the form C{}" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form C{// ... (to end of line)}" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" + +javaStyleComment = cppStyleComment +"Same as C{L{cppStyleComment}}" + +pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form C{# ... (to end of line)}" + +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional( Word(" \t") + + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") +commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. 
+ This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """ + Here are some common low-level expressions that may be useful in jump-starting parser development: + - numeric forms (L{integers}, L{reals}, L{scientific notation}) + - common L{programming identifiers} + - network addresses (L{MAC}, L{IPv4}, L{IPv6}) + - ISO8601 L{dates} and L{datetime} + - L{UUID} + - L{comma-separated list} + Parse actions: + - C{L{convertToInteger}} + - C{L{convertToFloat}} + - C{L{convertToDate}} + - C{L{convertToDatetime}} + - C{L{stripHTMLTags}} + - C{L{upcaseTokens}} + - C{L{downcaseTokens}} + + Example:: + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + prints:: + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.addParseAction(lambda t: t[0]/t[-1]) + + mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.addParseAction(sum) + + real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + """expression that parses a floating point number and returns a 
float""" + + sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + """expression that parses a floating point number with optional scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) + """any int or real number, returned as float""" + + identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") + "IPv4 address (C{0.0.0.0 - 255.255.255.255})" + + _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") + ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) + + Example:: + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + prints:: + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """ + Helper to create a parse action for converting parsed datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) + + Example:: + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + prints:: + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") + "ISO8601 date (C{yyyy-mm-dd})" + + iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + """ + Parse action to remove HTML tags from web page HTML source + + Example:: + # strip HTML links from normal text + text = 'More info at the
pyparsing wiki page' + td,td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + + print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) + + _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + + Optional( White(" \t") ) ) ).streamline().setName("commaItem") + comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) + """Parse action to convert tokens to upper case.""" + + downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) + """Parse action to convert tokens to lower case.""" + + +if __name__ == "__main__": + + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") + + ident = Word(alphas, alphanums + "_$") + + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec = ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" + 12345678-1234-5678-1234-567812345678 + """) diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fed59295403b80b1711d0d189ff97dde22808690 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py @@ -0,0 +1,73 @@ +import importlib.util +import sys + + +class VendorImporter: + """ + A PEP 302 meta path importer for finding optionally-vendored + or otherwise naturally-installed packages from root_name. + """ + + def __init__(self, root_name, vendored_names=(), vendor_pkg=None): + self.root_name = root_name + self.vendored_names = set(vendored_names) + self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') + + @property + def search_path(self): + """ + Search first the vendor package then as a natural package. 
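+        For example (illustrative): with ``root_name='pkg_resources.extern'``
+        the default ``vendor_pkg`` is ``'pkg_resources._vendor'``, so this
+        yields ``'pkg_resources._vendor.'`` first and then ``''``.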
+ """ + yield self.vendor_pkg + '.' + yield '' + + def _module_matches_namespace(self, fullname): + """Figure out if the target module is vendored.""" + root, base, target = fullname.partition(self.root_name + '.') + return not root and any(map(target.startswith, self.vendored_names)) + + def load_module(self, fullname): + """ + Iterate over the search path to locate and load fullname. + """ + root, base, target = fullname.partition(self.root_name + '.') + for prefix in self.search_path: + try: + extant = prefix + target + __import__(extant) + mod = sys.modules[extant] + sys.modules[fullname] = mod + return mod + except ImportError: + pass + else: + raise ImportError( + "The '{target}' package is required; " + "normally this is bundled with this package so if you get " + "this warning, consult the packager of your " + "distribution.".format(**locals()) + ) + + def create_module(self, spec): + return self.load_module(spec.name) + + def exec_module(self, module): + pass + + def find_spec(self, fullname, path=None, target=None): + """Return a module spec for vendored names.""" + return ( + importlib.util.spec_from_loader(fullname, self) + if self._module_matches_namespace(fullname) else None + ) + + def install(self): + """ + Install this importer into sys.meta_path if not already present. + """ + if self not in sys.meta_path: + sys.meta_path.append(self) + + +names = 'packaging', 'pyparsing', 'appdirs' +VendorImporter(__name__, names).install() diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7253826eddc966a5789761d0fa1e497168b80b8d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ebe07a7f29940d0987cadc4b6522b69698c78a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/setup.py b/llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..fe80d28f4658fdabdc1ec17b830fef00d20d41b3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/setup.py @@ -0,0 +1,6 @@ +import setuptools +setuptools.setup( + name="my-test-package", + version="1.0", + zip_safe=True, +) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..60989896ae8b3f69efc7d2350add8f6f19d85669 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/__init__.py @@ -0,0 +1,12 @@ +""" +A module that helps solving problems in physics. +""" + +from . 
import units
+from .matrices import mgamma, msigma, minkowski_tensor, mdft
+
+__all__ = [
+    'units',
+
+    'mgamma', 'msigma', 'minkowski_tensor', 'mdft',
+]
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a9812dc903673b4146916567037cd979aa98753
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/gamma_matrices.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/gamma_matrices.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d1c9593f3afcd5a42de1c3ff192fa2144a39824
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/gamma_matrices.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/gamma_matrices.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/gamma_matrices.py
new file mode 100644
index 0000000000000000000000000000000000000000..40c3d0754438902f304d01c2df354dd09f9ea257
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/gamma_matrices.py
@@ -0,0 +1,716 @@
+"""
+    Module to handle gamma matrices expressed as tensor objects.
+
+    Examples
+    ========
+
+    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
+    >>> from sympy.tensor.tensor import tensor_indices
+    >>> i = tensor_indices('i', LorentzIndex)
+    >>> G(i)
+    GammaMatrix(i)
+
+    Note that there is already an instance of GammaMatrixHead in four dimensions:
+    GammaMatrix, which is simply declared as
+
+    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix
+    >>> from sympy.tensor.tensor import tensor_indices
+    >>> i = tensor_indices('i', LorentzIndex)
+    >>> GammaMatrix(i)
+    GammaMatrix(i)
+
+    To access the metric tensor
+
+    >>> LorentzIndex.metric
+    metric(LorentzIndex,LorentzIndex)
+
+"""
+from sympy.core.mul import Mul
+from sympy.core.singleton import S
+from sympy.matrices.dense import eye
+from sympy.matrices.expressions.trace import trace
+from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
+    TensMul, TensAdd, tensor_mul, Tensor, TensorHead, TensorSymmetry
+
+
+# DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_name="S")
+
+
+LorentzIndex = TensorIndexType('LorentzIndex', dim=4, dummy_name="L")
+
+
+GammaMatrix = TensorHead("GammaMatrix", [LorentzIndex],
+                         TensorSymmetry.no_symmetry(1), comm=None)
+
+
+def extract_type_tens(expression, component):
+    """
+    Extract from a ``TensExpr`` all tensors with `component`.
+
+    Returns two tensor expressions:
+
+    * the first contains all ``Tensor`` objects having `component`;
+    * the second contains all remaining factors.
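+
+    A minimal illustration (sketch, not a doctest)::
+
+        from sympy.physics.hep.gamma_matrices import (GammaMatrix,
+            LorentzIndex, extract_type_tens)
+        from sympy.tensor.tensor import tensor_indices, TensorHead
+
+        p = TensorHead('p', [LorentzIndex])
+        i0, i1 = tensor_indices('i0:2', LorentzIndex)
+        gammas, rest = extract_type_tens(GammaMatrix(i0)*p(i1), GammaMatrix)
+        # gammas == GammaMatrix(i0), rest == p(i1)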
+ + + """ + if isinstance(expression, Tensor): + sp = [expression] + elif isinstance(expression, TensMul): + sp = expression.args + else: + raise ValueError('wrong type') + + # Collect all gamma matrices of the same dimension + new_expr = S.One + residual_expr = S.One + for i in sp: + if isinstance(i, Tensor) and i.component == component: + new_expr *= i + else: + residual_expr *= i + return new_expr, residual_expr + + +def simplify_gamma_expression(expression): + extracted_expr, residual_expr = extract_type_tens(expression, GammaMatrix) + res_expr = _simplify_single_line(extracted_expr) + return res_expr * residual_expr + + +def simplify_gpgp(ex, sort=True): + """ + simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)`` + + Examples + ======== + + >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \ + LorentzIndex, simplify_gpgp + >>> from sympy.tensor.tensor import tensor_indices, tensor_heads + >>> p, q = tensor_heads('p, q', [LorentzIndex]) + >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex) + >>> ps = p(i0)*G(-i0) + >>> qs = q(i0)*G(-i0) + >>> simplify_gpgp(ps*qs*qs) + GammaMatrix(-L_0)*p(L_0)*q(L_1)*q(-L_1) + """ + def _simplify_gpgp(ex): + components = ex.components + a = [] + comp_map = [] + for i, comp in enumerate(components): + comp_map.extend([i]*comp.rank) + dum = [(i[0], i[1], comp_map[i[0]], comp_map[i[1]]) for i in ex.dum] + for i in range(len(components)): + if components[i] != GammaMatrix: + continue + for dx in dum: + if dx[2] == i: + p_pos1 = dx[3] + elif dx[3] == i: + p_pos1 = dx[2] + else: + continue + comp1 = components[p_pos1] + if comp1.comm == 0 and comp1.rank == 1: + a.append((i, p_pos1)) + if not a: + return ex + elim = set() + tv = [] + hit = True + coeff = S.One + ta = None + while hit: + hit = False + for i, ai in enumerate(a[:-1]): + if ai[0] in elim: + continue + if ai[0] != a[i + 1][0] - 1: + continue + if components[ai[1]] != components[a[i + 1][1]]: + continue + elim.add(ai[0]) + elim.add(ai[1]) + elim.add(a[i + 1][0]) + elim.add(a[i + 1][1]) + if not ta: + ta = ex.split() + mu = TensorIndex('mu', LorentzIndex) + hit = True + if i == 0: + coeff = ex.coeff + tx = components[ai[1]](mu)*components[ai[1]](-mu) + if len(a) == 2: + tx *= 4 # eye(4) + tv.append(tx) + break + + if tv: + a = [x for j, x in enumerate(ta) if j not in elim] + a.extend(tv) + t = tensor_mul(*a)*coeff + # t = t.replace(lambda x: x.is_Matrix, lambda x: 1) + return t + else: + return ex + + if sort: + ex = ex.sorted_components() + # this would be better off with pattern matching + while 1: + t = _simplify_gpgp(ex) + if t != ex: + ex = t + else: + return t + + +def gamma_trace(t): + """ + trace of a single line of gamma matrices + + Examples + ======== + + >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \ + gamma_trace, LorentzIndex + >>> from sympy.tensor.tensor import tensor_indices, tensor_heads + >>> p, q = tensor_heads('p, q', [LorentzIndex]) + >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex) + >>> ps = p(i0)*G(-i0) + >>> qs = q(i0)*G(-i0) + >>> gamma_trace(G(i0)*G(i1)) + 4*metric(i0, i1) + >>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0) + 0 + >>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0) + 0 + + """ + if isinstance(t, TensAdd): + res = TensAdd(*[gamma_trace(x) for x in t.args]) + return res + t = _simplify_single_line(t) + res = _trace_single_line(t) + return res + + +def _simplify_single_line(expression): + """ + Simplify single-line product of gamma matrices. 
+ + Examples + ======== + + >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \ + LorentzIndex, _simplify_single_line + >>> from sympy.tensor.tensor import tensor_indices, TensorHead + >>> p = TensorHead('p', [LorentzIndex]) + >>> i0,i1 = tensor_indices('i0:2', LorentzIndex) + >>> _simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0) + 0 + + """ + t1, t2 = extract_type_tens(expression, GammaMatrix) + if t1 != 1: + t1 = kahane_simplify(t1) + res = t1*t2 + return res + + +def _trace_single_line(t): + """ + Evaluate the trace of a single gamma matrix line inside a ``TensExpr``. + + Notes + ===== + + If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right`` + indices trace over them; otherwise traces are not implied (explain) + + + Examples + ======== + + >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \ + LorentzIndex, _trace_single_line + >>> from sympy.tensor.tensor import tensor_indices, TensorHead + >>> p = TensorHead('p', [LorentzIndex]) + >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex) + >>> _trace_single_line(G(i0)*G(i1)) + 4*metric(i0, i1) + >>> _trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0) + 0 + + """ + def _trace_single_line1(t): + t = t.sorted_components() + components = t.components + ncomps = len(components) + g = LorentzIndex.metric + # gamma matirices are in a[i:j] + hit = 0 + for i in range(ncomps): + if components[i] == GammaMatrix: + hit = 1 + break + + for j in range(i + hit, ncomps): + if components[j] != GammaMatrix: + break + else: + j = ncomps + numG = j - i + if numG == 0: + tcoeff = t.coeff + return t.nocoeff if tcoeff else t + if numG % 2 == 1: + return TensMul.from_data(S.Zero, [], [], []) + elif numG > 4: + # find the open matrix indices and connect them: + a = t.split() + ind1 = a[i].get_indices()[0] + ind2 = a[i + 1].get_indices()[0] + aa = a[:i] + a[i + 2:] + t1 = tensor_mul(*aa)*g(ind1, ind2) + t1 = t1.contract_metric(g) + args = [t1] + sign = 1 + for k in range(i + 2, j): + sign = -sign + ind2 = a[k].get_indices()[0] + aa = a[:i] + a[i + 1:k] + a[k + 1:] + t2 = sign*tensor_mul(*aa)*g(ind1, ind2) + t2 = t2.contract_metric(g) + t2 = simplify_gpgp(t2, False) + args.append(t2) + t3 = TensAdd(*args) + t3 = _trace_single_line(t3) + return t3 + else: + a = t.split() + t1 = _gamma_trace1(*a[i:j]) + a2 = a[:i] + a[j:] + t2 = tensor_mul(*a2) + t3 = t1*t2 + if not t3: + return t3 + t3 = t3.contract_metric(g) + return t3 + + t = t.expand() + if isinstance(t, TensAdd): + a = [_trace_single_line1(x)*x.coeff for x in t.args] + return TensAdd(*a) + elif isinstance(t, (Tensor, TensMul)): + r = t.coeff*_trace_single_line1(t) + return r + else: + return trace(t) + + +def _gamma_trace1(*a): + gctr = 4 # FIXME specific for d=4 + g = LorentzIndex.metric + if not a: + return gctr + n = len(a) + if n%2 == 1: + #return TensMul.from_data(S.Zero, [], [], []) + return S.Zero + if n == 2: + ind0 = a[0].get_indices()[0] + ind1 = a[1].get_indices()[0] + return gctr*g(ind0, ind1) + if n == 4: + ind0 = a[0].get_indices()[0] + ind1 = a[1].get_indices()[0] + ind2 = a[2].get_indices()[0] + ind3 = a[3].get_indices()[0] + + return gctr*(g(ind0, ind1)*g(ind2, ind3) - \ + g(ind0, ind2)*g(ind1, ind3) + g(ind0, ind3)*g(ind1, ind2)) + + +def kahane_simplify(expression): + r""" + This function cancels contracted elements in a product of four + dimensional gamma matrices, resulting in an expression equal to the given + one, without the contracted gamma matrices. 
+ + Parameters + ========== + + `expression` the tensor expression containing the gamma matrices to simplify. + + Notes + ===== + + If spinor indices are given, the matrices must be given in + the order given in the product. + + Algorithm + ========= + + The idea behind the algorithm is to use some well-known identities, + i.e., for contractions enclosing an even number of `\gamma` matrices + + `\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )` + + for an odd number of `\gamma` matrices + + `\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}` + + Instead of repeatedly applying these identities to cancel out all contracted indices, + it is possible to recognize the links that would result from such an operation, + the problem is thus reduced to a simple rearrangement of free gamma matrices. + + Examples + ======== + + When using, always remember that the original expression coefficient + has to be handled separately + + >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex + >>> from sympy.physics.hep.gamma_matrices import kahane_simplify + >>> from sympy.tensor.tensor import tensor_indices + >>> i0, i1, i2 = tensor_indices('i0:3', LorentzIndex) + >>> ta = G(i0)*G(-i0) + >>> kahane_simplify(ta) + Matrix([ + [4, 0, 0, 0], + [0, 4, 0, 0], + [0, 0, 4, 0], + [0, 0, 0, 4]]) + >>> tb = G(i0)*G(i1)*G(-i0) + >>> kahane_simplify(tb) + -2*GammaMatrix(i1) + >>> t = G(i0)*G(-i0) + >>> kahane_simplify(t) + Matrix([ + [4, 0, 0, 0], + [0, 4, 0, 0], + [0, 0, 4, 0], + [0, 0, 0, 4]]) + >>> t = G(i0)*G(-i0) + >>> kahane_simplify(t) + Matrix([ + [4, 0, 0, 0], + [0, 4, 0, 0], + [0, 0, 4, 0], + [0, 0, 0, 4]]) + + If there are no contractions, the same expression is returned + + >>> tc = G(i0)*G(i1) + >>> kahane_simplify(tc) + GammaMatrix(i0)*GammaMatrix(i1) + + References + ========== + + [1] Algorithm for Reducing Contracted Products of gamma Matrices, + Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968. + """ + + if isinstance(expression, Mul): + return expression + if isinstance(expression, TensAdd): + return TensAdd(*[kahane_simplify(arg) for arg in expression.args]) + + if isinstance(expression, Tensor): + return expression + + assert isinstance(expression, TensMul) + + gammas = expression.args + + for gamma in gammas: + assert gamma.component == GammaMatrix + + free = expression.free + # spinor_free = [_ for _ in expression.free_in_args if _[1] != 0] + + # if len(spinor_free) == 2: + # spinor_free.sort(key=lambda x: x[2]) + # assert spinor_free[0][1] == 1 and spinor_free[-1][1] == 2 + # assert spinor_free[0][2] == 0 + # elif spinor_free: + # raise ValueError('spinor indices do not match') + + dum = [] + for dum_pair in expression.dum: + if expression.index_types[dum_pair[0]] == LorentzIndex: + dum.append((dum_pair[0], dum_pair[1])) + + dum = sorted(dum) + + if len(dum) == 0: # or GammaMatrixHead: + # no contractions in `expression`, just return it. + return expression + + # find the `first_dum_pos`, i.e. the position of the first contracted + # gamma matrix, Kahane's algorithm as described in his paper requires the + # gamma matrix expression to start with a contracted gamma matrix, this is + # a workaround which ignores possible initial free indices, and re-adds + # them later. 
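+    # (The free indices at positions 0 .. first_dum_pos-1 are prepended
+    # unchanged to every term of `resulting_indices` near the end of this
+    # function.)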
+ + first_dum_pos = min(map(min, dum)) + + # for p1, p2, a1, a2 in expression.dum_in_args: + # if p1 != 0 or p2 != 0: + # # only Lorentz indices, skip Dirac indices: + # continue + # first_dum_pos = min(p1, p2) + # break + + total_number = len(free) + len(dum)*2 + number_of_contractions = len(dum) + + free_pos = [None]*total_number + for i in free: + free_pos[i[1]] = i[0] + + # `index_is_free` is a list of booleans, to identify index position + # and whether that index is free or dummy. + index_is_free = [False]*total_number + + for i, indx in enumerate(free): + index_is_free[indx[1]] = True + + # `links` is a dictionary containing the graph described in Kahane's paper, + # to every key correspond one or two values, representing the linked indices. + # All values in `links` are integers, negative numbers are used in the case + # where it is necessary to insert gamma matrices between free indices, in + # order to make Kahane's algorithm work (see paper). + links = {i: [] for i in range(first_dum_pos, total_number)} + + # `cum_sign` is a step variable to mark the sign of every index, see paper. + cum_sign = -1 + # `cum_sign_list` keeps storage for all `cum_sign` (every index). + cum_sign_list = [None]*total_number + block_free_count = 0 + + # multiply `resulting_coeff` by the coefficient parameter, the rest + # of the algorithm ignores a scalar coefficient. + resulting_coeff = S.One + + # initialize a list of lists of indices. The outer list will contain all + # additive tensor expressions, while the inner list will contain the + # free indices (rearranged according to the algorithm). + resulting_indices = [[]] + + # start to count the `connected_components`, which together with the number + # of contractions, determines a -1 or +1 factor to be multiplied. + connected_components = 1 + + # First loop: here we fill `cum_sign_list`, and draw the links + # among consecutive indices (they are stored in `links`). Links among + # non-consecutive indices will be drawn later. + for i, is_free in enumerate(index_is_free): + # if `expression` starts with free indices, they are ignored here; + # they are later added as they are to the beginning of all + # `resulting_indices` list of lists of indices. + if i < first_dum_pos: + continue + + if is_free: + block_free_count += 1 + # if previous index was free as well, draw an arch in `links`. + if block_free_count > 1: + links[i - 1].append(i) + links[i].append(i - 1) + else: + # Change the sign of the index (`cum_sign`) if the number of free + # indices preceding it is even. + cum_sign *= 1 if (block_free_count % 2) else -1 + if block_free_count == 0 and i != first_dum_pos: + # check if there are two consecutive dummy indices: + # in this case create virtual indices with negative position, + # these "virtual" indices represent the insertion of two + # gamma^0 matrices to separate consecutive dummy indices, as + # Kahane's algorithm requires dummy indices to be separated by + # free indices. The product of two gamma^0 matrices is unity, + # so the new expression being examined is the same as the + # original one. 
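+                # (Illustration of the encoding: the virtual pair for position
+                # i is stored under the negative keys -1-i and -i, linked to
+                # each other so the graph walk below can traverse them.)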
+ if cum_sign == -1: + links[-1-i] = [-1-i+1] + links[-1-i+1] = [-1-i] + if (i - cum_sign) in links: + if i != first_dum_pos: + links[i].append(i - cum_sign) + if block_free_count != 0: + if i - cum_sign < len(index_is_free): + if index_is_free[i - cum_sign]: + links[i - cum_sign].append(i) + block_free_count = 0 + + cum_sign_list[i] = cum_sign + + # The previous loop has only created links between consecutive free indices, + # it is necessary to properly create links among dummy (contracted) indices, + # according to the rules described in Kahane's paper. There is only one exception + # to Kahane's rules: the negative indices, which handle the case of some + # consecutive free indices (Kahane's paper just describes dummy indices + # separated by free indices, hinting that free indices can be added without + # altering the expression result). + for i in dum: + # get the positions of the two contracted indices: + pos1 = i[0] + pos2 = i[1] + + # create Kahane's upper links, i.e. the upper arcs between dummy + # (i.e. contracted) indices: + links[pos1].append(pos2) + links[pos2].append(pos1) + + # create Kahane's lower links, this corresponds to the arcs below + # the line described in the paper: + + # first we move `pos1` and `pos2` according to the sign of the indices: + linkpos1 = pos1 + cum_sign_list[pos1] + linkpos2 = pos2 + cum_sign_list[pos2] + + # otherwise, perform some checks before creating the lower arcs: + + # make sure we are not exceeding the total number of indices: + if linkpos1 >= total_number: + continue + if linkpos2 >= total_number: + continue + + # make sure we are not below the first dummy index in `expression`: + if linkpos1 < first_dum_pos: + continue + if linkpos2 < first_dum_pos: + continue + + # check if the previous loop created "virtual" indices between dummy + # indices, in such a case relink `linkpos1` and `linkpos2`: + if (-1-linkpos1) in links: + linkpos1 = -1-linkpos1 + if (-1-linkpos2) in links: + linkpos2 = -1-linkpos2 + + # move only if not next to free index: + if linkpos1 >= 0 and not index_is_free[linkpos1]: + linkpos1 = pos1 + + if linkpos2 >=0 and not index_is_free[linkpos2]: + linkpos2 = pos2 + + # create the lower arcs: + if linkpos2 not in links[linkpos1]: + links[linkpos1].append(linkpos2) + if linkpos1 not in links[linkpos2]: + links[linkpos2].append(linkpos1) + + # This loop starts from the `first_dum_pos` index (first dummy index) + # walks through the graph deleting the visited indices from `links`, + # it adds a gamma matrix for every free index in encounters, while it + # completely ignores dummy indices and virtual indices. + pointer = first_dum_pos + previous_pointer = 0 + while True: + if pointer in links: + next_ones = links.pop(pointer) + else: + break + + if previous_pointer in next_ones: + next_ones.remove(previous_pointer) + + previous_pointer = pointer + + if next_ones: + pointer = next_ones[0] + else: + break + + if pointer == previous_pointer: + break + if pointer >=0 and free_pos[pointer] is not None: + for ri in resulting_indices: + ri.append(free_pos[pointer]) + + # The following loop removes the remaining connected components in `links`. + # If there are free indices inside a connected component, it gives a + # contribution to the resulting expression given by the factor + # `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahanes's + # paper represented as {gamma_a, gamma_b, ... , gamma_z}, + # virtual indices are ignored. 
The variable `connected_components` is + # increased by one for every connected component this loop encounters. + + # If the connected component has virtual and dummy indices only + # (no free indices), it contributes to `resulting_indices` by a factor of two. + # The multiplication by two is a result of the + # factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper. + # Note: curly brackets are meant as in the paper, as a generalized + # multi-element anticommutator! + + while links: + connected_components += 1 + pointer = min(links.keys()) + previous_pointer = pointer + # the inner loop erases the visited indices from `links`, and it adds + # all free indices to `prepend_indices` list, virtual indices are + # ignored. + prepend_indices = [] + while True: + if pointer in links: + next_ones = links.pop(pointer) + else: + break + + if previous_pointer in next_ones: + if len(next_ones) > 1: + next_ones.remove(previous_pointer) + + previous_pointer = pointer + + if next_ones: + pointer = next_ones[0] + + if pointer >= first_dum_pos and free_pos[pointer] is not None: + prepend_indices.insert(0, free_pos[pointer]) + # if `prepend_indices` is void, it means there are no free indices + # in the loop (and it can be shown that there must be a virtual index), + # loops of virtual indices only contribute by a factor of two: + if len(prepend_indices) == 0: + resulting_coeff *= 2 + # otherwise, add the free indices in `prepend_indices` to + # the `resulting_indices`: + else: + expr1 = prepend_indices + expr2 = list(reversed(prepend_indices)) + resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)] + + # sign correction, as described in Kahane's paper: + resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1 + # power of two factor, as described in Kahane's paper: + resulting_coeff *= 2**(number_of_contractions) + + # If `first_dum_pos` is not zero, it means that there are trailing free gamma + # matrices in front of `expression`, so multiply by them: + resulting_indices = [ free_pos[0:first_dum_pos] + ri for ri in resulting_indices ] + + resulting_expr = S.Zero + for i in resulting_indices: + temp_expr = S.One + for j in i: + temp_expr *= GammaMatrix(j) + resulting_expr += temp_expr + + t = resulting_coeff * resulting_expr + t1 = None + if isinstance(t, TensAdd): + t1 = t.args[0] + elif isinstance(t, TensMul): + t1 = t + if t1: + pass + else: + t = eye(4)*t + return t diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/matrices.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..d91466220d63956053b91bd76b948ee677e7c191 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/matrices.py @@ -0,0 +1,176 @@ +"""Known matrices related to physics""" + +from sympy.core.numbers import I +from sympy.matrices.dense import MutableDenseMatrix as Matrix +from sympy.utilities.decorator import deprecated + + +def msigma(i): + r"""Returns a Pauli matrix `\sigma_i` with `i=1,2,3`. + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Pauli_matrices + + Examples + ======== + + >>> from sympy.physics.matrices import msigma + >>> msigma(1) + Matrix([ + [0, 1], + [1, 0]]) + """ + if i == 1: + mat = ( + (0, 1), + (1, 0) + ) + elif i == 2: + mat = ( + (0, -I), + (I, 0) + ) + elif i == 3: + mat = ( + (1, 0), + (0, -1) + ) + else: + raise IndexError("Invalid Pauli index") + return Matrix(mat) + + +def pat_matrix(m, dx, dy, dz): + """Returns the Parallel Axis Theorem matrix to translate the inertia + matrix a distance of `(dx, dy, dz)` for a body of mass m. + + Examples + ======== + + To translate a body having a mass of 2 units a distance of 1 unit along + the `x`-axis we get: + + >>> from sympy.physics.matrices import pat_matrix + >>> pat_matrix(2, 1, 0, 0) + Matrix([ + [0, 0, 0], + [0, 2, 0], + [0, 0, 2]]) + + """ + dxdy = -dx*dy + dydz = -dy*dz + dzdx = -dz*dx + dxdx = dx**2 + dydy = dy**2 + dzdz = dz**2 + mat = ((dydy + dzdz, dxdy, dzdx), + (dxdy, dxdx + dzdz, dydz), + (dzdx, dydz, dydy + dxdx)) + return m*Matrix(mat) + + +def mgamma(mu, lower=False): + r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard + (Dirac) representation. + + Explanation + =========== + + If you want `\gamma_\mu`, use ``gamma(mu, True)``. + + We use a convention: + + `\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3` + + `\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5` + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Gamma_matrices + + Examples + ======== + + >>> from sympy.physics.matrices import mgamma + >>> mgamma(1) + Matrix([ + [ 0, 0, 0, 1], + [ 0, 0, 1, 0], + [ 0, -1, 0, 0], + [-1, 0, 0, 0]]) + """ + if mu not in (0, 1, 2, 3, 5): + raise IndexError("Invalid Dirac index") + if mu == 0: + mat = ( + (1, 0, 0, 0), + (0, 1, 0, 0), + (0, 0, -1, 0), + (0, 0, 0, -1) + ) + elif mu == 1: + mat = ( + (0, 0, 0, 1), + (0, 0, 1, 0), + (0, -1, 0, 0), + (-1, 0, 0, 0) + ) + elif mu == 2: + mat = ( + (0, 0, 0, -I), + (0, 0, I, 0), + (0, I, 0, 0), + (-I, 0, 0, 0) + ) + elif mu == 3: + mat = ( + (0, 0, 1, 0), + (0, 0, 0, -1), + (-1, 0, 0, 0), + (0, 1, 0, 0) + ) + elif mu == 5: + mat = ( + (0, 0, 1, 0), + (0, 0, 0, 1), + (1, 0, 0, 0), + (0, 1, 0, 0) + ) + m = Matrix(mat) + if lower: + if mu in (1, 2, 3, 5): + m = -m + return m + +#Minkowski tensor using the convention (+,-,-,-) used in the Quantum Field +#Theory +minkowski_tensor = Matrix( ( + (1, 0, 0, 0), + (0, -1, 0, 0), + (0, 0, -1, 0), + (0, 0, 0, -1) +)) + + +@deprecated( + """ + The sympy.physics.matrices.mdft method is deprecated. Use + sympy.DFT(n).as_explicit() instead. + """, + deprecated_since_version="1.9", + active_deprecations_target="deprecated-physics-mdft", +) +def mdft(n): + r""" + .. deprecated:: 1.9 + + Use DFT from sympy.matrices.expressions.fourier instead. + + To get identical behavior to ``mdft(n)``, use ``DFT(n).as_explicit()``. + """ + from sympy.matrices.expressions.fourier import DFT + return DFT(n).as_mutable() diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/paulialgebra.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/paulialgebra.py new file mode 100644 index 0000000000000000000000000000000000000000..300957354ff34907035aa1d1a48b00276230a1e5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/paulialgebra.py @@ -0,0 +1,231 @@ +""" +This module implements Pauli algebra by subclassing Symbol. Only algebraic +properties of Pauli matrices are used (we do not use the Matrix class). 
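+
+A quick taste (illustrative; the Pauli class docstring below has more)::
+
+    from sympy.physics.paulialgebra import Pauli
+    Pauli(1)*Pauli(2)   # evaluates to I*sigma3
+    Pauli(3)**2         # evaluates to 1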
+ +See the documentation to the class Pauli for examples. + +References +========== + +.. [1] https://en.wikipedia.org/wiki/Pauli_matrices +""" + +from sympy.core.add import Add +from sympy.core.mul import Mul +from sympy.core.numbers import I +from sympy.core.power import Pow +from sympy.core.symbol import Symbol +from sympy.physics.quantum import TensorProduct + +__all__ = ['evaluate_pauli_product'] + + +def delta(i, j): + """ + Returns 1 if ``i == j``, else 0. + + This is used in the multiplication of Pauli matrices. + + Examples + ======== + + >>> from sympy.physics.paulialgebra import delta + >>> delta(1, 1) + 1 + >>> delta(2, 3) + 0 + """ + if i == j: + return 1 + else: + return 0 + + +def epsilon(i, j, k): + """ + Return 1 if i,j,k is equal to (1,2,3), (2,3,1), or (3,1,2); + -1 if ``i``,``j``,``k`` is equal to (1,3,2), (3,2,1), or (2,1,3); + else return 0. + + This is used in the multiplication of Pauli matrices. + + Examples + ======== + + >>> from sympy.physics.paulialgebra import epsilon + >>> epsilon(1, 2, 3) + 1 + >>> epsilon(1, 3, 2) + -1 + """ + if (i, j, k) in ((1, 2, 3), (2, 3, 1), (3, 1, 2)): + return 1 + elif (i, j, k) in ((1, 3, 2), (3, 2, 1), (2, 1, 3)): + return -1 + else: + return 0 + + +class Pauli(Symbol): + """ + The class representing algebraic properties of Pauli matrices. + + Explanation + =========== + + The symbol used to display the Pauli matrices can be changed with an + optional parameter ``label="sigma"``. Pauli matrices with different + ``label`` attributes cannot multiply together. + + If the left multiplication of symbol or number with Pauli matrix is needed, + please use parentheses to separate Pauli and symbolic multiplication + (for example: 2*I*(Pauli(3)*Pauli(2))). + + Another variant is to use evaluate_pauli_product function to evaluate + the product of Pauli matrices and other symbols (with commutative + multiply rules). 
+ + See Also + ======== + + evaluate_pauli_product + + Examples + ======== + + >>> from sympy.physics.paulialgebra import Pauli + >>> Pauli(1) + sigma1 + >>> Pauli(1)*Pauli(2) + I*sigma3 + >>> Pauli(1)*Pauli(1) + 1 + >>> Pauli(3)**4 + 1 + >>> Pauli(1)*Pauli(2)*Pauli(3) + I + + >>> from sympy.physics.paulialgebra import Pauli + >>> Pauli(1, label="tau") + tau1 + >>> Pauli(1)*Pauli(2, label="tau") + sigma1*tau2 + >>> Pauli(1, label="tau")*Pauli(2, label="tau") + I*tau3 + + >>> from sympy import I + >>> I*(Pauli(2)*Pauli(3)) + -sigma1 + + >>> from sympy.physics.paulialgebra import evaluate_pauli_product + >>> f = I*Pauli(2)*Pauli(3) + >>> f + I*sigma2*sigma3 + >>> evaluate_pauli_product(f) + -sigma1 + """ + + __slots__ = ("i", "label") + + def __new__(cls, i, label="sigma"): + if i not in [1, 2, 3]: + raise IndexError("Invalid Pauli index") + obj = Symbol.__new__(cls, "%s%d" %(label,i), commutative=False, hermitian=True) + obj.i = i + obj.label = label + return obj + + def __getnewargs_ex__(self): + return (self.i, self.label), {} + + def _hashable_content(self): + return (self.i, self.label) + + # FIXME don't work for -I*Pauli(2)*Pauli(3) + def __mul__(self, other): + if isinstance(other, Pauli): + j = self.i + k = other.i + jlab = self.label + klab = other.label + + if jlab == klab: + return delta(j, k) \ + + I*epsilon(j, k, 1)*Pauli(1,jlab) \ + + I*epsilon(j, k, 2)*Pauli(2,jlab) \ + + I*epsilon(j, k, 3)*Pauli(3,jlab) + return super().__mul__(other) + + def _eval_power(b, e): + if e.is_Integer and e.is_positive: + return super().__pow__(int(e) % 2) + + +def evaluate_pauli_product(arg): + '''Help function to evaluate Pauli matrices product + with symbolic objects. + + Parameters + ========== + + arg: symbolic expression that contains Paulimatrices + + Examples + ======== + + >>> from sympy.physics.paulialgebra import Pauli, evaluate_pauli_product + >>> from sympy import I + >>> evaluate_pauli_product(I*Pauli(1)*Pauli(2)) + -sigma3 + + >>> from sympy.abc import x + >>> evaluate_pauli_product(x**2*Pauli(2)*Pauli(1)) + -I*x**2*sigma3 + ''' + start = arg + end = arg + + if isinstance(arg, Pow) and isinstance(arg.args[0], Pauli): + if arg.args[1].is_odd: + return arg.args[0] + else: + return 1 + + if isinstance(arg, Add): + return Add(*[evaluate_pauli_product(part) for part in arg.args]) + + if isinstance(arg, TensorProduct): + return TensorProduct(*[evaluate_pauli_product(part) for part in arg.args]) + + elif not(isinstance(arg, Mul)): + return arg + + while not start == end or start == arg and end == arg: + start = end + + tmp = start.as_coeff_mul() + sigma_product = 1 + com_product = 1 + keeper = 1 + + for el in tmp[1]: + if isinstance(el, Pauli): + sigma_product *= el + elif not el.is_commutative: + if isinstance(el, Pow) and isinstance(el.args[0], Pauli): + if el.args[1].is_odd: + sigma_product *= el.args[0] + elif isinstance(el, TensorProduct): + keeper = keeper*sigma_product*\ + TensorProduct( + *[evaluate_pauli_product(part) for part in el.args] + ) + sigma_product = 1 + else: + keeper = keeper*sigma_product*el + sigma_product = 1 + else: + com_product *= el + end = tmp[0]*keeper*sigma_product*com_product + if end == arg: break + return end diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/pring.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/pring.py new file mode 100644 index 0000000000000000000000000000000000000000..325f4ff98a8c9fc428b4e332153af533f4d199ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/pring.py @@ -0,0 +1,94 @@ 
+from sympy.core.numbers import (I, pi) +from sympy.core.singleton import S +from sympy.functions.elementary.exponential import exp +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.physics.quantum.constants import hbar + + +def wavefunction(n, x): + """ + Returns the wavefunction for particle on ring. + + Parameters + ========== + + n : The quantum number. + Here ``n`` can be positive as well as negative + which can be used to describe the direction of motion of particle. + x : + The angle. + + Examples + ======== + + >>> from sympy.physics.pring import wavefunction + >>> from sympy import Symbol, integrate, pi + >>> x=Symbol("x") + >>> wavefunction(1, x) + sqrt(2)*exp(I*x)/(2*sqrt(pi)) + >>> wavefunction(2, x) + sqrt(2)*exp(2*I*x)/(2*sqrt(pi)) + >>> wavefunction(3, x) + sqrt(2)*exp(3*I*x)/(2*sqrt(pi)) + + The normalization of the wavefunction is: + + >>> integrate(wavefunction(2, x)*wavefunction(-2, x), (x, 0, 2*pi)) + 1 + >>> integrate(wavefunction(4, x)*wavefunction(-4, x), (x, 0, 2*pi)) + 1 + + References + ========== + + .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum + Mechanics (4th ed.). Pages 71-73. + + """ + # sympify arguments + n, x = S(n), S(x) + return exp(n * I * x) / sqrt(2 * pi) + + +def energy(n, m, r): + """ + Returns the energy of the state corresponding to quantum number ``n``. + + E=(n**2 * (hcross)**2) / (2 * m * r**2) + + Parameters + ========== + + n : + The quantum number. + m : + Mass of the particle. + r : + Radius of circle. + + Examples + ======== + + >>> from sympy.physics.pring import energy + >>> from sympy import Symbol + >>> m=Symbol("m") + >>> r=Symbol("r") + >>> energy(1, m, r) + hbar**2/(2*m*r**2) + >>> energy(2, m, r) + 2*hbar**2/(m*r**2) + >>> energy(-2, 2.0, 3.0) + 0.111111111111111*hbar**2 + + References + ========== + + .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum + Mechanics (4th ed.). Pages 71-73. + + """ + n, m, r = S(n), S(m), S(r) + if n.is_integer: + return (n**2 * hbar**2) / (2 * m * r**2) + else: + raise ValueError("'n' must be integer") diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/qho_1d.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/qho_1d.py new file mode 100644 index 0000000000000000000000000000000000000000..f418e0e954656923fbfa64cea2145581ddf65aea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/qho_1d.py @@ -0,0 +1,88 @@ +from sympy.core import S, pi, Rational +from sympy.functions import hermite, sqrt, exp, factorial, Abs +from sympy.physics.quantum.constants import hbar + + +def psi_n(n, x, m, omega): + """ + Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator. + + Parameters + ========== + + n : + the "nodal" quantum number. Corresponds to the number of nodes in the + wavefunction. ``n >= 0`` + x : + x coordinate. + m : + Mass of the particle. + omega : + Angular frequency of the oscillator. + + Examples + ======== + + >>> from sympy.physics.qho_1d import psi_n + >>> from sympy.abc import m, x, omega + >>> psi_n(0, x, m, omega) + (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4)) + + """ + + # sympify arguments + n, x, m, omega = map(S, [n, x, m, omega]) + nu = m * omega / hbar + # normalization coefficient + C = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n))) + + return C * exp(-nu* x**2 /2) * hermite(n, sqrt(nu)*x) + + +def E_n(n, omega): + """ + Returns the Energy of the One-dimensional harmonic oscillator. 
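+# Illustrative sketch: the ring wavefunctions above are orthonormal on
+# [0, 2*pi), and energy() reproduces E = n**2*hbar**2/(2*m*r**2).
+from sympy import Symbol, conjugate, integrate, pi, simplify
+from sympy.physics.pring import energy, wavefunction
+from sympy.physics.quantum.constants import hbar
+
+x = Symbol("x", real=True)
+m = Symbol("m", positive=True)
+r = Symbol("r", positive=True)
+
+assert integrate(conjugate(wavefunction(2, x))*wavefunction(2, x), (x, 0, 2*pi)) == 1
+assert integrate(conjugate(wavefunction(2, x))*wavefunction(3, x), (x, 0, 2*pi)) == 0
+assert simplify(energy(3, m, r) - 9*hbar**2/(2*m*r**2)) == 0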
+
+    Parameters
+    ==========
+
+    n :
+        The "nodal" quantum number.
+    omega :
+        The harmonic oscillator angular frequency.
+
+    Notes
+    =====
+
+    The unit of the returned value matches the unit of ``hbar*omega``, since
+    the energy is calculated as:
+
+        E_n = hbar * omega*(n + 1/2)
+
+    Examples
+    ========
+
+    >>> from sympy.physics.qho_1d import E_n
+    >>> from sympy.abc import x, omega
+    >>> E_n(x, omega)
+    hbar*omega*(x + 1/2)
+    """
+
+    return hbar * omega * (n + S.Half)
+
+
+def coherent_state(n, alpha):
+    """
+    Returns <n|alpha> for the coherent states of the 1D harmonic oscillator.
+    See https://en.wikipedia.org/wiki/Coherent_states
+
+    Parameters
+    ==========
+
+    n :
+        The "nodal" quantum number.
+    alpha :
+        The eigenvalue of the annihilation operator.
+    """
+
+    return exp(- Abs(alpha)**2/2)*(alpha**n)/sqrt(factorial(n))
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/secondquant.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/secondquant.py
new file mode 100644
index 0000000000000000000000000000000000000000..464fae9acee368b4f68f1436ffd3bc419b0a72d4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/secondquant.py
@@ -0,0 +1,3114 @@
+"""
+Second quantization operators and states for bosons.
+
+This follows the formulation of Fetter and Walecka, "Quantum Theory
+of Many-Particle Systems."
+"""
+from collections import defaultdict
+
+from sympy.core.add import Add
+from sympy.core.basic import Basic
+from sympy.core.cache import cacheit
+from sympy.core.containers import Tuple
+from sympy.core.expr import Expr
+from sympy.core.function import Function
+from sympy.core.mul import Mul
+from sympy.core.numbers import I
+from sympy.core.power import Pow
+from sympy.core.singleton import S
+from sympy.core.sorting import default_sort_key
+from sympy.core.symbol import Dummy, Symbol
+from sympy.core.sympify import sympify
+from sympy.functions.elementary.miscellaneous import sqrt
+from sympy.functions.special.tensor_functions import KroneckerDelta
+from sympy.matrices.dense import zeros
+from sympy.printing.str import StrPrinter
+from sympy.utilities.iterables import has_dups
+
+__all__ = [
+    'Dagger',
+    'KroneckerDelta',
+    'BosonicOperator',
+    'AnnihilateBoson',
+    'CreateBoson',
+    'AnnihilateFermion',
+    'CreateFermion',
+    'FockState',
+    'FockStateBra',
+    'FockStateKet',
+    'FockStateBosonKet',
+    'FockStateBosonBra',
+    'FockStateFermionKet',
+    'FockStateFermionBra',
+    'BBra',
+    'BKet',
+    'FBra',
+    'FKet',
+    'F',
+    'Fd',
+    'B',
+    'Bd',
+    'apply_operators',
+    'InnerProduct',
+    'BosonicBasis',
+    'VarBosonicBasis',
+    'FixedBosonicBasis',
+    'Commutator',
+    'matrix_rep',
+    'contraction',
+    'wicks',
+    'NO',
+    'evaluate_deltas',
+    'AntiSymmetricTensor',
+    'substitute_dummies',
+    'PermutationOperator',
+    'simplify_index_permutations',
+]
+
+
+class SecondQuantizationError(Exception):
+    pass
+
+
+class AppliesOnlyToSymbolicIndex(SecondQuantizationError):
+    pass
+
+
+class ContractionAppliesOnlyToFermions(SecondQuantizationError):
+    pass
+
+
+class ViolationOfPauliPrinciple(SecondQuantizationError):
+    pass
+
+
+class SubstitutionOfAmbigousOperatorFailed(SecondQuantizationError):
+    pass
+
+
+class WicksTheoremDoesNotApply(SecondQuantizationError):
+    pass
+
+
+class Dagger(Expr):
+    """
+    Hermitian conjugate of creation/annihilation operators.
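+# Illustrative sketch: psi_n above is normalized, and E_n steps by hbar*omega
+# between consecutive levels.
+from sympy import Symbol, integrate, oo, simplify
+from sympy.physics.qho_1d import E_n, psi_n
+from sympy.physics.quantum.constants import hbar
+
+x = Symbol("x", real=True)
+m = Symbol("m", positive=True)
+omega = Symbol("omega", positive=True)
+
+assert simplify(integrate(psi_n(1, x, m, omega)**2, (x, -oo, oo))) == 1
+assert simplify(E_n(5, omega) - E_n(4, omega)) == hbar*omega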
+ + Examples + ======== + + >>> from sympy import I + >>> from sympy.physics.secondquant import Dagger, B, Bd + >>> Dagger(2*I) + -2*I + >>> Dagger(B(0)) + CreateBoson(0) + >>> Dagger(Bd(0)) + AnnihilateBoson(0) + + """ + + def __new__(cls, arg): + arg = sympify(arg) + r = cls.eval(arg) + if isinstance(r, Basic): + return r + obj = Basic.__new__(cls, arg) + return obj + + @classmethod + def eval(cls, arg): + """ + Evaluates the Dagger instance. + + Examples + ======== + + >>> from sympy import I + >>> from sympy.physics.secondquant import Dagger, B, Bd + >>> Dagger(2*I) + -2*I + >>> Dagger(B(0)) + CreateBoson(0) + >>> Dagger(Bd(0)) + AnnihilateBoson(0) + + The eval() method is called automatically. + + """ + dagger = getattr(arg, '_dagger_', None) + if dagger is not None: + return dagger() + if isinstance(arg, Basic): + if arg.is_Add: + return Add(*tuple(map(Dagger, arg.args))) + if arg.is_Mul: + return Mul(*tuple(map(Dagger, reversed(arg.args)))) + if arg.is_Number: + return arg + if arg.is_Pow: + return Pow(Dagger(arg.args[0]), arg.args[1]) + if arg == I: + return -arg + else: + return None + + def _dagger_(self): + return self.args[0] + + +class TensorSymbol(Expr): + + is_commutative = True + + +class AntiSymmetricTensor(TensorSymbol): + """Stores upper and lower indices in separate Tuple's. + + Each group of indices is assumed to be antisymmetric. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import AntiSymmetricTensor + >>> i, j = symbols('i j', below_fermi=True) + >>> a, b = symbols('a b', above_fermi=True) + >>> AntiSymmetricTensor('v', (a, i), (b, j)) + AntiSymmetricTensor(v, (a, i), (b, j)) + >>> AntiSymmetricTensor('v', (i, a), (b, j)) + -AntiSymmetricTensor(v, (a, i), (b, j)) + + As you can see, the indices are automatically sorted to a canonical form. + + """ + + def __new__(cls, symbol, upper, lower): + + try: + upper, signu = _sort_anticommuting_fermions( + upper, key=cls._sortkey) + lower, signl = _sort_anticommuting_fermions( + lower, key=cls._sortkey) + + except ViolationOfPauliPrinciple: + return S.Zero + + symbol = sympify(symbol) + upper = Tuple(*upper) + lower = Tuple(*lower) + + if (signu + signl) % 2: + return -TensorSymbol.__new__(cls, symbol, upper, lower) + else: + + return TensorSymbol.__new__(cls, symbol, upper, lower) + + @classmethod + def _sortkey(cls, index): + """Key for sorting of indices. + + particle < hole < general + + FIXME: This is a bottle-neck, can we do it faster? + """ + h = hash(index) + label = str(index) + if isinstance(index, Dummy): + if index.assumptions0.get('above_fermi'): + return (20, label, h) + elif index.assumptions0.get('below_fermi'): + return (21, label, h) + else: + return (22, label, h) + + if index.assumptions0.get('above_fermi'): + return (10, label, h) + elif index.assumptions0.get('below_fermi'): + return (11, label, h) + else: + return (12, label, h) + + def _latex(self, printer): + return "{%s^{%s}_{%s}}" % ( + self.symbol, + "".join([ i.name for i in self.args[1]]), + "".join([ i.name for i in self.args[2]]) + ) + + @property + def symbol(self): + """ + Returns the symbol of the tensor. 
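+# Illustrative sketch: Dagger reverses operator products, and the index
+# sorting shown above makes AntiSymmetricTensor antisymmetric within each
+# index group.
+from sympy import symbols
+from sympy.physics.secondquant import AntiSymmetricTensor, B, Bd, Dagger
+
+i, j = symbols('i j', below_fermi=True)
+a, b = symbols('a b', above_fermi=True)
+
+assert Dagger(B(0)*Bd(1)) == B(1)*Bd(0)
+assert (AntiSymmetricTensor('t', (b, a), (i, j))
+        == -AntiSymmetricTensor('t', (a, b), (i, j)))
+assert AntiSymmetricTensor('t', (a, a), (i, j)) == 0   # Pauli principle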
+ + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import AntiSymmetricTensor + >>> i, j = symbols('i,j', below_fermi=True) + >>> a, b = symbols('a,b', above_fermi=True) + >>> AntiSymmetricTensor('v', (a, i), (b, j)) + AntiSymmetricTensor(v, (a, i), (b, j)) + >>> AntiSymmetricTensor('v', (a, i), (b, j)).symbol + v + + """ + return self.args[0] + + @property + def upper(self): + """ + Returns the upper indices. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import AntiSymmetricTensor + >>> i, j = symbols('i,j', below_fermi=True) + >>> a, b = symbols('a,b', above_fermi=True) + >>> AntiSymmetricTensor('v', (a, i), (b, j)) + AntiSymmetricTensor(v, (a, i), (b, j)) + >>> AntiSymmetricTensor('v', (a, i), (b, j)).upper + (a, i) + + + """ + return self.args[1] + + @property + def lower(self): + """ + Returns the lower indices. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import AntiSymmetricTensor + >>> i, j = symbols('i,j', below_fermi=True) + >>> a, b = symbols('a,b', above_fermi=True) + >>> AntiSymmetricTensor('v', (a, i), (b, j)) + AntiSymmetricTensor(v, (a, i), (b, j)) + >>> AntiSymmetricTensor('v', (a, i), (b, j)).lower + (b, j) + + """ + return self.args[2] + + def __str__(self): + return "%s(%s,%s)" % self.args + + +class SqOperator(Expr): + """ + Base class for Second Quantization operators. + """ + + op_symbol = 'sq' + + is_commutative = False + + def __new__(cls, k): + obj = Basic.__new__(cls, sympify(k)) + return obj + + @property + def state(self): + """ + Returns the state index related to this operator. + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F, Fd, B, Bd + >>> p = Symbol('p') + >>> F(p).state + p + >>> Fd(p).state + p + >>> B(p).state + p + >>> Bd(p).state + p + + """ + return self.args[0] + + @property + def is_symbolic(self): + """ + Returns True if the state is a symbol (as opposed to a number). + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> p = Symbol('p') + >>> F(p).is_symbolic + True + >>> F(1).is_symbolic + False + + """ + if self.state.is_Integer: + return False + else: + return True + + def __repr__(self): + return NotImplemented + + def __str__(self): + return "%s(%r)" % (self.op_symbol, self.state) + + def apply_operator(self, state): + """ + Applies an operator to itself. + """ + raise NotImplementedError('implement apply_operator in a subclass') + + +class BosonicOperator(SqOperator): + pass + + +class Annihilator(SqOperator): + pass + + +class Creator(SqOperator): + pass + + +class AnnihilateBoson(BosonicOperator, Annihilator): + """ + Bosonic annihilation operator. + + Examples + ======== + + >>> from sympy.physics.secondquant import B + >>> from sympy.abc import x + >>> B(x) + AnnihilateBoson(x) + """ + + op_symbol = 'b' + + def _dagger_(self): + return CreateBoson(self.state) + + def apply_operator(self, state): + """ + Apply state to self if self is not symbolic and state is a FockStateKet, else + multiply self by state. 
+ + Examples + ======== + + >>> from sympy.physics.secondquant import B, BKet + >>> from sympy.abc import x, y, n + >>> B(x).apply_operator(y) + y*AnnihilateBoson(x) + >>> B(0).apply_operator(BKet((n,))) + sqrt(n)*FockStateBosonKet((n - 1,)) + + """ + if not self.is_symbolic and isinstance(state, FockStateKet): + element = self.state + amp = sqrt(state[element]) + return amp*state.down(element) + else: + return Mul(self, state) + + def __repr__(self): + return "AnnihilateBoson(%s)" % self.state + + def _latex(self, printer): + if self.state is S.Zero: + return "b_{0}" + else: + return "b_{%s}" % self.state.name + +class CreateBoson(BosonicOperator, Creator): + """ + Bosonic creation operator. + """ + + op_symbol = 'b+' + + def _dagger_(self): + return AnnihilateBoson(self.state) + + def apply_operator(self, state): + """ + Apply state to self if self is not symbolic and state is a FockStateKet, else + multiply self by state. + + Examples + ======== + + >>> from sympy.physics.secondquant import B, Dagger, BKet + >>> from sympy.abc import x, y, n + >>> Dagger(B(x)).apply_operator(y) + y*CreateBoson(x) + >>> B(0).apply_operator(BKet((n,))) + sqrt(n)*FockStateBosonKet((n - 1,)) + """ + if not self.is_symbolic and isinstance(state, FockStateKet): + element = self.state + amp = sqrt(state[element] + 1) + return amp*state.up(element) + else: + return Mul(self, state) + + def __repr__(self): + return "CreateBoson(%s)" % self.state + + def _latex(self, printer): + if self.state is S.Zero: + return "{b^\\dagger_{0}}" + else: + return "{b^\\dagger_{%s}}" % self.state.name + +B = AnnihilateBoson +Bd = CreateBoson + + +class FermionicOperator(SqOperator): + + @property + def is_restricted(self): + """ + Is this FermionicOperator restricted with respect to fermi level? + + Returns + ======= + + 1 : restricted to orbits above fermi + 0 : no restriction + -1 : restricted to orbits below fermi + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F, Fd + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> F(a).is_restricted + 1 + >>> Fd(a).is_restricted + 1 + >>> F(i).is_restricted + -1 + >>> Fd(i).is_restricted + -1 + >>> F(p).is_restricted + 0 + >>> Fd(p).is_restricted + 0 + + """ + ass = self.args[0].assumptions0 + if ass.get("below_fermi"): + return -1 + if ass.get("above_fermi"): + return 1 + return 0 + + @property + def is_above_fermi(self): + """ + Does the index of this FermionicOperator allow values above fermi? + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> F(a).is_above_fermi + True + >>> F(i).is_above_fermi + False + >>> F(p).is_above_fermi + True + + Note + ==== + + The same applies to creation operators Fd + + """ + return not self.args[0].assumptions0.get("below_fermi") + + @property + def is_below_fermi(self): + """ + Does the index of this FermionicOperator allow values below fermi? 
+ + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> F(a).is_below_fermi + False + >>> F(i).is_below_fermi + True + >>> F(p).is_below_fermi + True + + The same applies to creation operators Fd + + """ + return not self.args[0].assumptions0.get("above_fermi") + + @property + def is_only_below_fermi(self): + """ + Is the index of this FermionicOperator restricted to values below fermi? + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> F(a).is_only_below_fermi + False + >>> F(i).is_only_below_fermi + True + >>> F(p).is_only_below_fermi + False + + The same applies to creation operators Fd + """ + return self.is_below_fermi and not self.is_above_fermi + + @property + def is_only_above_fermi(self): + """ + Is the index of this FermionicOperator restricted to values above fermi? + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> F(a).is_only_above_fermi + True + >>> F(i).is_only_above_fermi + False + >>> F(p).is_only_above_fermi + False + + The same applies to creation operators Fd + """ + return self.is_above_fermi and not self.is_below_fermi + + def _sortkey(self): + h = hash(self) + label = str(self.args[0]) + + if self.is_only_q_creator: + return 1, label, h + if self.is_only_q_annihilator: + return 4, label, h + if isinstance(self, Annihilator): + return 3, label, h + if isinstance(self, Creator): + return 2, label, h + + +class AnnihilateFermion(FermionicOperator, Annihilator): + """ + Fermionic annihilation operator. + """ + + op_symbol = 'f' + + def _dagger_(self): + return CreateFermion(self.state) + + def apply_operator(self, state): + """ + Apply state to self if self is not symbolic and state is a FockStateKet, else + multiply self by state. + + Examples + ======== + + >>> from sympy.physics.secondquant import B, Dagger, BKet + >>> from sympy.abc import x, y, n + >>> Dagger(B(x)).apply_operator(y) + y*CreateBoson(x) + >>> B(0).apply_operator(BKet((n,))) + sqrt(n)*FockStateBosonKet((n - 1,)) + """ + if isinstance(state, FockStateFermionKet): + element = self.state + return state.down(element) + + elif isinstance(state, Mul): + c_part, nc_part = state.args_cnc() + if isinstance(nc_part[0], FockStateFermionKet): + element = self.state + return Mul(*(c_part + [nc_part[0].down(element)] + nc_part[1:])) + else: + return Mul(self, state) + + else: + return Mul(self, state) + + @property + def is_q_creator(self): + """ + Can we create a quasi-particle? (create hole or create particle) + If so, would that be above or below the fermi surface? + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> F(a).is_q_creator + 0 + >>> F(i).is_q_creator + -1 + >>> F(p).is_q_creator + -1 + + """ + if self.is_below_fermi: + return -1 + return 0 + + @property + def is_q_annihilator(self): + """ + Can we destroy a quasi-particle? (annihilate hole or annihilate particle) + If so, would that be above or below the fermi surface? 
+ + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> a = Symbol('a', above_fermi=1) + >>> i = Symbol('i', below_fermi=1) + >>> p = Symbol('p') + + >>> F(a).is_q_annihilator + 1 + >>> F(i).is_q_annihilator + 0 + >>> F(p).is_q_annihilator + 1 + + """ + if self.is_above_fermi: + return 1 + return 0 + + @property + def is_only_q_creator(self): + """ + Always create a quasi-particle? (create hole or create particle) + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> F(a).is_only_q_creator + False + >>> F(i).is_only_q_creator + True + >>> F(p).is_only_q_creator + False + + """ + return self.is_only_below_fermi + + @property + def is_only_q_annihilator(self): + """ + Always destroy a quasi-particle? (annihilate hole or annihilate particle) + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import F + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> F(a).is_only_q_annihilator + True + >>> F(i).is_only_q_annihilator + False + >>> F(p).is_only_q_annihilator + False + + """ + return self.is_only_above_fermi + + def __repr__(self): + return "AnnihilateFermion(%s)" % self.state + + def _latex(self, printer): + if self.state is S.Zero: + return "a_{0}" + else: + return "a_{%s}" % self.state.name + + +class CreateFermion(FermionicOperator, Creator): + """ + Fermionic creation operator. + """ + + op_symbol = 'f+' + + def _dagger_(self): + return AnnihilateFermion(self.state) + + def apply_operator(self, state): + """ + Apply state to self if self is not symbolic and state is a FockStateKet, else + multiply self by state. + + Examples + ======== + + >>> from sympy.physics.secondquant import B, Dagger, BKet + >>> from sympy.abc import x, y, n + >>> Dagger(B(x)).apply_operator(y) + y*CreateBoson(x) + >>> B(0).apply_operator(BKet((n,))) + sqrt(n)*FockStateBosonKet((n - 1,)) + """ + if isinstance(state, FockStateFermionKet): + element = self.state + return state.up(element) + + elif isinstance(state, Mul): + c_part, nc_part = state.args_cnc() + if isinstance(nc_part[0], FockStateFermionKet): + element = self.state + return Mul(*(c_part + [nc_part[0].up(element)] + nc_part[1:])) + + return Mul(self, state) + + @property + def is_q_creator(self): + """ + Can we create a quasi-particle? (create hole or create particle) + If so, would that be above or below the fermi surface? + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import Fd + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> Fd(a).is_q_creator + 1 + >>> Fd(i).is_q_creator + 0 + >>> Fd(p).is_q_creator + 1 + + """ + if self.is_above_fermi: + return 1 + return 0 + + @property + def is_q_annihilator(self): + """ + Can we destroy a quasi-particle? (annihilate hole or annihilate particle) + If so, would that be above or below the fermi surface? 
+ + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import Fd + >>> a = Symbol('a', above_fermi=1) + >>> i = Symbol('i', below_fermi=1) + >>> p = Symbol('p') + + >>> Fd(a).is_q_annihilator + 0 + >>> Fd(i).is_q_annihilator + -1 + >>> Fd(p).is_q_annihilator + -1 + + """ + if self.is_below_fermi: + return -1 + return 0 + + @property + def is_only_q_creator(self): + """ + Always create a quasi-particle? (create hole or create particle) + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import Fd + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> Fd(a).is_only_q_creator + True + >>> Fd(i).is_only_q_creator + False + >>> Fd(p).is_only_q_creator + False + + """ + return self.is_only_above_fermi + + @property + def is_only_q_annihilator(self): + """ + Always destroy a quasi-particle? (annihilate hole or annihilate particle) + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import Fd + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> Fd(a).is_only_q_annihilator + False + >>> Fd(i).is_only_q_annihilator + True + >>> Fd(p).is_only_q_annihilator + False + + """ + return self.is_only_below_fermi + + def __repr__(self): + return "CreateFermion(%s)" % self.state + + def _latex(self, printer): + if self.state is S.Zero: + return "{a^\\dagger_{0}}" + else: + return "{a^\\dagger_{%s}}" % self.state.name + +Fd = CreateFermion +F = AnnihilateFermion + + +class FockState(Expr): + """ + Many particle Fock state with a sequence of occupation numbers. + + Anywhere you can have a FockState, you can also have S.Zero. + All code must check for this! + + Base class to represent FockStates. + """ + is_commutative = False + + def __new__(cls, occupations): + """ + occupations is a list with two possible meanings: + + - For bosons it is a list of occupation numbers. + Element i is the number of particles in state i. + + - For fermions it is a list of occupied orbits. + Element 0 is the state that was occupied first, element i + is the i'th occupied state. + """ + occupations = list(map(sympify, occupations)) + obj = Basic.__new__(cls, Tuple(*occupations)) + return obj + + def __getitem__(self, i): + i = int(i) + return self.args[0][i] + + def __repr__(self): + return ("FockState(%r)") % (self.args) + + def __str__(self): + return "%s%r%s" % (getattr(self, 'lbracket', ""), self._labels(), getattr(self, 'rbracket', "")) + + def _labels(self): + return self.args[0] + + def __len__(self): + return len(self.args[0]) + + def _latex(self, printer): + return "%s%s%s" % (getattr(self, 'lbracket_latex', ""), printer._print(self._labels()), getattr(self, 'rbracket_latex', "")) + + +class BosonState(FockState): + """ + Base class for FockStateBoson(Ket/Bra). + """ + + def up(self, i): + """ + Performs the action of a creation operator. + + Examples + ======== + + >>> from sympy.physics.secondquant import BBra + >>> b = BBra([1, 2]) + >>> b + FockStateBosonBra((1, 2)) + >>> b.up(1) + FockStateBosonBra((1, 3)) + """ + i = int(i) + new_occs = list(self.args[0]) + new_occs[i] = new_occs[i] + S.One + return self.__class__(new_occs) + + def down(self, i): + """ + Performs the action of an annihilation operator. 
+ + Examples + ======== + + >>> from sympy.physics.secondquant import BBra + >>> b = BBra([1, 2]) + >>> b + FockStateBosonBra((1, 2)) + >>> b.down(1) + FockStateBosonBra((1, 1)) + """ + i = int(i) + new_occs = list(self.args[0]) + if new_occs[i] == S.Zero: + return S.Zero + else: + new_occs[i] = new_occs[i] - S.One + return self.__class__(new_occs) + + +class FermionState(FockState): + """ + Base class for FockStateFermion(Ket/Bra). + """ + + fermi_level = 0 + + def __new__(cls, occupations, fermi_level=0): + occupations = list(map(sympify, occupations)) + if len(occupations) > 1: + try: + (occupations, sign) = _sort_anticommuting_fermions( + occupations, key=hash) + except ViolationOfPauliPrinciple: + return S.Zero + else: + sign = 0 + + cls.fermi_level = fermi_level + + if cls._count_holes(occupations) > fermi_level: + return S.Zero + + if sign % 2: + return S.NegativeOne*FockState.__new__(cls, occupations) + else: + return FockState.__new__(cls, occupations) + + def up(self, i): + """ + Performs the action of a creation operator. + + Explanation + =========== + + If below fermi we try to remove a hole, + if above fermi we try to create a particle. + + If general index p we return ``Kronecker(p,i)*self`` + where ``i`` is a new symbol with restriction above or below. + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import FKet + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + >>> FKet([]).up(a) + FockStateFermionKet((a,)) + + A creator acting on vacuum below fermi vanishes + + >>> FKet([]).up(i) + 0 + + + """ + present = i in self.args[0] + + if self._only_above_fermi(i): + if present: + return S.Zero + else: + return self._add_orbit(i) + elif self._only_below_fermi(i): + if present: + return self._remove_orbit(i) + else: + return S.Zero + else: + if present: + hole = Dummy("i", below_fermi=True) + return KroneckerDelta(i, hole)*self._remove_orbit(i) + else: + particle = Dummy("a", above_fermi=True) + return KroneckerDelta(i, particle)*self._add_orbit(i) + + def down(self, i): + """ + Performs the action of an annihilation operator. + + Explanation + =========== + + If below fermi we try to create a hole, + If above fermi we try to remove a particle. + + If general index p we return ``Kronecker(p,i)*self`` + where ``i`` is a new symbol with restriction above or below. + + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.physics.secondquant import FKet + >>> a = Symbol('a', above_fermi=True) + >>> i = Symbol('i', below_fermi=True) + >>> p = Symbol('p') + + An annihilator acting on vacuum above fermi vanishes + + >>> FKet([]).down(a) + 0 + + Also below fermi, it vanishes, unless we specify a fermi level > 0 + + >>> FKet([]).down(i) + 0 + >>> FKet([],4).down(i) + FockStateFermionKet((i,)) + + """ + present = i in self.args[0] + + if self._only_above_fermi(i): + if present: + return self._remove_orbit(i) + else: + return S.Zero + + elif self._only_below_fermi(i): + if present: + return S.Zero + else: + return self._add_orbit(i) + else: + if present: + hole = Dummy("i", below_fermi=True) + return KroneckerDelta(i, hole)*self._add_orbit(i) + else: + particle = Dummy("a", above_fermi=True) + return KroneckerDelta(i, particle)*self._remove_orbit(i) + + @classmethod + def _only_below_fermi(cls, i): + """ + Tests if given orbit is only below fermi surface. + + If nothing can be concluded we return a conservative False. 
+ """ + if i.is_number: + return i <= cls.fermi_level + if i.assumptions0.get('below_fermi'): + return True + return False + + @classmethod + def _only_above_fermi(cls, i): + """ + Tests if given orbit is only above fermi surface. + + If fermi level has not been set we return True. + If nothing can be concluded we return a conservative False. + """ + if i.is_number: + return i > cls.fermi_level + if i.assumptions0.get('above_fermi'): + return True + return not cls.fermi_level + + def _remove_orbit(self, i): + """ + Removes particle/fills hole in orbit i. No input tests performed here. + """ + new_occs = list(self.args[0]) + pos = new_occs.index(i) + del new_occs[pos] + if (pos) % 2: + return S.NegativeOne*self.__class__(new_occs, self.fermi_level) + else: + return self.__class__(new_occs, self.fermi_level) + + def _add_orbit(self, i): + """ + Adds particle/creates hole in orbit i. No input tests performed here. + """ + return self.__class__((i,) + self.args[0], self.fermi_level) + + @classmethod + def _count_holes(cls, list): + """ + Returns the number of identified hole states in list. + """ + return len([i for i in list if cls._only_below_fermi(i)]) + + def _negate_holes(self, list): + return tuple([-i if i <= self.fermi_level else i for i in list]) + + def __repr__(self): + if self.fermi_level: + return "FockStateKet(%r, fermi_level=%s)" % (self.args[0], self.fermi_level) + else: + return "FockStateKet(%r)" % (self.args[0],) + + def _labels(self): + return self._negate_holes(self.args[0]) + + +class FockStateKet(FockState): + """ + Representation of a ket. + """ + lbracket = '|' + rbracket = '>' + lbracket_latex = r'\left|' + rbracket_latex = r'\right\rangle' + + +class FockStateBra(FockState): + """ + Representation of a bra. + """ + lbracket = '<' + rbracket = '|' + lbracket_latex = r'\left\langle' + rbracket_latex = r'\right|' + + def __mul__(self, other): + if isinstance(other, FockStateKet): + return InnerProduct(self, other) + else: + return Expr.__mul__(self, other) + + +class FockStateBosonKet(BosonState, FockStateKet): + """ + Many particle Fock state with a sequence of occupation numbers. + + Occupation numbers can be any integer >= 0. + + Examples + ======== + + >>> from sympy.physics.secondquant import BKet + >>> BKet([1, 2]) + FockStateBosonKet((1, 2)) + """ + def _dagger_(self): + return FockStateBosonBra(*self.args) + + +class FockStateBosonBra(BosonState, FockStateBra): + """ + Describes a collection of BosonBra particles. + + Examples + ======== + + >>> from sympy.physics.secondquant import BBra + >>> BBra([1, 2]) + FockStateBosonBra((1, 2)) + """ + def _dagger_(self): + return FockStateBosonKet(*self.args) + + +class FockStateFermionKet(FermionState, FockStateKet): + """ + Many-particle Fock state with a sequence of occupied orbits. + + Explanation + =========== + + Each state can only have one particle, so we choose to store a list of + occupied orbits rather than a tuple with occupation numbers (zeros and ones). + + states below fermi level are holes, and are represented by negative labels + in the occupation list. + + For symbolic state labels, the fermi_level caps the number of allowed hole- + states. 
+
+    Examples
+    ========
+
+    >>> from sympy.physics.secondquant import FKet
+    >>> FKet([1, 2])
+    FockStateFermionKet((1, 2))
+    """
+    def _dagger_(self):
+        return FockStateFermionBra(*self.args)
+
+
+class FockStateFermionBra(FermionState, FockStateBra):
+    """
+    See Also
+    ========
+
+    FockStateFermionKet
+
+    Examples
+    ========
+
+    >>> from sympy.physics.secondquant import FBra
+    >>> FBra([1, 2])
+    FockStateFermionBra((1, 2))
+    """
+    def _dagger_(self):
+        return FockStateFermionKet(*self.args)
+
+BBra = FockStateBosonBra
+BKet = FockStateBosonKet
+FBra = FockStateFermionBra
+FKet = FockStateFermionKet
+
+
+def _apply_Mul(m):
+    """
+    Take a Mul instance with operators and apply them to states.
+
+    Explanation
+    ===========
+
+    This method applies all operators with integer state labels
+    to the actual states. For symbolic state labels, nothing is done.
+    When inner products of FockStates are encountered (like ``<a|b>``),
+    they are converted to instances of InnerProduct.
+
+    This does not currently work on double inner products like
+    ``<a|b>*<c|d>``.
+
+    If the argument is not a Mul, it is simply returned as is.
+    """
+    if not isinstance(m, Mul):
+        return m
+    c_part, nc_part = m.args_cnc()
+    n_nc = len(nc_part)
+    if n_nc in (0, 1):
+        return m
+    else:
+        last = nc_part[-1]
+        next_to_last = nc_part[-2]
+        if isinstance(last, FockStateKet):
+            if isinstance(next_to_last, SqOperator):
+                if next_to_last.is_symbolic:
+                    return m
+                else:
+                    result = next_to_last.apply_operator(last)
+                    if result == 0:
+                        return S.Zero
+                    else:
+                        return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
+            elif isinstance(next_to_last, Pow):
+                if isinstance(next_to_last.base, SqOperator) and \
+                        next_to_last.exp.is_Integer:
+                    if next_to_last.base.is_symbolic:
+                        return m
+                    else:
+                        result = last
+                        for i in range(next_to_last.exp):
+                            result = next_to_last.base.apply_operator(result)
+                            if result == 0:
+                                break
+                        if result == 0:
+                            return S.Zero
+                        else:
+                            return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
+                else:
+                    return m
+            elif isinstance(next_to_last, FockStateBra):
+                result = InnerProduct(next_to_last, last)
+                if result == 0:
+                    return S.Zero
+                else:
+                    return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
+            else:
+                return m
+        else:
+            return m
+
+
+def apply_operators(e):
+    """
+    Take a SymPy expression with operators and states and apply the operators.
+
+    Examples
+    ========
+
+    >>> from sympy.physics.secondquant import apply_operators
+    >>> from sympy import sympify
+    >>> apply_operators(sympify(3)+4)
+    7
+    """
+    e = e.expand()
+    muls = e.atoms(Mul)
+    subs_list = [(m, _apply_Mul(m)) for m in iter(muls)]
+    return e.subs(subs_list)
+
+
+class InnerProduct(Basic):
+    """
+    An unevaluated inner product between a bra and ket.
+
+    Explanation
+    ===========
+
+    Currently this class just reduces things to a product of
+    Kronecker Deltas. In the future, we could introduce abstract
+    states like ``|a>`` and ``|b>``, and leave the inner product unevaluated as
+    ``<a|b>``.
+ + """ + is_commutative = True + + def __new__(cls, bra, ket): + if not isinstance(bra, FockStateBra): + raise TypeError("must be a bra") + if not isinstance(ket, FockStateKet): + raise TypeError("must be a ket") + return cls.eval(bra, ket) + + @classmethod + def eval(cls, bra, ket): + result = S.One + for i, j in zip(bra.args[0], ket.args[0]): + result *= KroneckerDelta(i, j) + if result == 0: + break + return result + + @property + def bra(self): + """Returns the bra part of the state""" + return self.args[0] + + @property + def ket(self): + """Returns the ket part of the state""" + return self.args[1] + + def __repr__(self): + sbra = repr(self.bra) + sket = repr(self.ket) + return "%s|%s" % (sbra[:-1], sket[1:]) + + def __str__(self): + return self.__repr__() + + +def matrix_rep(op, basis): + """ + Find the representation of an operator in a basis. + + Examples + ======== + + >>> from sympy.physics.secondquant import VarBosonicBasis, B, matrix_rep + >>> b = VarBosonicBasis(5) + >>> o = B(0) + >>> matrix_rep(o, b) + Matrix([ + [0, 1, 0, 0, 0], + [0, 0, sqrt(2), 0, 0], + [0, 0, 0, sqrt(3), 0], + [0, 0, 0, 0, 2], + [0, 0, 0, 0, 0]]) + """ + a = zeros(len(basis)) + for i in range(len(basis)): + for j in range(len(basis)): + a[i, j] = apply_operators(Dagger(basis[i])*op*basis[j]) + return a + + +class BosonicBasis: + """ + Base class for a basis set of bosonic Fock states. + """ + pass + + +class VarBosonicBasis: + """ + A single state, variable particle number basis set. + + Examples + ======== + + >>> from sympy.physics.secondquant import VarBosonicBasis + >>> b = VarBosonicBasis(5) + >>> b + [FockState((0,)), FockState((1,)), FockState((2,)), + FockState((3,)), FockState((4,))] + """ + + def __init__(self, n_max): + self.n_max = n_max + self._build_states() + + def _build_states(self): + self.basis = [] + for i in range(self.n_max): + self.basis.append(FockStateBosonKet([i])) + self.n_basis = len(self.basis) + + def index(self, state): + """ + Returns the index of state in basis. + + Examples + ======== + + >>> from sympy.physics.secondquant import VarBosonicBasis + >>> b = VarBosonicBasis(3) + >>> state = b.state(1) + >>> b + [FockState((0,)), FockState((1,)), FockState((2,))] + >>> state + FockStateBosonKet((1,)) + >>> b.index(state) + 1 + """ + return self.basis.index(state) + + def state(self, i): + """ + The state of a single basis. + + Examples + ======== + + >>> from sympy.physics.secondquant import VarBosonicBasis + >>> b = VarBosonicBasis(5) + >>> b.state(3) + FockStateBosonKet((3,)) + """ + return self.basis[i] + + def __getitem__(self, i): + return self.state(i) + + def __len__(self): + return len(self.basis) + + def __repr__(self): + return repr(self.basis) + + +class FixedBosonicBasis(BosonicBasis): + """ + Fixed particle number basis set. 
+ + Examples + ======== + + >>> from sympy.physics.secondquant import FixedBosonicBasis + >>> b = FixedBosonicBasis(2, 2) + >>> state = b.state(1) + >>> b + [FockState((2, 0)), FockState((1, 1)), FockState((0, 2))] + >>> state + FockStateBosonKet((1, 1)) + >>> b.index(state) + 1 + """ + def __init__(self, n_particles, n_levels): + self.n_particles = n_particles + self.n_levels = n_levels + self._build_particle_locations() + self._build_states() + + def _build_particle_locations(self): + tup = ["i%i" % i for i in range(self.n_particles)] + first_loop = "for i0 in range(%i)" % self.n_levels + other_loops = '' + for cur, prev in zip(tup[1:], tup): + temp = "for %s in range(%s + 1) " % (cur, prev) + other_loops = other_loops + temp + tup_string = "(%s)" % ", ".join(tup) + list_comp = "[%s %s %s]" % (tup_string, first_loop, other_loops) + result = eval(list_comp) + if self.n_particles == 1: + result = [(item,) for item in result] + self.particle_locations = result + + def _build_states(self): + self.basis = [] + for tuple_of_indices in self.particle_locations: + occ_numbers = self.n_levels*[0] + for level in tuple_of_indices: + occ_numbers[level] += 1 + self.basis.append(FockStateBosonKet(occ_numbers)) + self.n_basis = len(self.basis) + + def index(self, state): + """Returns the index of state in basis. + + Examples + ======== + + >>> from sympy.physics.secondquant import FixedBosonicBasis + >>> b = FixedBosonicBasis(2, 3) + >>> b.index(b.state(3)) + 3 + """ + return self.basis.index(state) + + def state(self, i): + """Returns the state that lies at index i of the basis + + Examples + ======== + + >>> from sympy.physics.secondquant import FixedBosonicBasis + >>> b = FixedBosonicBasis(2, 3) + >>> b.state(3) + FockStateBosonKet((1, 0, 1)) + """ + return self.basis[i] + + def __getitem__(self, i): + return self.state(i) + + def __len__(self): + return len(self.basis) + + def __repr__(self): + return repr(self.basis) + + +class Commutator(Function): + """ + The Commutator: [A, B] = A*B - B*A + + The arguments are ordered according to .__cmp__() + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import Commutator + >>> A, B = symbols('A,B', commutative=False) + >>> Commutator(B, A) + -Commutator(A, B) + + Evaluate the commutator with .doit() + + >>> comm = Commutator(A,B); comm + Commutator(A, B) + >>> comm.doit() + A*B - B*A + + + For two second quantization operators the commutator is evaluated + immediately: + + >>> from sympy.physics.secondquant import Fd, F + >>> a = symbols('a', above_fermi=True) + >>> i = symbols('i', below_fermi=True) + >>> p,q = symbols('p,q') + + >>> Commutator(Fd(a),Fd(i)) + 2*NO(CreateFermion(a)*CreateFermion(i)) + + But for more complicated expressions, the evaluation is triggered by + a call to .doit() + + >>> comm = Commutator(Fd(p)*Fd(q),F(i)); comm + Commutator(CreateFermion(p)*CreateFermion(q), AnnihilateFermion(i)) + >>> comm.doit(wicks=True) + -KroneckerDelta(i, p)*CreateFermion(q) + + KroneckerDelta(i, q)*CreateFermion(p) + + """ + + is_commutative = False + + @classmethod + def eval(cls, a, b): + """ + The Commutator [A,B] is on canonical form if A < B. 
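+# Illustrative sketch: the eval()-assembled nested loops in
+# _build_particle_locations enumerate non-increasing level tuples; the same
+# set of locations can be written explicitly with itertools (the ordering of
+# the list may differ for larger cases).
+from itertools import combinations_with_replacement
+
+def particle_locations(n_particles, n_levels):
+    # one tuple per way of placing indistinguishable particles on levels,
+    # stored with levels in non-increasing order as in the class above
+    return [tuple(sorted(c, reverse=True))
+            for c in combinations_with_replacement(range(n_levels), n_particles)]
+
+assert particle_locations(2, 2) == [(0, 0), (1, 0), (1, 1)]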
+ + Examples + ======== + + >>> from sympy.physics.secondquant import Commutator, F, Fd + >>> from sympy.abc import x + >>> c1 = Commutator(F(x), Fd(x)) + >>> c2 = Commutator(Fd(x), F(x)) + >>> Commutator.eval(c1, c2) + 0 + """ + if not (a and b): + return S.Zero + if a == b: + return S.Zero + if a.is_commutative or b.is_commutative: + return S.Zero + + # + # [A+B,C] -> [A,C] + [B,C] + # + a = a.expand() + if isinstance(a, Add): + return Add(*[cls(term, b) for term in a.args]) + b = b.expand() + if isinstance(b, Add): + return Add(*[cls(a, term) for term in b.args]) + + # + # [xA,yB] -> xy*[A,B] + # + ca, nca = a.args_cnc() + cb, ncb = b.args_cnc() + c_part = list(ca) + list(cb) + if c_part: + return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb))) + + # + # single second quantization operators + # + if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator): + if isinstance(b, CreateBoson) and isinstance(a, AnnihilateBoson): + return KroneckerDelta(a.state, b.state) + if isinstance(a, CreateBoson) and isinstance(b, AnnihilateBoson): + return S.NegativeOne*KroneckerDelta(a.state, b.state) + else: + return S.Zero + if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator): + return wicks(a*b) - wicks(b*a) + + # + # Canonical ordering of arguments + # + if a.sort_key() > b.sort_key(): + return S.NegativeOne*cls(b, a) + + def doit(self, **hints): + """ + Enables the computation of complex expressions. + + Examples + ======== + + >>> from sympy.physics.secondquant import Commutator, F, Fd + >>> from sympy import symbols + >>> i, j = symbols('i,j', below_fermi=True) + >>> a, b = symbols('a,b', above_fermi=True) + >>> c = Commutator(Fd(a)*F(i),Fd(b)*F(j)) + >>> c.doit(wicks=True) + 0 + """ + a = self.args[0] + b = self.args[1] + + if hints.get("wicks"): + a = a.doit(**hints) + b = b.doit(**hints) + try: + return wicks(a*b) - wicks(b*a) + except ContractionAppliesOnlyToFermions: + pass + except WicksTheoremDoesNotApply: + pass + + return (a*b - b*a).doit(**hints) + + def __repr__(self): + return "Commutator(%s,%s)" % (self.args[0], self.args[1]) + + def __str__(self): + return "[%s,%s]" % (self.args[0], self.args[1]) + + def _latex(self, printer): + return "\\left[%s,%s\\right]" % tuple([ + printer._print(arg) for arg in self.args]) + + +class NO(Expr): + """ + This Object is used to represent normal ordering brackets. + + i.e. {abcd} sometimes written :abcd: + + Explanation + =========== + + Applying the function NO(arg) to an argument means that all operators in + the argument will be assumed to anticommute, and have vanishing + contractions. This allows an immediate reordering to canonical form + upon object creation. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import NO, F, Fd + >>> p,q = symbols('p,q') + >>> NO(Fd(p)*F(q)) + NO(CreateFermion(p)*AnnihilateFermion(q)) + >>> NO(F(q)*Fd(p)) + -NO(CreateFermion(p)*AnnihilateFermion(q)) + + + Note + ==== + + If you want to generate a normal ordered equivalent of an expression, you + should use the function wicks(). This class only indicates that all + operators inside the brackets anticommute, and have vanishing contractions. + Nothing more, nothing less. + + """ + is_commutative = False + + def __new__(cls, arg): + """ + Use anticommutation to get canonical form of operators. + + Explanation + =========== + + Employ associativity of normal ordered product: {ab{cd}} = {abcd} + but note that {ab}{cd} /= {abcd}. 
+ + We also employ distributivity: {ab + cd} = {ab} + {cd}. + + Canonical form also implies expand() {ab(c+d)} = {abc} + {abd}. + + """ + + # {ab + cd} = {ab} + {cd} + arg = sympify(arg) + arg = arg.expand() + if arg.is_Add: + return Add(*[ cls(term) for term in arg.args]) + + if arg.is_Mul: + + # take coefficient outside of normal ordering brackets + c_part, seq = arg.args_cnc() + if c_part: + coeff = Mul(*c_part) + if not seq: + return coeff + else: + coeff = S.One + + # {ab{cd}} = {abcd} + newseq = [] + foundit = False + for fac in seq: + if isinstance(fac, NO): + newseq.extend(fac.args) + foundit = True + else: + newseq.append(fac) + if foundit: + return coeff*cls(Mul(*newseq)) + + # We assume that the user don't mix B and F operators + if isinstance(seq[0], BosonicOperator): + raise NotImplementedError + + try: + newseq, sign = _sort_anticommuting_fermions(seq) + except ViolationOfPauliPrinciple: + return S.Zero + + if sign % 2: + return (S.NegativeOne*coeff)*cls(Mul(*newseq)) + elif sign: + return coeff*cls(Mul(*newseq)) + else: + pass # since sign==0, no permutations was necessary + + # if we couldn't do anything with Mul object, we just + # mark it as normal ordered + if coeff != S.One: + return coeff*cls(Mul(*newseq)) + return Expr.__new__(cls, Mul(*newseq)) + + if isinstance(arg, NO): + return arg + + # if object was not Mul or Add, normal ordering does not apply + return arg + + @property + def has_q_creators(self): + """ + Return 0 if the leftmost argument of the first argument is a not a + q_creator, else 1 if it is above fermi or -1 if it is below fermi. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import NO, F, Fd + + >>> a = symbols('a', above_fermi=True) + >>> i = symbols('i', below_fermi=True) + >>> NO(Fd(a)*Fd(i)).has_q_creators + 1 + >>> NO(F(i)*F(a)).has_q_creators + -1 + >>> NO(Fd(i)*F(a)).has_q_creators #doctest: +SKIP + 0 + + """ + return self.args[0].args[0].is_q_creator + + @property + def has_q_annihilators(self): + """ + Return 0 if the rightmost argument of the first argument is a not a + q_annihilator, else 1 if it is above fermi or -1 if it is below fermi. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import NO, F, Fd + + >>> a = symbols('a', above_fermi=True) + >>> i = symbols('i', below_fermi=True) + >>> NO(Fd(a)*Fd(i)).has_q_annihilators + -1 + >>> NO(F(i)*F(a)).has_q_annihilators + 1 + >>> NO(Fd(a)*F(i)).has_q_annihilators + 0 + + """ + return self.args[0].args[-1].is_q_annihilator + + def doit(self, **hints): + """ + Either removes the brackets or enables complex computations + in its arguments. + + Examples + ======== + + >>> from sympy.physics.secondquant import NO, Fd, F + >>> from textwrap import fill + >>> from sympy import symbols, Dummy + >>> p,q = symbols('p,q', cls=Dummy) + >>> print(fill(str(NO(Fd(p)*F(q)).doit()))) + KroneckerDelta(_a, _p)*KroneckerDelta(_a, + _q)*CreateFermion(_a)*AnnihilateFermion(_a) + KroneckerDelta(_a, + _p)*KroneckerDelta(_i, _q)*CreateFermion(_a)*AnnihilateFermion(_i) - + KroneckerDelta(_a, _q)*KroneckerDelta(_i, + _p)*AnnihilateFermion(_a)*CreateFermion(_i) - KroneckerDelta(_i, + _p)*KroneckerDelta(_i, _q)*AnnihilateFermion(_i)*CreateFermion(_i) + """ + if hints.get("remove_brackets", True): + return self._remove_brackets() + else: + return self.__new__(type(self), self.args[0].doit(**hints)) + + def _remove_brackets(self): + """ + Returns the sorted string without normal order brackets. 
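+# Illustrative sketch: NO() only reorders and bookkeeps signs, as described
+# above; one anticommuting swap flips the sign, and a repeated fermionic
+# operator makes the whole bracket vanish.
+from sympy import symbols
+from sympy.physics.secondquant import NO, F, Fd
+
+p, q = symbols('p q')
+
+assert NO(F(q)*Fd(p)) == -NO(Fd(p)*F(q))   # one swap => factor -1
+assert NO(F(p)*F(q)*F(p)) == 0             # Pauli principle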
+ + The returned string have the property that no nonzero + contractions exist. + """ + + # check if any creator is also an annihilator + subslist = [] + for i in self.iter_q_creators(): + if self[i].is_q_annihilator: + assume = self[i].state.assumptions0 + + # only operators with a dummy index can be split in two terms + if isinstance(self[i].state, Dummy): + + # create indices with fermi restriction + assume.pop("above_fermi", None) + assume["below_fermi"] = True + below = Dummy('i', **assume) + assume.pop("below_fermi", None) + assume["above_fermi"] = True + above = Dummy('a', **assume) + + cls = type(self[i]) + split = ( + self[i].__new__(cls, below) + * KroneckerDelta(below, self[i].state) + + self[i].__new__(cls, above) + * KroneckerDelta(above, self[i].state) + ) + subslist.append((self[i], split)) + else: + raise SubstitutionOfAmbigousOperatorFailed(self[i]) + if subslist: + result = NO(self.subs(subslist)) + if isinstance(result, Add): + return Add(*[term.doit() for term in result.args]) + else: + return self.args[0] + + def _expand_operators(self): + """ + Returns a sum of NO objects that contain no ambiguous q-operators. + + Explanation + =========== + + If an index q has range both above and below fermi, the operator F(q) + is ambiguous in the sense that it can be both a q-creator and a q-annihilator. + If q is dummy, it is assumed to be a summation variable and this method + rewrites it into a sum of NO terms with unambiguous operators: + + {Fd(p)*F(q)} = {Fd(a)*F(b)} + {Fd(a)*F(i)} + {Fd(j)*F(b)} -{F(i)*Fd(j)} + + where a,b are above and i,j are below fermi level. + """ + return NO(self._remove_brackets) + + def __getitem__(self, i): + if isinstance(i, slice): + indices = i.indices(len(self)) + return [self.args[0].args[i] for i in range(*indices)] + else: + return self.args[0].args[i] + + def __len__(self): + return len(self.args[0].args) + + def iter_q_annihilators(self): + """ + Iterates over the annihilation operators. + + Examples + ======== + + >>> from sympy import symbols + >>> i, j = symbols('i j', below_fermi=True) + >>> a, b = symbols('a b', above_fermi=True) + >>> from sympy.physics.secondquant import NO, F, Fd + >>> no = NO(Fd(a)*F(i)*F(b)*Fd(j)) + + >>> no.iter_q_creators() + + >>> list(no.iter_q_creators()) + [0, 1] + >>> list(no.iter_q_annihilators()) + [3, 2] + + """ + ops = self.args[0].args + iter = range(len(ops) - 1, -1, -1) + for i in iter: + if ops[i].is_q_annihilator: + yield i + else: + break + + def iter_q_creators(self): + """ + Iterates over the creation operators. + + Examples + ======== + + >>> from sympy import symbols + >>> i, j = symbols('i j', below_fermi=True) + >>> a, b = symbols('a b', above_fermi=True) + >>> from sympy.physics.secondquant import NO, F, Fd + >>> no = NO(Fd(a)*F(i)*F(b)*Fd(j)) + + >>> no.iter_q_creators() + + >>> list(no.iter_q_creators()) + [0, 1] + >>> list(no.iter_q_annihilators()) + [3, 2] + + """ + + ops = self.args[0].args + iter = range(0, len(ops)) + for i in iter: + if ops[i].is_q_creator: + yield i + else: + break + + def get_subNO(self, i): + """ + Returns a NO() without FermionicOperator at index i. 
+ + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import F, NO + >>> p, q, r = symbols('p,q,r') + + >>> NO(F(p)*F(q)*F(r)).get_subNO(1) + NO(AnnihilateFermion(p)*AnnihilateFermion(r)) + + """ + arg0 = self.args[0] # it's a Mul by definition of how it's created + mul = arg0._new_rawargs(*(arg0.args[:i] + arg0.args[i + 1:])) + return NO(mul) + + def _latex(self, printer): + return "\\left\\{%s\\right\\}" % printer._print(self.args[0]) + + def __repr__(self): + return "NO(%s)" % self.args[0] + + def __str__(self): + return ":%s:" % self.args[0] + + +def contraction(a, b): + """ + Calculates contraction of Fermionic operators a and b. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.secondquant import F, Fd, contraction + >>> p, q = symbols('p,q') + >>> a, b = symbols('a,b', above_fermi=True) + >>> i, j = symbols('i,j', below_fermi=True) + + A contraction is non-zero only if a quasi-creator is to the right of a + quasi-annihilator: + + >>> contraction(F(a),Fd(b)) + KroneckerDelta(a, b) + >>> contraction(Fd(i),F(j)) + KroneckerDelta(i, j) + + For general indices a non-zero result restricts the indices to below/above + the fermi surface: + + >>> contraction(Fd(p),F(q)) + KroneckerDelta(_i, q)*KroneckerDelta(p, q) + >>> contraction(F(p),Fd(q)) + KroneckerDelta(_a, q)*KroneckerDelta(p, q) + + Two creators or two annihilators always vanishes: + + >>> contraction(F(p),F(q)) + 0 + >>> contraction(Fd(p),Fd(q)) + 0 + + """ + if isinstance(b, FermionicOperator) and isinstance(a, FermionicOperator): + if isinstance(a, AnnihilateFermion) and isinstance(b, CreateFermion): + if b.state.assumptions0.get("below_fermi"): + return S.Zero + if a.state.assumptions0.get("below_fermi"): + return S.Zero + if b.state.assumptions0.get("above_fermi"): + return KroneckerDelta(a.state, b.state) + if a.state.assumptions0.get("above_fermi"): + return KroneckerDelta(a.state, b.state) + + return (KroneckerDelta(a.state, b.state)* + KroneckerDelta(b.state, Dummy('a', above_fermi=True))) + if isinstance(b, AnnihilateFermion) and isinstance(a, CreateFermion): + if b.state.assumptions0.get("above_fermi"): + return S.Zero + if a.state.assumptions0.get("above_fermi"): + return S.Zero + if b.state.assumptions0.get("below_fermi"): + return KroneckerDelta(a.state, b.state) + if a.state.assumptions0.get("below_fermi"): + return KroneckerDelta(a.state, b.state) + + return (KroneckerDelta(a.state, b.state)* + KroneckerDelta(b.state, Dummy('i', below_fermi=True))) + + # vanish if 2xAnnihilator or 2xCreator + return S.Zero + + else: + #not fermion operators + t = ( isinstance(i, FermionicOperator) for i in (a, b) ) + raise ContractionAppliesOnlyToFermions(*t) + + +def _sqkey(sq_operator): + """Generates key for canonical sorting of SQ operators.""" + return sq_operator._sortkey() + + +def _sort_anticommuting_fermions(string1, key=_sqkey): + """Sort fermionic operators to canonical order, assuming all pairs anticommute. + + Explanation + =========== + + Uses a bidirectional bubble sort. Items in string1 are not referenced + so in principle they may be any comparable objects. The sorting depends on the + operators '>' and '=='. + + If the Pauli principle is violated, an exception is raised. 
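+# Illustrative sketch: contraction() supplies the pair contractions that
+# wicks() sums over; for a general annihilator/creator pair the fully
+# contracted part of Wick's theorem is the contraction itself.
+from sympy import symbols
+from sympy.physics.secondquant import F, Fd, contraction, wicks
+
+p, q = symbols('p q')
+
+print(contraction(F(p), Fd(q)))
+print(wicks(F(p)*Fd(q), keep_only_fully_contracted=True))  # same delta structure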
+ + Returns + ======= + + tuple (sorted_str, sign) + + sorted_str: list containing the sorted operators + sign: int telling how many times the sign should be changed + (if sign==0 the string was already sorted) + """ + + verified = False + sign = 0 + rng = list(range(len(string1) - 1)) + rev = list(range(len(string1) - 3, -1, -1)) + + keys = list(map(key, string1)) + key_val = dict(list(zip(keys, string1))) + + while not verified: + verified = True + for i in rng: + left = keys[i] + right = keys[i + 1] + if left == right: + raise ViolationOfPauliPrinciple([left, right]) + if left > right: + verified = False + keys[i:i + 2] = [right, left] + sign = sign + 1 + if verified: + break + for i in rev: + left = keys[i] + right = keys[i + 1] + if left == right: + raise ViolationOfPauliPrinciple([left, right]) + if left > right: + verified = False + keys[i:i + 2] = [right, left] + sign = sign + 1 + string1 = [ key_val[k] for k in keys ] + return (string1, sign) + + +def evaluate_deltas(e): + """ + We evaluate KroneckerDelta symbols in the expression assuming Einstein summation. + + Explanation + =========== + + If one index is repeated it is summed over and in effect substituted with + the other one. If both indices are repeated we substitute according to what + is the preferred index. this is determined by + KroneckerDelta.preferred_index and KroneckerDelta.killable_index. + + In case there are no possible substitutions or if a substitution would + imply a loss of information, nothing is done. + + In case an index appears in more than one KroneckerDelta, the resulting + substitution depends on the order of the factors. Since the ordering is platform + dependent, the literal expression resulting from this function may be hard to + predict. + + Examples + ======== + + We assume the following: + + >>> from sympy import symbols, Function, Dummy, KroneckerDelta + >>> from sympy.physics.secondquant import evaluate_deltas + >>> i,j = symbols('i j', below_fermi=True, cls=Dummy) + >>> a,b = symbols('a b', above_fermi=True, cls=Dummy) + >>> p,q = symbols('p q', cls=Dummy) + >>> f = Function('f') + >>> t = Function('t') + + The order of preference for these indices according to KroneckerDelta is + (a, b, i, j, p, q). 
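+# Illustrative sketch of the sign convention just described: sorting
+# [F(p), Fd(q)] to canonical order (creators before annihilators) takes one
+# swap, so the returned sign is 1 and the overall phase is (-1)**sign.
+from sympy import symbols
+from sympy.physics.secondquant import F, Fd
+from sympy.physics.secondquant import _sort_anticommuting_fermions  # internal helper
+
+p, q = symbols('p q')
+
+sorted_ops, sign = _sort_anticommuting_fermions([F(p), Fd(q)])
+assert sorted_ops == [Fd(q), F(p)] and sign == 1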
+ + Trivial cases: + + >>> evaluate_deltas(KroneckerDelta(i,j)*f(i)) # d_ij f(i) -> f(j) + f(_j) + >>> evaluate_deltas(KroneckerDelta(i,j)*f(j)) # d_ij f(j) -> f(i) + f(_i) + >>> evaluate_deltas(KroneckerDelta(i,p)*f(p)) # d_ip f(p) -> f(i) + f(_i) + >>> evaluate_deltas(KroneckerDelta(q,p)*f(p)) # d_qp f(p) -> f(q) + f(_q) + >>> evaluate_deltas(KroneckerDelta(q,p)*f(q)) # d_qp f(q) -> f(p) + f(_p) + + More interesting cases: + + >>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q)) + f(_i, _q)*t(_a, _i) + >>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q)) + f(_a, _q)*t(_a, _i) + >>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q)) + f(_p, _p) + + Finally, here are some cases where nothing is done, because that would + imply a loss of information: + + >>> evaluate_deltas(KroneckerDelta(i,p)*f(q)) + f(_q)*KroneckerDelta(_i, _p) + >>> evaluate_deltas(KroneckerDelta(i,p)*f(i)) + f(_i)*KroneckerDelta(_i, _p) + """ + + # We treat Deltas only in mul objects + # for general function objects we don't evaluate KroneckerDeltas in arguments, + # but here we hard code exceptions to this rule + accepted_functions = ( + Add, + ) + if isinstance(e, accepted_functions): + return e.func(*[evaluate_deltas(arg) for arg in e.args]) + + elif isinstance(e, Mul): + # find all occurrences of delta function and count each index present in + # expression. + deltas = [] + indices = {} + for i in e.args: + for s in i.free_symbols: + if s in indices: + indices[s] += 1 + else: + indices[s] = 0 # geek counting simplifies logic below + if isinstance(i, KroneckerDelta): + deltas.append(i) + + for d in deltas: + # If we do something, and there are more deltas, we should recurse + # to treat the resulting expression properly + if d.killable_index.is_Symbol and indices[d.killable_index]: + e = e.subs(d.killable_index, d.preferred_index) + if len(deltas) > 1: + return evaluate_deltas(e) + elif (d.preferred_index.is_Symbol and indices[d.preferred_index] + and d.indices_contain_equal_information): + e = e.subs(d.preferred_index, d.killable_index) + if len(deltas) > 1: + return evaluate_deltas(e) + else: + pass + + return e + # nothing to do, maybe we hit a Symbol or a number + else: + return e + + +def substitute_dummies(expr, new_indices=False, pretty_indices={}): + """ + Collect terms by substitution of dummy variables. + + Explanation + =========== + + This routine allows simplification of Add expressions containing terms + which differ only due to dummy variables. + + The idea is to substitute all dummy variables consistently depending on + the structure of the term. For each term, we obtain a sequence of all + dummy variables, where the order is determined by the index range, what + factors the index belongs to and its position in each factor. See + _get_ordered_dummies() for more information about the sorting of dummies. + The index sequence is then substituted consistently in each term. 
+ + Examples + ======== + + >>> from sympy import symbols, Function, Dummy + >>> from sympy.physics.secondquant import substitute_dummies + >>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy) + >>> i,j = symbols('i j', below_fermi=True, cls=Dummy) + >>> f = Function('f') + + >>> expr = f(a,b) + f(c,d); expr + f(_a, _b) + f(_c, _d) + + Since a, b, c and d are equivalent summation indices, the expression can be + simplified to a single term (for which the dummy indices are still summed over) + + >>> substitute_dummies(expr) + 2*f(_a, _b) + + + Controlling output: + + By default the dummy symbols that are already present in the expression + will be reused in a different permutation. However, if new_indices=True, + new dummies will be generated and inserted. The keyword 'pretty_indices' + can be used to control this generation of new symbols. + + By default the new dummies will be generated on the form i_1, i_2, a_1, + etc. If you supply a dictionary with key:value pairs in the form: + + { index_group: string_of_letters } + + The letters will be used as labels for the new dummy symbols. The + index_groups must be one of 'above', 'below' or 'general'. + + >>> expr = f(a,b,i,j) + >>> my_dummies = { 'above':'st', 'below':'uv' } + >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies) + f(_s, _t, _u, _v) + + If we run out of letters, or if there is no keyword for some index_group + the default dummy generator will be used as a fallback: + + >>> p,q = symbols('p q', cls=Dummy) # general indices + >>> expr = f(p,q) + >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies) + f(_p_0, _p_1) + + """ + + # setup the replacing dummies + if new_indices: + letters_above = pretty_indices.get('above', "") + letters_below = pretty_indices.get('below', "") + letters_general = pretty_indices.get('general', "") + len_above = len(letters_above) + len_below = len(letters_below) + len_general = len(letters_general) + + def _i(number): + try: + return letters_below[number] + except IndexError: + return 'i_' + str(number - len_below) + + def _a(number): + try: + return letters_above[number] + except IndexError: + return 'a_' + str(number - len_above) + + def _p(number): + try: + return letters_general[number] + except IndexError: + return 'p_' + str(number - len_general) + + aboves = [] + belows = [] + generals = [] + + dummies = expr.atoms(Dummy) + if not new_indices: + dummies = sorted(dummies, key=default_sort_key) + + # generate lists with the dummies we will insert + a = i = p = 0 + for d in dummies: + assum = d.assumptions0 + + if assum.get("above_fermi"): + if new_indices: + sym = _a(a) + a += 1 + l1 = aboves + elif assum.get("below_fermi"): + if new_indices: + sym = _i(i) + i += 1 + l1 = belows + else: + if new_indices: + sym = _p(p) + p += 1 + l1 = generals + + if new_indices: + l1.append(Dummy(sym, **assum)) + else: + l1.append(d) + + expr = expr.expand() + terms = Add.make_args(expr) + new_terms = [] + for term in terms: + i = iter(belows) + a = iter(aboves) + p = iter(generals) + ordered = _get_ordered_dummies(term) + subsdict = {} + for d in ordered: + if d.assumptions0.get('below_fermi'): + subsdict[d] = next(i) + elif d.assumptions0.get('above_fermi'): + subsdict[d] = next(a) + else: + subsdict[d] = next(p) + subslist = [] + final_subs = [] + for k, v in subsdict.items(): + if k == v: + continue + if v in subsdict: + # We check if the sequence of substitutions end quickly. 
In + # that case, we can avoid temporary symbols if we ensure the + # correct substitution order. + if subsdict[v] in subsdict: + # (x, y) -> (y, x), we need a temporary variable + x = Dummy('x') + subslist.append((k, x)) + final_subs.append((x, v)) + else: + # (x, y) -> (y, a), x->y must be done last + # but before temporary variables are resolved + final_subs.insert(0, (k, v)) + else: + subslist.append((k, v)) + subslist.extend(final_subs) + new_terms.append(term.subs(subslist)) + return Add(*new_terms) + + +class KeyPrinter(StrPrinter): + """Printer for which only equal objects are equal in print""" + def _print_Dummy(self, expr): + return "(%s_%i)" % (expr.name, expr.dummy_index) + + +def __kprint(expr): + p = KeyPrinter() + return p.doprint(expr) + + +def _get_ordered_dummies(mul, verbose=False): + """Returns all dummies in the mul sorted in canonical order. + + Explanation + =========== + + The purpose of the canonical ordering is that dummies can be substituted + consistently across terms with the result that equivalent terms can be + simplified. + + It is not possible to determine if two terms are equivalent based solely on + the dummy order. However, a consistent substitution guided by the ordered + dummies should lead to trivially (non-)equivalent terms, thereby revealing + the equivalence. This also means that if two terms have identical sequences of + dummies, the (non-)equivalence should already be apparent. + + Strategy + -------- + + The canonical order is given by an arbitrary sorting rule. A sort key + is determined for each dummy as a tuple that depends on all factors where + the index is present. The dummies are thereby sorted according to the + contraction structure of the term, instead of sorting based solely on the + dummy symbol itself. + + After all dummies in the term has been assigned a key, we check for identical + keys, i.e. unorderable dummies. If any are found, we call a specialized + method, _determine_ambiguous(), that will determine a unique order based + on recursive calls to _get_ordered_dummies(). + + Key description + --------------- + + A high level description of the sort key: + + 1. Range of the dummy index + 2. Relation to external (non-dummy) indices + 3. Position of the index in the first factor + 4. Position of the index in the second factor + + The sort key is a tuple with the following components: + + 1. A single character indicating the range of the dummy (above, below + or general.) + 2. A list of strings with fully masked string representations of all + factors where the dummy is present. By masked, we mean that dummies + are represented by a symbol to indicate either below fermi, above or + general. No other information is displayed about the dummies at + this point. The list is sorted stringwise. + 3. An integer number indicating the position of the index, in the first + factor as sorted in 2. + 4. An integer number indicating the position of the index, in the second + factor as sorted in 2. + + If a factor is either of type AntiSymmetricTensor or SqOperator, the index + position in items 3 and 4 is indicated as 'upper' or 'lower' only. + (Creation operators are considered upper and annihilation operators lower.) + + If the masked factors are identical, the two factors cannot be ordered + unambiguously in item 2. In this case, items 3, 4 are left out. 
If several + indices are contracted between the unorderable factors, it will be handled by + _determine_ambiguous() + + + """ + # setup dicts to avoid repeated calculations in key() + args = Mul.make_args(mul) + fac_dum = { fac: fac.atoms(Dummy) for fac in args } + fac_repr = { fac: __kprint(fac) for fac in args } + all_dums = set().union(*fac_dum.values()) + mask = {} + for d in all_dums: + if d.assumptions0.get('below_fermi'): + mask[d] = '0' + elif d.assumptions0.get('above_fermi'): + mask[d] = '1' + else: + mask[d] = '2' + dum_repr = {d: __kprint(d) for d in all_dums} + + def _key(d): + dumstruct = [ fac for fac in fac_dum if d in fac_dum[fac] ] + other_dums = set().union(*[fac_dum[fac] for fac in dumstruct]) + fac = dumstruct[-1] + if other_dums is fac_dum[fac]: + other_dums = fac_dum[fac].copy() + other_dums.remove(d) + masked_facs = [ fac_repr[fac] for fac in dumstruct ] + for d2 in other_dums: + masked_facs = [ fac.replace(dum_repr[d2], mask[d2]) + for fac in masked_facs ] + all_masked = [ fac.replace(dum_repr[d], mask[d]) + for fac in masked_facs ] + masked_facs = dict(list(zip(dumstruct, masked_facs))) + + # dummies for which the ordering cannot be determined + if has_dups(all_masked): + all_masked.sort() + return mask[d], tuple(all_masked) # positions are ambiguous + + # sort factors according to fully masked strings + keydict = dict(list(zip(dumstruct, all_masked))) + dumstruct.sort(key=lambda x: keydict[x]) + all_masked.sort() + + pos_val = [] + for fac in dumstruct: + if isinstance(fac, AntiSymmetricTensor): + if d in fac.upper: + pos_val.append('u') + if d in fac.lower: + pos_val.append('l') + elif isinstance(fac, Creator): + pos_val.append('u') + elif isinstance(fac, Annihilator): + pos_val.append('l') + elif isinstance(fac, NO): + ops = [ op for op in fac if op.has(d) ] + for op in ops: + if isinstance(op, Creator): + pos_val.append('u') + else: + pos_val.append('l') + else: + # fallback to position in string representation + facpos = -1 + while 1: + facpos = masked_facs[fac].find(dum_repr[d], facpos + 1) + if facpos == -1: + break + pos_val.append(facpos) + return (mask[d], tuple(all_masked), pos_val[0], pos_val[-1]) + dumkey = dict(list(zip(all_dums, list(map(_key, all_dums))))) + result = sorted(all_dums, key=lambda x: dumkey[x]) + if has_dups(iter(dumkey.values())): + # We have ambiguities + unordered = defaultdict(set) + for d, k in dumkey.items(): + unordered[k].add(d) + for k in [ k for k in unordered if len(unordered[k]) < 2 ]: + del unordered[k] + + unordered = [ unordered[k] for k in sorted(unordered) ] + result = _determine_ambiguous(mul, result, unordered) + return result + + +def _determine_ambiguous(term, ordered, ambiguous_groups): + # We encountered a term for which the dummy substitution is ambiguous. + # This happens for terms with 2 or more contractions between factors that + # cannot be uniquely ordered independent of summation indices. For + # example: + # + # Sum(p, q) v^{p, .}_{q, .}v^{q, .}_{p, .} + # + # Assuming that the indices represented by . are dummies with the + # same range, the factors cannot be ordered, and there is no + # way to determine a consistent ordering of p and q. + # + # The strategy employed here, is to relabel all unambiguous dummies with + # non-dummy symbols and call _get_ordered_dummies again. This procedure is + # applied to the entire term so there is a possibility that + # _determine_ambiguous() is called again from a deeper recursion level. 
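+    #
+    # Hypothetical illustration: with ordered == [r] and
+    # ambiguous_groups == [{p, q}], the unambiguous dummy r is replaced by
+    # a fresh non-dummy symbol, _get_ordered_dummies() is rerun on the
+    # relabeled term, and the relative order it then assigns to p and q is
+    # copied back into the original ordering below.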
+ + # break recursion if there are no ordered dummies + all_ambiguous = set() + for dummies in ambiguous_groups: + all_ambiguous |= dummies + all_ordered = set(ordered) - all_ambiguous + if not all_ordered: + # FIXME: If we arrive here, there are no ordered dummies. A method to + # handle this needs to be implemented. In order to return something + # useful nevertheless, we choose arbitrarily the first dummy and + # determine the rest from this one. This method is dependent on the + # actual dummy labels which violates an assumption for the + # canonicalization procedure. A better implementation is needed. + group = [ d for d in ordered if d in ambiguous_groups[0] ] + d = group[0] + all_ordered.add(d) + ambiguous_groups[0].remove(d) + + stored_counter = _symbol_factory._counter + subslist = [] + for d in [ d for d in ordered if d in all_ordered ]: + nondum = _symbol_factory._next() + subslist.append((d, nondum)) + newterm = term.subs(subslist) + neworder = _get_ordered_dummies(newterm) + _symbol_factory._set_counter(stored_counter) + + # update ordered list with new information + for group in ambiguous_groups: + ordered_group = [ d for d in neworder if d in group ] + ordered_group.reverse() + result = [] + for d in ordered: + if d in group: + result.append(ordered_group.pop()) + else: + result.append(d) + ordered = result + return ordered + + +class _SymbolFactory: + def __init__(self, label): + self._counterVar = 0 + self._label = label + + def _set_counter(self, value): + """ + Sets counter to value. + """ + self._counterVar = value + + @property + def _counter(self): + """ + What counter is currently at. + """ + return self._counterVar + + def _next(self): + """ + Generates the next symbols and increments counter by 1. + """ + s = Symbol("%s%i" % (self._label, self._counterVar)) + self._counterVar += 1 + return s +_symbol_factory = _SymbolFactory('_]"]_') # most certainly a unique label + + +@cacheit +def _get_contractions(string1, keep_only_fully_contracted=False): + """ + Returns Add-object with contracted terms. + + Uses recursion to find all contractions. -- Internal helper function -- + + Will find nonzero contractions in string1 between indices given in + leftrange and rightrange. + + """ + + # Should we store current level of contraction? + if keep_only_fully_contracted and string1: + result = [] + else: + result = [NO(Mul(*string1))] + + for i in range(len(string1) - 1): + for j in range(i + 1, len(string1)): + + c = contraction(string1[i], string1[j]) + + if c: + sign = (j - i + 1) % 2 + if sign: + coeff = S.NegativeOne*c + else: + coeff = c + + # + # Call next level of recursion + # ============================ + # + # We now need to find more contractions among operators + # + # oplist = string1[:i]+ string1[i+1:j] + string1[j+1:] + # + # To prevent overcounting, we don't allow contractions + # we have already encountered. i.e. contractions between + # string1[:i] <---> string1[i+1:j] + # and string1[:i] <---> string1[j+1:]. + # + # This leaves the case: + oplist = string1[i + 1:j] + string1[j + 1:] + + if oplist: + + result.append(coeff*NO( + Mul(*string1[:i])*_get_contractions( oplist, + keep_only_fully_contracted=keep_only_fully_contracted))) + + else: + result.append(coeff*NO( Mul(*string1[:i]))) + + if keep_only_fully_contracted: + break # next iteration over i leaves leftmost operator string1[0] uncontracted + + return Add(*result) + + +def wicks(e, **kw_args): + """ + Returns the normal ordered equivalent of an expression using Wicks Theorem. 
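+
+    Schematically, for two fermionic operators Wick's theorem gives::
+
+        a*b == NO(a*b) + contraction(a, b)
+
+    and _get_contractions() recursively generates all such contraction
+    terms for longer operator strings.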
+ + Examples + ======== + + >>> from sympy import symbols, Dummy + >>> from sympy.physics.secondquant import wicks, F, Fd + >>> p, q, r = symbols('p,q,r') + >>> wicks(Fd(p)*F(q)) + KroneckerDelta(_i, q)*KroneckerDelta(p, q) + NO(CreateFermion(p)*AnnihilateFermion(q)) + + By default, the expression is expanded: + + >>> wicks(F(p)*(F(q)+F(r))) + NO(AnnihilateFermion(p)*AnnihilateFermion(q)) + NO(AnnihilateFermion(p)*AnnihilateFermion(r)) + + With the keyword 'keep_only_fully_contracted=True', only fully contracted + terms are returned. + + By request, the result can be simplified in the following order: + -- KroneckerDelta functions are evaluated + -- Dummy variables are substituted consistently across terms + + >>> p, q, r = symbols('p q r', cls=Dummy) + >>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True) + KroneckerDelta(_i, _q)*KroneckerDelta(_p, _q) + KroneckerDelta(_i, _r)*KroneckerDelta(_p, _r) + + """ + + if not e: + return S.Zero + + opts = { + 'simplify_kronecker_deltas': False, + 'expand': True, + 'simplify_dummies': False, + 'keep_only_fully_contracted': False + } + opts.update(kw_args) + + # check if we are already normally ordered + if isinstance(e, NO): + if opts['keep_only_fully_contracted']: + return S.Zero + else: + return e + elif isinstance(e, FermionicOperator): + if opts['keep_only_fully_contracted']: + return S.Zero + else: + return e + + # break up any NO-objects, and evaluate commutators + e = e.doit(wicks=True) + + # make sure we have only one term to consider + e = e.expand() + if isinstance(e, Add): + if opts['simplify_dummies']: + return substitute_dummies(Add(*[ wicks(term, **kw_args) for term in e.args])) + else: + return Add(*[ wicks(term, **kw_args) for term in e.args]) + + # For Mul-objects we can actually do something + if isinstance(e, Mul): + + # we don't want to mess around with commuting part of Mul + # so we factorize it out before starting recursion + c_part = [] + string1 = [] + for factor in e.args: + if factor.is_commutative: + c_part.append(factor) + else: + string1.append(factor) + n = len(string1) + + # catch trivial cases + if n == 0: + result = e + elif n == 1: + if opts['keep_only_fully_contracted']: + return S.Zero + else: + result = e + + else: # non-trivial + + if isinstance(string1[0], BosonicOperator): + raise NotImplementedError + + string1 = tuple(string1) + + # recursion over higher order contractions + result = _get_contractions(string1, + keep_only_fully_contracted=opts['keep_only_fully_contracted'] ) + result = Mul(*c_part)*result + + if opts['expand']: + result = result.expand() + if opts['simplify_kronecker_deltas']: + result = evaluate_deltas(result) + + return result + + # there was nothing to do + return e + + +class PermutationOperator(Expr): + """ + Represents the index permutation operator P(ij). + + P(ij)*f(i)*g(j) = f(i)*g(j) - f(j)*g(i) + """ + is_commutative = True + + def __new__(cls, i, j): + i, j = sorted(map(sympify, (i, j)), key=default_sort_key) + obj = Basic.__new__(cls, i, j) + return obj + + def get_permuted(self, expr): + """ + Returns -expr with permuted indices. 
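+
+        If the expression does not contain both indices, it is returned
+        unchanged.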
+ + Explanation + =========== + + >>> from sympy import symbols, Function + >>> from sympy.physics.secondquant import PermutationOperator + >>> p,q = symbols('p,q') + >>> f = Function('f') + >>> PermutationOperator(p,q).get_permuted(f(p,q)) + -f(q, p) + + """ + i = self.args[0] + j = self.args[1] + if expr.has(i) and expr.has(j): + tmp = Dummy() + expr = expr.subs(i, tmp) + expr = expr.subs(j, i) + expr = expr.subs(tmp, j) + return S.NegativeOne*expr + else: + return expr + + def _latex(self, printer): + return "P(%s%s)" % self.args + + +def simplify_index_permutations(expr, permutation_operators): + """ + Performs simplification by introducing PermutationOperators where appropriate. + + Explanation + =========== + + Schematically: + [abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij] + + permutation_operators is a list of PermutationOperators to consider. + + If permutation_operators=[P(ab),P(ij)] we will try to introduce the + permutation operators P(ij) and P(ab) in the expression. If there are other + possible simplifications, we ignore them. + + >>> from sympy import symbols, Function + >>> from sympy.physics.secondquant import simplify_index_permutations + >>> from sympy.physics.secondquant import PermutationOperator + >>> p,q,r,s = symbols('p,q,r,s') + >>> f = Function('f') + >>> g = Function('g') + + >>> expr = f(p)*g(q) - f(q)*g(p); expr + f(p)*g(q) - f(q)*g(p) + >>> simplify_index_permutations(expr,[PermutationOperator(p,q)]) + f(p)*g(q)*PermutationOperator(p, q) + + >>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)] + >>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r) + >>> simplify_index_permutations(expr,PermutList) + f(p, r)*g(q, s)*PermutationOperator(p, q)*PermutationOperator(r, s) + + """ + + def _get_indices(expr, ind): + """ + Collects indices recursively in predictable order. + """ + result = [] + for arg in expr.args: + if arg in ind: + result.append(arg) + else: + if arg.args: + result.extend(_get_indices(arg, ind)) + return result + + def _choose_one_to_keep(a, b, ind): + # we keep the one where indices in ind are in order ind[0] < ind[1] + return min(a, b, key=lambda x: default_sort_key(_get_indices(x, ind))) + + expr = expr.expand() + if isinstance(expr, Add): + terms = set(expr.args) + + for P in permutation_operators: + new_terms = set() + on_hold = set() + while terms: + term = terms.pop() + permuted = P.get_permuted(term) + if permuted in terms | on_hold: + try: + terms.remove(permuted) + except KeyError: + on_hold.remove(permuted) + keep = _choose_one_to_keep(term, permuted, P.args) + new_terms.add(P*keep) + else: + + # Some terms must get a second chance because the permuted + # term may already have canonical dummy ordering. Then + # substitute_dummies() does nothing. However, the other + # term, if it exists, will be able to match with us. 
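+                    # Canonicalize the dummies of the permuted term and try
+                    # the match again; if substitution changes nothing, park
+                    # the term in on_hold so a later term can still match it.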
+ permuted1 = permuted + permuted = substitute_dummies(permuted) + if permuted1 == permuted: + on_hold.add(term) + elif permuted in terms | on_hold: + try: + terms.remove(permuted) + except KeyError: + on_hold.remove(permuted) + keep = _choose_one_to_keep(term, permuted, P.args) + new_terms.add(P*keep) + else: + new_terms.add(term) + terms = new_terms | on_hold + return Add(*terms) + return expr diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e714852064c0b940ebda2e5fe7a08faf13f07ed0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__init__.py @@ -0,0 +1,36 @@ +__all__ = [ + 'CoordinateSym', 'ReferenceFrame', + + 'Dyadic', + + 'Vector', + + 'Point', + + 'cross', 'dot', 'express', 'time_derivative', 'outer', + 'kinematic_equations', 'get_motion_params', 'partial_velocity', + 'dynamicsymbols', + + 'vprint', 'vsstrrepr', 'vsprint', 'vpprint', 'vlatex', 'init_vprinting', + + 'curl', 'divergence', 'gradient', 'is_conservative', 'is_solenoidal', + 'scalar_potential', 'scalar_potential_difference', + +] +from .frame import CoordinateSym, ReferenceFrame + +from .dyadic import Dyadic + +from .vector import Vector + +from .point import Point + +from .functions import (cross, dot, express, time_derivative, outer, + kinematic_equations, get_motion_params, partial_velocity, + dynamicsymbols) + +from .printing import (vprint, vsstrrepr, vsprint, vpprint, vlatex, + init_vprinting) + +from .fieldfunctions import (curl, divergence, gradient, is_conservative, + is_solenoidal, scalar_potential, scalar_potential_difference) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1259507c8eceec243fe07e4c362ed3c4821c0d7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/dyadic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/dyadic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0fdf6e4d0628e74d27c88eb387b089a36dfadc0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/dyadic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/fieldfunctions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/fieldfunctions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b1c9bf72e0c5c365f805d02366910d514cde928 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/fieldfunctions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/frame.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95f1ea1f0ba99cafd221eab7be6426cc798a9084 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/frame.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/functions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa69b536956ba7a2035d2ccc3302246fa68d6e19 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/functions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/point.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/point.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5194924cdc7c87c653528629974e670f52b8b07 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/point.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/printing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/printing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a15a4264cbba6c6414d30d76d631890dd3cfa2af Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/printing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/vector.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/vector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2bd5895eb321b74b92bb3dc4bfdd68ee5cd1e0e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/vector.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/dyadic.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/dyadic.py new file mode 100644 index 0000000000000000000000000000000000000000..93d3e49b6c9c382953faeda8566b6438b13681f4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/dyadic.py @@ -0,0 +1,601 @@ +from sympy.core.backend import sympify, Add, ImmutableMatrix as Matrix +from sympy.core.evalf import EvalfMixin +from sympy.printing.defaults import Printable + +from mpmath.libmp.libmpf import prec_to_dps + + +__all__ = ['Dyadic'] + + +class Dyadic(Printable, EvalfMixin): + """A Dyadic object. + + See: + https://en.wikipedia.org/wiki/Dyadic_tensor + Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill + + A more powerful way to represent a rigid body's inertia. While it is more + complex, by choosing Dyadic components to be in body fixed basis vectors, + the resulting matrix is equivalent to the inertia tensor. + + """ + + is_number = False + + def __init__(self, inlist): + """ + Just like Vector's init, you should not call this unless creating a + zero dyadic. + + zd = Dyadic(0) + + Stores a Dyadic as a list of lists; the inner list has the measure + number and the two unit vectors; the outerlist holds each unique + unit vector pair. 
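+
+        As an illustrative sketch of the internal layout (not public API),
+        the dyadic 3*(N.x|N.y) would be stored as the single triple::
+
+            [(3, N.x, N.y)]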
+ + """ + + self.args = [] + if inlist == 0: + inlist = [] + while len(inlist) != 0: + added = 0 + for i, v in enumerate(self.args): + if ((str(inlist[0][1]) == str(self.args[i][1])) and + (str(inlist[0][2]) == str(self.args[i][2]))): + self.args[i] = (self.args[i][0] + inlist[0][0], + inlist[0][1], inlist[0][2]) + inlist.remove(inlist[0]) + added = 1 + break + if added != 1: + self.args.append(inlist[0]) + inlist.remove(inlist[0]) + i = 0 + # This code is to remove empty parts from the list + while i < len(self.args): + if ((self.args[i][0] == 0) | (self.args[i][1] == 0) | + (self.args[i][2] == 0)): + self.args.remove(self.args[i]) + i -= 1 + i += 1 + + @property + def func(self): + """Returns the class Dyadic. """ + return Dyadic + + def __add__(self, other): + """The add operator for Dyadic. """ + other = _check_dyadic(other) + return Dyadic(self.args + other.args) + + def __and__(self, other): + """The inner product operator for a Dyadic and a Dyadic or Vector. + + Parameters + ========== + + other : Dyadic or Vector + The other Dyadic or Vector to take the inner product with + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer + >>> N = ReferenceFrame('N') + >>> D1 = outer(N.x, N.y) + >>> D2 = outer(N.y, N.y) + >>> D1.dot(D2) + (N.x|N.y) + >>> D1.dot(N.y) + N.x + + """ + from sympy.physics.vector.vector import Vector, _check_vector + if isinstance(other, Dyadic): + other = _check_dyadic(other) + ol = Dyadic(0) + for i, v in enumerate(self.args): + for i2, v2 in enumerate(other.args): + ol += v[0] * v2[0] * (v[2] & v2[1]) * (v[1] | v2[2]) + else: + other = _check_vector(other) + ol = Vector(0) + for i, v in enumerate(self.args): + ol += v[0] * v[1] * (v[2] & other) + return ol + + def __truediv__(self, other): + """Divides the Dyadic by a sympifyable expression. """ + return self.__mul__(1 / other) + + def __eq__(self, other): + """Tests for equality. + + Is currently weak; needs stronger comparison testing + + """ + + if other == 0: + other = Dyadic(0) + other = _check_dyadic(other) + if (self.args == []) and (other.args == []): + return True + elif (self.args == []) or (other.args == []): + return False + return set(self.args) == set(other.args) + + def __mul__(self, other): + """Multiplies the Dyadic by a sympifyable expression. + + Parameters + ========== + + other : Sympafiable + The scalar to multiply this Dyadic with + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer + >>> N = ReferenceFrame('N') + >>> d = outer(N.x, N.x) + >>> 5 * d + 5*(N.x|N.x) + + """ + + newlist = list(self.args) + other = sympify(other) + for i, v in enumerate(newlist): + newlist[i] = (other * newlist[i][0], newlist[i][1], + newlist[i][2]) + return Dyadic(newlist) + + def __ne__(self, other): + return not self == other + + def __neg__(self): + return self * -1 + + def _latex(self, printer): + ar = self.args # just to shorten things + if len(ar) == 0: + return str(0) + ol = [] # output list, to be concatenated to a string + for i, v in enumerate(ar): + # if the coef of the dyadic is 1, we skip the 1 + if ar[i][0] == 1: + ol.append(' + ' + printer._print(ar[i][1]) + r"\otimes " + + printer._print(ar[i][2])) + # if the coef of the dyadic is -1, we skip the 1 + elif ar[i][0] == -1: + ol.append(' - ' + + printer._print(ar[i][1]) + + r"\otimes " + + printer._print(ar[i][2])) + # If the coefficient of the dyadic is not 1 or -1, + # we might wrap it in parentheses, for readability. 
+ elif ar[i][0] != 0: + arg_str = printer._print(ar[i][0]) + if isinstance(ar[i][0], Add): + arg_str = '(%s)' % arg_str + if arg_str.startswith('-'): + arg_str = arg_str[1:] + str_start = ' - ' + else: + str_start = ' + ' + ol.append(str_start + arg_str + printer._print(ar[i][1]) + + r"\otimes " + printer._print(ar[i][2])) + outstr = ''.join(ol) + if outstr.startswith(' + '): + outstr = outstr[3:] + elif outstr.startswith(' '): + outstr = outstr[1:] + return outstr + + def _pretty(self, printer): + e = self + + class Fake: + baseline = 0 + + def render(self, *args, **kwargs): + ar = e.args # just to shorten things + mpp = printer + if len(ar) == 0: + return str(0) + bar = "\N{CIRCLED TIMES}" if printer._use_unicode else "|" + ol = [] # output list, to be concatenated to a string + for i, v in enumerate(ar): + # if the coef of the dyadic is 1, we skip the 1 + if ar[i][0] == 1: + ol.extend([" + ", + mpp.doprint(ar[i][1]), + bar, + mpp.doprint(ar[i][2])]) + + # if the coef of the dyadic is -1, we skip the 1 + elif ar[i][0] == -1: + ol.extend([" - ", + mpp.doprint(ar[i][1]), + bar, + mpp.doprint(ar[i][2])]) + + # If the coefficient of the dyadic is not 1 or -1, + # we might wrap it in parentheses, for readability. + elif ar[i][0] != 0: + if isinstance(ar[i][0], Add): + arg_str = mpp._print( + ar[i][0]).parens()[0] + else: + arg_str = mpp.doprint(ar[i][0]) + if arg_str.startswith("-"): + arg_str = arg_str[1:] + str_start = " - " + else: + str_start = " + " + ol.extend([str_start, arg_str, " ", + mpp.doprint(ar[i][1]), + bar, + mpp.doprint(ar[i][2])]) + + outstr = "".join(ol) + if outstr.startswith(" + "): + outstr = outstr[3:] + elif outstr.startswith(" "): + outstr = outstr[1:] + return outstr + return Fake() + + def __rand__(self, other): + """The inner product operator for a Vector or Dyadic, and a Dyadic + + This is for: Vector dot Dyadic + + Parameters + ========== + + other : Vector + The vector we are dotting with + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, dot, outer + >>> N = ReferenceFrame('N') + >>> d = outer(N.x, N.x) + >>> dot(N.x, d) + N.x + + """ + + from sympy.physics.vector.vector import Vector, _check_vector + other = _check_vector(other) + ol = Vector(0) + for i, v in enumerate(self.args): + ol += v[0] * v[2] * (v[1] & other) + return ol + + def __rsub__(self, other): + return (-1 * self) + other + + def __rxor__(self, other): + """For a cross product in the form: Vector x Dyadic + + Parameters + ========== + + other : Vector + The Vector that we are crossing this Dyadic with + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer, cross + >>> N = ReferenceFrame('N') + >>> d = outer(N.x, N.x) + >>> cross(N.y, d) + - (N.z|N.x) + + """ + + from sympy.physics.vector.vector import _check_vector + other = _check_vector(other) + ol = Dyadic(0) + for i, v in enumerate(self.args): + ol += v[0] * ((other ^ v[1]) | v[2]) + return ol + + def _sympystr(self, printer): + """Printing method. 
""" + ar = self.args # just to shorten things + if len(ar) == 0: + return printer._print(0) + ol = [] # output list, to be concatenated to a string + for i, v in enumerate(ar): + # if the coef of the dyadic is 1, we skip the 1 + if ar[i][0] == 1: + ol.append(' + (' + printer._print(ar[i][1]) + '|' + + printer._print(ar[i][2]) + ')') + # if the coef of the dyadic is -1, we skip the 1 + elif ar[i][0] == -1: + ol.append(' - (' + printer._print(ar[i][1]) + '|' + + printer._print(ar[i][2]) + ')') + # If the coefficient of the dyadic is not 1 or -1, + # we might wrap it in parentheses, for readability. + elif ar[i][0] != 0: + arg_str = printer._print(ar[i][0]) + if isinstance(ar[i][0], Add): + arg_str = "(%s)" % arg_str + if arg_str[0] == '-': + arg_str = arg_str[1:] + str_start = ' - ' + else: + str_start = ' + ' + ol.append(str_start + arg_str + '*(' + + printer._print(ar[i][1]) + + '|' + printer._print(ar[i][2]) + ')') + outstr = ''.join(ol) + if outstr.startswith(' + '): + outstr = outstr[3:] + elif outstr.startswith(' '): + outstr = outstr[1:] + return outstr + + def __sub__(self, other): + """The subtraction operator. """ + return self.__add__(other * -1) + + def __xor__(self, other): + """For a cross product in the form: Dyadic x Vector. + + Parameters + ========== + + other : Vector + The Vector that we are crossing this Dyadic with + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer, cross + >>> N = ReferenceFrame('N') + >>> d = outer(N.x, N.x) + >>> cross(d, N.y) + (N.x|N.z) + + """ + + from sympy.physics.vector.vector import _check_vector + other = _check_vector(other) + ol = Dyadic(0) + for i, v in enumerate(self.args): + ol += v[0] * (v[1] | (v[2] ^ other)) + return ol + + __radd__ = __add__ + __rmul__ = __mul__ + + def express(self, frame1, frame2=None): + """Expresses this Dyadic in alternate frame(s) + + The first frame is the list side expression, the second frame is the + right side; if Dyadic is in form A.x|B.y, you can express it in two + different frames. If no second frame is given, the Dyadic is + expressed in only one frame. + + Calls the global express function + + Parameters + ========== + + frame1 : ReferenceFrame + The frame to express the left side of the Dyadic in + frame2 : ReferenceFrame + If provided, the frame to express the right side of the Dyadic in + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols + >>> from sympy.physics.vector import init_vprinting + >>> init_vprinting(pretty_print=False) + >>> N = ReferenceFrame('N') + >>> q = dynamicsymbols('q') + >>> B = N.orientnew('B', 'Axis', [q, N.z]) + >>> d = outer(N.x, N.x) + >>> d.express(B, N) + cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x) + + """ + from sympy.physics.vector.functions import express + return express(self, frame1, frame2) + + def to_matrix(self, reference_frame, second_reference_frame=None): + """Returns the matrix form of the dyadic with respect to one or two + reference frames. + + Parameters + ---------- + reference_frame : ReferenceFrame + The reference frame that the rows and columns of the matrix + correspond to. If a second reference frame is provided, this + only corresponds to the rows of the matrix. + second_reference_frame : ReferenceFrame, optional, default=None + The reference frame that the columns of the matrix correspond + to. + + Returns + ------- + matrix : ImmutableMatrix, shape(3,3) + The matrix that gives the 2D tensor form. 
+ + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.vector import ReferenceFrame, Vector + >>> Vector.simp = True + >>> from sympy.physics.mechanics import inertia + >>> Ixx, Iyy, Izz, Ixy, Iyz, Ixz = symbols('Ixx, Iyy, Izz, Ixy, Iyz, Ixz') + >>> N = ReferenceFrame('N') + >>> inertia_dyadic = inertia(N, Ixx, Iyy, Izz, Ixy, Iyz, Ixz) + >>> inertia_dyadic.to_matrix(N) + Matrix([ + [Ixx, Ixy, Ixz], + [Ixy, Iyy, Iyz], + [Ixz, Iyz, Izz]]) + >>> beta = symbols('beta') + >>> A = N.orientnew('A', 'Axis', (beta, N.x)) + >>> inertia_dyadic.to_matrix(A) + Matrix([ + [ Ixx, Ixy*cos(beta) + Ixz*sin(beta), -Ixy*sin(beta) + Ixz*cos(beta)], + [ Ixy*cos(beta) + Ixz*sin(beta), Iyy*cos(2*beta)/2 + Iyy/2 + Iyz*sin(2*beta) - Izz*cos(2*beta)/2 + Izz/2, -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2], + [-Ixy*sin(beta) + Ixz*cos(beta), -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2, -Iyy*cos(2*beta)/2 + Iyy/2 - Iyz*sin(2*beta) + Izz*cos(2*beta)/2 + Izz/2]]) + + """ + + if second_reference_frame is None: + second_reference_frame = reference_frame + + return Matrix([i.dot(self).dot(j) for i in reference_frame for j in + second_reference_frame]).reshape(3, 3) + + def doit(self, **hints): + """Calls .doit() on each term in the Dyadic""" + return sum([Dyadic([(v[0].doit(**hints), v[1], v[2])]) + for v in self.args], Dyadic(0)) + + def dt(self, frame): + """Take the time derivative of this Dyadic in a frame. + + This function calls the global time_derivative method + + Parameters + ========== + + frame : ReferenceFrame + The frame to take the time derivative in + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols + >>> from sympy.physics.vector import init_vprinting + >>> init_vprinting(pretty_print=False) + >>> N = ReferenceFrame('N') + >>> q = dynamicsymbols('q') + >>> B = N.orientnew('B', 'Axis', [q, N.z]) + >>> d = outer(N.x, N.x) + >>> d.dt(B) + - q'*(N.y|N.x) - q'*(N.x|N.y) + + """ + from sympy.physics.vector.functions import time_derivative + return time_derivative(self, frame) + + def simplify(self): + """Returns a simplified Dyadic.""" + out = Dyadic(0) + for v in self.args: + out += Dyadic([(v[0].simplify(), v[1], v[2])]) + return out + + def subs(self, *args, **kwargs): + """Substitution on the Dyadic. + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> from sympy import Symbol + >>> N = ReferenceFrame('N') + >>> s = Symbol('s') + >>> a = s*(N.x|N.x) + >>> a.subs({s: 2}) + 2*(N.x|N.x) + + """ + + return sum([Dyadic([(v[0].subs(*args, **kwargs), v[1], v[2])]) + for v in self.args], Dyadic(0)) + + def applyfunc(self, f): + """Apply a function to each component of a Dyadic.""" + if not callable(f): + raise TypeError("`f` must be callable.") + + out = Dyadic(0) + for a, b, c in self.args: + out += f(a) * (b | c) + return out + + dot = __and__ + cross = __xor__ + + def _eval_evalf(self, prec): + if not self.args: + return self + new_args = [] + dps = prec_to_dps(prec) + for inlist in self.args: + new_inlist = list(inlist) + new_inlist[0] = inlist[0].evalf(n=dps) + new_args.append(tuple(new_inlist)) + return Dyadic(new_args) + + def xreplace(self, rule): + """ + Replace occurrences of objects within the measure numbers of the + Dyadic. + + Parameters + ========== + + rule : dict-like + Expresses a replacement rule. + + Returns + ======= + + Dyadic + Result of the replacement. 
+ + Examples + ======== + + >>> from sympy import symbols, pi + >>> from sympy.physics.vector import ReferenceFrame, outer + >>> N = ReferenceFrame('N') + >>> D = outer(N.x, N.x) + >>> x, y, z = symbols('x y z') + >>> ((1 + x*y) * D).xreplace({x: pi}) + (pi*y + 1)*(N.x|N.x) + >>> ((1 + x*y) * D).xreplace({x: pi, y: 2}) + (1 + 2*pi)*(N.x|N.x) + + Replacements occur only if an entire node in the expression tree is + matched: + + >>> ((x*y + z) * D).xreplace({x*y: pi}) + (z + pi)*(N.x|N.x) + >>> ((x*y*z) * D).xreplace({x*y: pi}) + x*y*z*(N.x|N.x) + + """ + + new_args = [] + for inlist in self.args: + new_inlist = list(inlist) + new_inlist[0] = new_inlist[0].xreplace(rule) + new_args.append(tuple(new_inlist)) + return Dyadic(new_args) + + +def _check_dyadic(other): + if not isinstance(other, Dyadic): + raise TypeError('A Dyadic must be supplied') + return other diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/fieldfunctions.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/fieldfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..74169921c587385323b9080709999c65c6ca0843 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/fieldfunctions.py @@ -0,0 +1,313 @@ +from sympy.core.function import diff +from sympy.core.singleton import S +from sympy.integrals.integrals import integrate +from sympy.physics.vector import Vector, express +from sympy.physics.vector.frame import _check_frame +from sympy.physics.vector.vector import _check_vector + + +__all__ = ['curl', 'divergence', 'gradient', 'is_conservative', + 'is_solenoidal', 'scalar_potential', + 'scalar_potential_difference'] + + +def curl(vect, frame): + """ + Returns the curl of a vector field computed wrt the coordinate + symbols of the given frame. + + Parameters + ========== + + vect : Vector + The vector operand + + frame : ReferenceFrame + The reference frame to calculate the curl in + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> from sympy.physics.vector import curl + >>> R = ReferenceFrame('R') + >>> v1 = R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z + >>> curl(v1, R) + 0 + >>> v2 = R[0]*R[1]*R[2]*R.x + >>> curl(v2, R) + R_x*R_y*R.y - R_x*R_z*R.z + + """ + + _check_vector(vect) + if vect == 0: + return Vector(0) + vect = express(vect, frame, variables=True) + # A mechanical approach to avoid looping overheads + vectx = vect.dot(frame.x) + vecty = vect.dot(frame.y) + vectz = vect.dot(frame.z) + outvec = Vector(0) + outvec += (diff(vectz, frame[1]) - diff(vecty, frame[2])) * frame.x + outvec += (diff(vectx, frame[2]) - diff(vectz, frame[0])) * frame.y + outvec += (diff(vecty, frame[0]) - diff(vectx, frame[1])) * frame.z + return outvec + + +def divergence(vect, frame): + """ + Returns the divergence of a vector field computed wrt the coordinate + symbols of the given frame. 
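+
+    Concretely, for a vector field ``v`` and frame ``R`` this evaluates::
+
+        (diff(v.dot(R.x), R[0]) + diff(v.dot(R.y), R[1])
+         + diff(v.dot(R.z), R[2]))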
+ + Parameters + ========== + + vect : Vector + The vector operand + + frame : ReferenceFrame + The reference frame to calculate the divergence in + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> from sympy.physics.vector import divergence + >>> R = ReferenceFrame('R') + >>> v1 = R[0]*R[1]*R[2] * (R.x+R.y+R.z) + >>> divergence(v1, R) + R_x*R_y + R_x*R_z + R_y*R_z + >>> v2 = 2*R[1]*R[2]*R.y + >>> divergence(v2, R) + 2*R_z + + """ + + _check_vector(vect) + if vect == 0: + return S.Zero + vect = express(vect, frame, variables=True) + vectx = vect.dot(frame.x) + vecty = vect.dot(frame.y) + vectz = vect.dot(frame.z) + out = S.Zero + out += diff(vectx, frame[0]) + out += diff(vecty, frame[1]) + out += diff(vectz, frame[2]) + return out + + +def gradient(scalar, frame): + """ + Returns the vector gradient of a scalar field computed wrt the + coordinate symbols of the given frame. + + Parameters + ========== + + scalar : sympifiable + The scalar field to take the gradient of + + frame : ReferenceFrame + The frame to calculate the gradient in + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> from sympy.physics.vector import gradient + >>> R = ReferenceFrame('R') + >>> s1 = R[0]*R[1]*R[2] + >>> gradient(s1, R) + R_y*R_z*R.x + R_x*R_z*R.y + R_x*R_y*R.z + >>> s2 = 5*R[0]**2*R[2] + >>> gradient(s2, R) + 10*R_x*R_z*R.x + 5*R_x**2*R.z + + """ + + _check_frame(frame) + outvec = Vector(0) + scalar = express(scalar, frame, variables=True) + for i, x in enumerate(frame): + outvec += diff(scalar, frame[i]) * x + return outvec + + +def is_conservative(field): + """ + Checks if a field is conservative. + + Parameters + ========== + + field : Vector + The field to check for conservative property + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> from sympy.physics.vector import is_conservative + >>> R = ReferenceFrame('R') + >>> is_conservative(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z) + True + >>> is_conservative(R[2] * R.y) + False + + """ + + # Field is conservative irrespective of frame + # Take the first frame in the result of the separate method of Vector + if field == Vector(0): + return True + frame = list(field.separate())[0] + return curl(field, frame).simplify() == Vector(0) + + +def is_solenoidal(field): + """ + Checks if a field is solenoidal. + + Parameters + ========== + + field : Vector + The field to check for solenoidal property + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> from sympy.physics.vector import is_solenoidal + >>> R = ReferenceFrame('R') + >>> is_solenoidal(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z) + True + >>> is_solenoidal(R[1] * R.y) + False + + """ + + # Field is solenoidal irrespective of frame + # Take the first frame in the result of the separate method in Vector + if field == Vector(0): + return True + frame = list(field.separate())[0] + return divergence(field, frame).simplify() is S.Zero + + +def scalar_potential(field, frame): + """ + Returns the scalar potential function of a field in a given frame + (without the added integration constant). 
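+
+    The potential is built by integrating the first field component over
+    the first coordinate and then, for each remaining coordinate, adding
+    the integral of whatever part of that component is not yet accounted
+    for. A sketch of the computation for a frame ``R``::
+
+        phi = integrate(field.dot(R.x), R[0])
+        phi += integrate(field.dot(R.y) - diff(phi, R[1]), R[1])
+        phi += integrate(field.dot(R.z) - diff(phi, R[2]), R[2])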
+
+    Parameters
+    ==========
+
+    field : Vector
+        The vector field whose scalar potential function is to be
+        calculated
+
+    frame : ReferenceFrame
+        The frame to do the calculation in
+
+    Examples
+    ========
+
+    >>> from sympy.physics.vector import ReferenceFrame
+    >>> from sympy.physics.vector import scalar_potential, gradient
+    >>> R = ReferenceFrame('R')
+    >>> scalar_potential(R.z, R) == R[2]
+    True
+    >>> scalar_field = 2*R[0]**2*R[1]*R[2]
+    >>> grad_field = gradient(scalar_field, R)
+    >>> scalar_potential(grad_field, R)
+    2*R_x**2*R_y*R_z
+
+    """
+
+    # Check whether field is conservative
+    if not is_conservative(field):
+        raise ValueError("Field is not conservative")
+    if field == Vector(0):
+        return S.Zero
+    # Express the field entirely in frame
+    # Substitute coordinate variables also
+    _check_frame(frame)
+    field = express(field, frame, variables=True)
+    # Make a list of dimensions of the frame
+    dimensions = list(frame)
+    # Calculate scalar potential function
+    temp_function = integrate(field.dot(dimensions[0]), frame[0])
+    for i, dim in enumerate(dimensions[1:]):
+        partial_diff = diff(temp_function, frame[i + 1])
+        partial_diff = field.dot(dim) - partial_diff
+        temp_function += integrate(partial_diff, frame[i + 1])
+    return temp_function
+
+
+def scalar_potential_difference(field, frame, point1, point2, origin):
+    """
+    Returns the scalar potential difference between two points in a
+    certain frame, wrt a given field.
+
+    If a scalar field is provided, its values at the two points are
+    considered. If a conservative vector field is provided, the values
+    of its scalar potential function at the two points are used.
+
+    Returns (potential at point 2) - (potential at point 1)
+
+    Parameters
+    ==========
+
+    field : Vector/sympifiable
+        The field to calculate wrt
+
+    frame : ReferenceFrame
+        The frame to do the calculations in
+
+    point1 : Point
+        The initial Point in given frame
+
+    point2 : Point
+        The second Point in the given frame
+
+    origin : Point
+        The Point to use as reference point for position vector
+        calculation
+
+    Examples
+    ========
+
+    >>> from sympy.physics.vector import ReferenceFrame, Point
+    >>> from sympy.physics.vector import scalar_potential_difference
+    >>> R = ReferenceFrame('R')
+    >>> O = Point('O')
+    >>> P = O.locatenew('P', R[0]*R.x + R[1]*R.y + R[2]*R.z)
+    >>> vectfield = 4*R[0]*R[1]*R.x + 2*R[0]**2*R.y
+    >>> scalar_potential_difference(vectfield, R, O, P, O)
+    2*R_x**2*R_y
+    >>> Q = O.locatenew('Q', 3*R.x + R.y + 2*R.z)
+    >>> scalar_potential_difference(vectfield, R, P, Q, O)
+    -2*R_x**2*R_y + 18
+
+    """
+
+    _check_frame(frame)
+    if isinstance(field, Vector):
+        # Get the scalar potential function
+        scalar_fn = scalar_potential(field, frame)
+    else:
+        # Field is a scalar
+        scalar_fn = field
+    # Express positions in required frame
+    position1 = express(point1.pos_from(origin), frame, variables=True)
+    position2 = express(point2.pos_from(origin), frame, variables=True)
+    # Get the two positions as substitution dicts for coordinate variables
+    subs_dict1 = {}
+    subs_dict2 = {}
+    for i, x in enumerate(frame):
+        subs_dict1[frame[i]] = x.dot(position1)
+        subs_dict2[frame[i]] = x.dot(position2)
+    return scalar_fn.subs(subs_dict2) - scalar_fn.subs(subs_dict1)
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/frame.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/frame.py
new file mode 100644
index 0000000000000000000000000000000000000000..23f9c0a841deaab89681e69d3962a48ee94993f8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/frame.py
@@ -0,0 +1,1435 @@
+from sympy.core.backend import (diff, expand, sin, cos, sympify, eye, zeros,
+                                ImmutableMatrix as Matrix, MatrixBase)
+from sympy.core.symbol import Symbol
+from sympy.simplify.trigsimp import trigsimp
+from sympy.physics.vector.vector import Vector, _check_vector
+from sympy.utilities.misc import translate
+
+from warnings import warn
+
+__all__ = ['CoordinateSym', 'ReferenceFrame']
+
+
+class CoordinateSym(Symbol):
+    """
+    A coordinate symbol/base scalar associated with a Reference Frame.
+
+    Ideally, users should not instantiate this class. Instances of
+    this class must only be accessed through the corresponding frame
+    as 'frame[index]'.
+
+    CoordinateSyms having the same frame and index parameters are equal
+    (even though they may be instantiated separately).
+
+    Parameters
+    ==========
+
+    name : string
+        The display name of the CoordinateSym
+
+    frame : ReferenceFrame
+        The reference frame this base scalar belongs to
+
+    index : 0, 1 or 2
+        The index of the dimension denoted by this coordinate variable
+
+    Examples
+    ========
+
+    >>> from sympy.physics.vector import ReferenceFrame, CoordinateSym
+    >>> A = ReferenceFrame('A')
+    >>> A[1]
+    A_y
+    >>> type(A[0])
+    <class 'sympy.physics.vector.frame.CoordinateSym'>
+    >>> a_y = CoordinateSym('a_y', A, 1)
+    >>> a_y == A[1]
+    True
+
+    """
+
+    def __new__(cls, name, frame, index):
+        # We can't use the cached Symbol.__new__ because this class depends on
+        # frame and index, which are not passed to Symbol.__xnew__.
+        assumptions = {}
+        super()._sanitize(assumptions, cls)
+        obj = super().__xnew__(cls, name, **assumptions)
+        _check_frame(frame)
+        if index not in range(0, 3):
+            raise ValueError("Invalid index specified")
+        obj._id = (frame, index)
+        return obj
+
+    @property
+    def frame(self):
+        return self._id[0]
+
+    def __eq__(self, other):
+        # Check if the other object is a CoordinateSym of the same frame and
+        # same index
+        if isinstance(other, CoordinateSym):
+            if other._id == self._id:
+                return True
+        return False
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return (self._id[0].__hash__(), self._id[1]).__hash__()
+
+
+class ReferenceFrame:
+    """A reference frame in classical mechanics.
+
+    ReferenceFrame is a class used to represent a reference frame in classical
+    mechanics. It has a standard basis of three unit vectors in the frame's
+    x, y, and z directions.
+
+    It can also have a rotation relative to a parent frame; this rotation is
+    defined by a direction cosine matrix relating this frame's basis vectors to
+    the parent frame's basis vectors. It can also have an angular velocity
+    vector, defined in another frame.
+
+    """
+    _count = 0
+
+    def __init__(self, name, indices=None, latexs=None, variables=None):
+        """ReferenceFrame initialization method.
+
+        A ReferenceFrame has a set of orthonormal basis vectors, along with
+        orientations relative to other ReferenceFrames and angular velocities
+        relative to other ReferenceFrames.
+
+        Parameters
+        ==========
+
+        indices : tuple of str
+            Enables the reference frame's basis unit vectors to be accessed by
+            Python's square bracket indexing notation using the provided three
+            index strings and alters the printing of the unit vectors to
+            reflect this choice.
+        latexs : tuple of str
+            Alters the LaTeX printing of the reference frame's basis unit
+            vectors to the provided three valid LaTeX strings.
+ + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, vlatex + >>> N = ReferenceFrame('N') + >>> N.x + N.x + >>> O = ReferenceFrame('O', indices=('1', '2', '3')) + >>> O.x + O['1'] + >>> O['1'] + O['1'] + >>> P = ReferenceFrame('P', latexs=('A1', 'A2', 'A3')) + >>> vlatex(P.x) + 'A1' + + ``symbols()`` can be used to create multiple Reference Frames in one + step, for example: + + >>> from sympy.physics.vector import ReferenceFrame + >>> from sympy import symbols + >>> A, B, C = symbols('A B C', cls=ReferenceFrame) + >>> D, E = symbols('D E', cls=ReferenceFrame, indices=('1', '2', '3')) + >>> A[0] + A_x + >>> D.x + D['1'] + >>> E.y + E['2'] + >>> type(A) == type(D) + True + + """ + + if not isinstance(name, str): + raise TypeError('Need to supply a valid name') + # The if statements below are for custom printing of basis-vectors for + # each frame. + # First case, when custom indices are supplied + if indices is not None: + if not isinstance(indices, (tuple, list)): + raise TypeError('Supply the indices as a list') + if len(indices) != 3: + raise ValueError('Supply 3 indices') + for i in indices: + if not isinstance(i, str): + raise TypeError('Indices must be strings') + self.str_vecs = [(name + '[\'' + indices[0] + '\']'), + (name + '[\'' + indices[1] + '\']'), + (name + '[\'' + indices[2] + '\']')] + self.pretty_vecs = [(name.lower() + "_" + indices[0]), + (name.lower() + "_" + indices[1]), + (name.lower() + "_" + indices[2])] + self.latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(), + indices[0])), + (r"\mathbf{\hat{%s}_{%s}}" % (name.lower(), + indices[1])), + (r"\mathbf{\hat{%s}_{%s}}" % (name.lower(), + indices[2]))] + self.indices = indices + # Second case, when no custom indices are supplied + else: + self.str_vecs = [(name + '.x'), (name + '.y'), (name + '.z')] + self.pretty_vecs = [name.lower() + "_x", + name.lower() + "_y", + name.lower() + "_z"] + self.latex_vecs = [(r"\mathbf{\hat{%s}_x}" % name.lower()), + (r"\mathbf{\hat{%s}_y}" % name.lower()), + (r"\mathbf{\hat{%s}_z}" % name.lower())] + self.indices = ['x', 'y', 'z'] + # Different step, for custom latex basis vectors + if latexs is not None: + if not isinstance(latexs, (tuple, list)): + raise TypeError('Supply the indices as a list') + if len(latexs) != 3: + raise ValueError('Supply 3 indices') + for i in latexs: + if not isinstance(i, str): + raise TypeError('Latex entries must be strings') + self.latex_vecs = latexs + self.name = name + self._var_dict = {} + # The _dcm_dict dictionary will only store the dcms of adjacent + # parent-child relationships. The _dcm_cache dictionary will store + # calculated dcm along with all content of _dcm_dict for faster + # retrieval of dcms. 
+        self._dcm_dict = {}
+        self._dcm_cache = {}
+        self._ang_vel_dict = {}
+        self._ang_acc_dict = {}
+        self._dlist = [self._dcm_dict, self._ang_vel_dict, self._ang_acc_dict]
+        self._cur = 0
+        self._x = Vector([(Matrix([1, 0, 0]), self)])
+        self._y = Vector([(Matrix([0, 1, 0]), self)])
+        self._z = Vector([(Matrix([0, 0, 1]), self)])
+        # Associate coordinate symbols with this frame
+        if variables is not None:
+            if not isinstance(variables, (tuple, list)):
+                raise TypeError('Supply the variable names as a list/tuple')
+            if len(variables) != 3:
+                raise ValueError('Supply 3 variable names')
+            for i in variables:
+                if not isinstance(i, str):
+                    raise TypeError('Variable names must be strings')
+        else:
+            variables = [name + '_x', name + '_y', name + '_z']
+        self.varlist = (CoordinateSym(variables[0], self, 0),
+                        CoordinateSym(variables[1], self, 1),
+                        CoordinateSym(variables[2], self, 2))
+        ReferenceFrame._count += 1
+        self.index = ReferenceFrame._count
+
+    def __getitem__(self, ind):
+        """
+        Returns the basis vector for the provided index, if the index is a
+        string.
+
+        If the index is a number, returns the coordinate variable
+        corresponding to that index.
+        """
+        if not isinstance(ind, str):
+            if ind < 3:
+                return self.varlist[ind]
+            else:
+                raise ValueError("Invalid index provided")
+        if self.indices[0] == ind:
+            return self.x
+        if self.indices[1] == ind:
+            return self.y
+        if self.indices[2] == ind:
+            return self.z
+        else:
+            raise ValueError('Not a defined index')
+
+    def __iter__(self):
+        return iter([self.x, self.y, self.z])
+
+    def __str__(self):
+        """Returns the name of the frame. """
+        return self.name
+
+    __repr__ = __str__
+
+    def _dict_list(self, other, num):
+        """Returns an inclusive list of reference frames that connect this
+        reference frame to the provided reference frame.
+
+        Parameters
+        ==========
+        other : ReferenceFrame
+            The other reference frame to look for a connecting relationship to.
+        num : integer
+            ``0``, ``1``, and ``2`` will look for orientation, angular
+            velocity, and angular acceleration relationships between the two
+            frames, respectively.
+
+        Returns
+        =======
+        list
+            Inclusive list of reference frames that connect this reference
+            frame to the other reference frame.
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> A = ReferenceFrame('A')
+        >>> B = ReferenceFrame('B')
+        >>> C = ReferenceFrame('C')
+        >>> D = ReferenceFrame('D')
+        >>> B.orient_axis(A, A.x, 1.0)
+        >>> C.orient_axis(B, B.x, 1.0)
+        >>> D.orient_axis(C, C.x, 1.0)
+        >>> D._dict_list(A, 0)
+        [D, C, B, A]
+
+        Raises
+        ======
+
+        ValueError
+            When no path is found between the two reference frames or ``num``
+            is an incorrect value.
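+
+        Notes
+        =====
+
+        The connecting path is found by repeatedly extending partial frame
+        chains with adjacent frames and keeping the shortest chain that
+        terminates at ``other``.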
+ + """ + + connect_type = {0: 'orientation', + 1: 'angular velocity', + 2: 'angular acceleration'} + + if num not in connect_type.keys(): + raise ValueError('Valid values for num are 0, 1, or 2.') + + possible_connecting_paths = [[self]] + oldlist = [[]] + while possible_connecting_paths != oldlist: + oldlist = possible_connecting_paths[:] # make a copy + for frame_list in possible_connecting_paths: + frames_adjacent_to_last = frame_list[-1]._dlist[num].keys() + for adjacent_frame in frames_adjacent_to_last: + if adjacent_frame not in frame_list: + connecting_path = frame_list + [adjacent_frame] + if connecting_path not in possible_connecting_paths: + possible_connecting_paths.append(connecting_path) + + for connecting_path in oldlist: + if connecting_path[-1] != other: + possible_connecting_paths.remove(connecting_path) + possible_connecting_paths.sort(key=len) + + if len(possible_connecting_paths) != 0: + return possible_connecting_paths[0] # selects the shortest path + + msg = 'No connecting {} path found between {} and {}.' + raise ValueError(msg.format(connect_type[num], self.name, other.name)) + + def _w_diff_dcm(self, otherframe): + """Angular velocity from time differentiating the DCM. """ + from sympy.physics.vector.functions import dynamicsymbols + dcm2diff = otherframe.dcm(self) + diffed = dcm2diff.diff(dynamicsymbols._t) + angvelmat = diffed * dcm2diff.T + w1 = trigsimp(expand(angvelmat[7]), recursive=True) + w2 = trigsimp(expand(angvelmat[2]), recursive=True) + w3 = trigsimp(expand(angvelmat[3]), recursive=True) + return Vector([(Matrix([w1, w2, w3]), otherframe)]) + + def variable_map(self, otherframe): + """ + Returns a dictionary which expresses the coordinate variables + of this frame in terms of the variables of otherframe. + + If Vector.simp is True, returns a simplified version of the mapped + values. Else, returns them without simplification. + + Simplification of the expressions may take time. + + Parameters + ========== + + otherframe : ReferenceFrame + The other frame to map the variables to + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols + >>> A = ReferenceFrame('A') + >>> q = dynamicsymbols('q') + >>> B = A.orientnew('B', 'Axis', [q, A.z]) + >>> A.variable_map(B) + {A_x: B_x*cos(q(t)) - B_y*sin(q(t)), A_y: B_x*sin(q(t)) + B_y*cos(q(t)), A_z: B_z} + + """ + + _check_frame(otherframe) + if (otherframe, Vector.simp) in self._var_dict: + return self._var_dict[(otherframe, Vector.simp)] + else: + vars_matrix = self.dcm(otherframe) * Matrix(otherframe.varlist) + mapping = {} + for i, x in enumerate(self): + if Vector.simp: + mapping[self.varlist[i]] = trigsimp(vars_matrix[i], + method='fu') + else: + mapping[self.varlist[i]] = vars_matrix[i] + self._var_dict[(otherframe, Vector.simp)] = mapping + return mapping + + def ang_acc_in(self, otherframe): + """Returns the angular acceleration Vector of the ReferenceFrame. + + Effectively returns the Vector: + + ``N_alpha_B`` + + which represent the angular acceleration of B in N, where B is self, + and N is otherframe. + + Parameters + ========== + + otherframe : ReferenceFrame + The ReferenceFrame which the angular acceleration is returned in. 
+ + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> N = ReferenceFrame('N') + >>> A = ReferenceFrame('A') + >>> V = 10 * N.x + >>> A.set_ang_acc(N, V) + >>> A.ang_acc_in(N) + 10*N.x + + """ + + _check_frame(otherframe) + if otherframe in self._ang_acc_dict: + return self._ang_acc_dict[otherframe] + else: + return self.ang_vel_in(otherframe).dt(otherframe) + + def ang_vel_in(self, otherframe): + """Returns the angular velocity Vector of the ReferenceFrame. + + Effectively returns the Vector: + + ^N omega ^B + + which represent the angular velocity of B in N, where B is self, and + N is otherframe. + + Parameters + ========== + + otherframe : ReferenceFrame + The ReferenceFrame which the angular velocity is returned in. + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> N = ReferenceFrame('N') + >>> A = ReferenceFrame('A') + >>> V = 10 * N.x + >>> A.set_ang_vel(N, V) + >>> A.ang_vel_in(N) + 10*N.x + + """ + + _check_frame(otherframe) + flist = self._dict_list(otherframe, 1) + outvec = Vector(0) + for i in range(len(flist) - 1): + outvec += flist[i]._ang_vel_dict[flist[i + 1]] + return outvec + + def dcm(self, otherframe): + r"""Returns the direction cosine matrix of this reference frame + relative to the provided reference frame. + + The returned matrix can be used to express the orthogonal unit vectors + of this frame in terms of the orthogonal unit vectors of + ``otherframe``. + + Parameters + ========== + + otherframe : ReferenceFrame + The reference frame which the direction cosine matrix of this frame + is formed relative to. + + Examples + ======== + + The following example rotates the reference frame A relative to N by a + simple rotation and then calculates the direction cosine matrix of N + relative to A. + + >>> from sympy import symbols, sin, cos + >>> from sympy.physics.vector import ReferenceFrame + >>> q1 = symbols('q1') + >>> N = ReferenceFrame('N') + >>> A = ReferenceFrame('A') + >>> A.orient_axis(N, q1, N.x) + >>> N.dcm(A) + Matrix([ + [1, 0, 0], + [0, cos(q1), -sin(q1)], + [0, sin(q1), cos(q1)]]) + + The second row of the above direction cosine matrix represents the + ``N.y`` unit vector in N expressed in A. Like so: + + >>> Ny = 0*A.x + cos(q1)*A.y - sin(q1)*A.z + + Thus, expressing ``N.y`` in A should return the same result: + + >>> N.y.express(A) + cos(q1)*A.y - sin(q1)*A.z + + Notes + ===== + + It is important to know what form of the direction cosine matrix is + returned. If ``B.dcm(A)`` is called, it means the "direction cosine + matrix of B rotated relative to A". This is the matrix + :math:`{}^B\mathbf{C}^A` shown in the following relationship: + + .. math:: + + \begin{bmatrix} + \hat{\mathbf{b}}_1 \\ + \hat{\mathbf{b}}_2 \\ + \hat{\mathbf{b}}_3 + \end{bmatrix} + = + {}^B\mathbf{C}^A + \begin{bmatrix} + \hat{\mathbf{a}}_1 \\ + \hat{\mathbf{a}}_2 \\ + \hat{\mathbf{a}}_3 + \end{bmatrix}. + + :math:`{}^B\mathbf{C}^A` is the matrix that expresses the B unit + vectors in terms of the A unit vectors. 
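+
+        Because a direction cosine matrix is orthogonal, its inverse is its
+        transpose, so ``A.dcm(B)`` always equals ``B.dcm(A).T``.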
+ + """ + + _check_frame(otherframe) + # Check if the dcm wrt that frame has already been calculated + if otherframe in self._dcm_cache: + return self._dcm_cache[otherframe] + flist = self._dict_list(otherframe, 0) + outdcm = eye(3) + for i in range(len(flist) - 1): + outdcm = outdcm * flist[i]._dcm_dict[flist[i + 1]] + # After calculation, store the dcm in dcm cache for faster future + # retrieval + self._dcm_cache[otherframe] = outdcm + otherframe._dcm_cache[self] = outdcm.T + return outdcm + + def _dcm(self, parent, parent_orient): + # If parent.oreint(self) is already defined,then + # update the _dcm_dict of parent while over write + # all content of self._dcm_dict and self._dcm_cache + # with new dcm relation. + # Else update _dcm_cache and _dcm_dict of both + # self and parent. + frames = self._dcm_cache.keys() + dcm_dict_del = [] + dcm_cache_del = [] + if parent in frames: + for frame in frames: + if frame in self._dcm_dict: + dcm_dict_del += [frame] + dcm_cache_del += [frame] + # Reset the _dcm_cache of this frame, and remove it from the + # _dcm_caches of the frames it is linked to. Also remove it from + # the _dcm_dict of its parent + for frame in dcm_dict_del: + del frame._dcm_dict[self] + for frame in dcm_cache_del: + del frame._dcm_cache[self] + # Reset the _dcm_dict + self._dcm_dict = self._dlist[0] = {} + # Reset the _dcm_cache + self._dcm_cache = {} + + else: + # Check for loops and raise warning accordingly. + visited = [] + queue = list(frames) + cont = True # Flag to control queue loop. + while queue and cont: + node = queue.pop(0) + if node not in visited: + visited.append(node) + neighbors = node._dcm_dict.keys() + for neighbor in neighbors: + if neighbor == parent: + warn('Loops are defined among the orientation of ' + 'frames. This is likely not desired and may ' + 'cause errors in your calculations.') + cont = False + break + queue.append(neighbor) + + # Add the dcm relationship to _dcm_dict + self._dcm_dict.update({parent: parent_orient.T}) + parent._dcm_dict.update({self: parent_orient}) + # Update the dcm cache + self._dcm_cache.update({parent: parent_orient.T}) + parent._dcm_cache.update({self: parent_orient}) + + def orient_axis(self, parent, axis, angle): + """Sets the orientation of this reference frame with respect to a + parent reference frame by rotating through an angle about an axis fixed + in the parent reference frame. + + Parameters + ========== + + parent : ReferenceFrame + Reference frame that this reference frame will be rotated relative + to. + axis : Vector + Vector fixed in the parent frame about about which this frame is + rotated. It need not be a unit vector and the rotation follows the + right hand rule. + angle : sympifiable + Angle in radians by which it the frame is to be rotated. + + Warns + ====== + + UserWarning + If the orientation creates a kinematic loop. + + Examples + ======== + + Setup variables for the examples: + + >>> from sympy import symbols + >>> from sympy.physics.vector import ReferenceFrame + >>> q1 = symbols('q1') + >>> N = ReferenceFrame('N') + >>> B = ReferenceFrame('B') + >>> B.orient_axis(N, N.x, q1) + + The ``orient_axis()`` method generates a direction cosine matrix and + its transpose which defines the orientation of B relative to N and vice + versa. 
Once orient is called, ``dcm()`` outputs the appropriate + direction cosine matrix: + + >>> B.dcm(N) + Matrix([ + [1, 0, 0], + [0, cos(q1), sin(q1)], + [0, -sin(q1), cos(q1)]]) + >>> N.dcm(B) + Matrix([ + [1, 0, 0], + [0, cos(q1), -sin(q1)], + [0, sin(q1), cos(q1)]]) + + The following two lines show that the sense of the rotation can be + defined by negating the vector direction or the angle. Both lines + produce the same result. + + >>> B.orient_axis(N, -N.x, q1) + >>> B.orient_axis(N, N.x, -q1) + + """ + + from sympy.physics.vector.functions import dynamicsymbols + _check_frame(parent) + + if not isinstance(axis, Vector) and isinstance(angle, Vector): + axis, angle = angle, axis + + axis = _check_vector(axis) + theta = sympify(angle) + + if not axis.dt(parent) == 0: + raise ValueError('Axis cannot be time-varying.') + unit_axis = axis.express(parent).normalize() + unit_col = unit_axis.args[0][0] + parent_orient_axis = ( + (eye(3) - unit_col * unit_col.T) * cos(theta) + + Matrix([[0, -unit_col[2], unit_col[1]], + [unit_col[2], 0, -unit_col[0]], + [-unit_col[1], unit_col[0], 0]]) * + sin(theta) + unit_col * unit_col.T) + + self._dcm(parent, parent_orient_axis) + + thetad = (theta).diff(dynamicsymbols._t) + wvec = thetad*axis.express(parent).normalize() + self._ang_vel_dict.update({parent: wvec}) + parent._ang_vel_dict.update({self: -wvec}) + self._var_dict = {} + + def orient_explicit(self, parent, dcm): + """Sets the orientation of this reference frame relative to a parent + reference frame by explicitly setting the direction cosine matrix. + + Parameters + ========== + + parent : ReferenceFrame + Reference frame that this reference frame will be rotated relative + to. + dcm : Matrix, shape(3, 3) + Direction cosine matrix that specifies the relative rotation + between the two reference frames. + + Warns + ====== + + UserWarning + If the orientation creates a kinematic loop. + + Examples + ======== + + Setup variables for the examples: + + >>> from sympy import symbols, Matrix, sin, cos + >>> from sympy.physics.vector import ReferenceFrame + >>> q1 = symbols('q1') + >>> A = ReferenceFrame('A') + >>> B = ReferenceFrame('B') + >>> N = ReferenceFrame('N') + + A simple rotation of ``A`` relative to ``N`` about ``N.x`` is defined + by the following direction cosine matrix: + + >>> dcm = Matrix([[1, 0, 0], + ... [0, cos(q1), -sin(q1)], + ... [0, sin(q1), cos(q1)]]) + >>> A.orient_explicit(N, dcm) + >>> A.dcm(N) + Matrix([ + [1, 0, 0], + [0, cos(q1), sin(q1)], + [0, -sin(q1), cos(q1)]]) + + This is equivalent to using ``orient_axis()``: + + >>> B.orient_axis(N, N.x, q1) + >>> B.dcm(N) + Matrix([ + [1, 0, 0], + [0, cos(q1), sin(q1)], + [0, -sin(q1), cos(q1)]]) + + **Note carefully that** ``N.dcm(B)`` **(the transpose) would be passed + into** ``orient_explicit()`` **for** ``A.dcm(N)`` **to match** + ``B.dcm(N)``: + + >>> A.orient_explicit(N, N.dcm(B)) + >>> A.dcm(N) + Matrix([ + [1, 0, 0], + [0, cos(q1), sin(q1)], + [0, -sin(q1), cos(q1)]]) + + """ + + _check_frame(parent) + # amounts must be a Matrix type object + # (e.g. sympy.matrices.dense.MutableDenseMatrix). 
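+        # Note that the supplied matrix is not validated beyond its type: no
+        # orthogonality (proper rotation) check is performed here, so the
+        # caller must supply a valid direction cosine matrix.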
+        if not isinstance(dcm, MatrixBase):
+            raise TypeError("Amounts must be a SymPy Matrix type object.")
+
+        parent_orient_dcm = dcm
+
+        self._dcm(parent, parent_orient_dcm)
+
+        wvec = self._w_diff_dcm(parent)
+        self._ang_vel_dict.update({parent: wvec})
+        parent._ang_vel_dict.update({self: -wvec})
+        self._var_dict = {}
+
+    def _rot(self, axis, angle):
+        """DCM for simple axis 1, 2, or 3 rotations."""
+        if axis == 1:
+            return Matrix([[1, 0, 0],
+                           [0, cos(angle), -sin(angle)],
+                           [0, sin(angle), cos(angle)]])
+        elif axis == 2:
+            return Matrix([[cos(angle), 0, sin(angle)],
+                           [0, 1, 0],
+                           [-sin(angle), 0, cos(angle)]])
+        elif axis == 3:
+            return Matrix([[cos(angle), -sin(angle), 0],
+                           [sin(angle), cos(angle), 0],
+                           [0, 0, 1]])
+
+    def _parse_consecutive_rotations(self, angles, rotation_order):
+        """Helper for orient_body_fixed and orient_space_fixed.
+
+        Parameters
+        ==========
+        angles : 3-tuple of sympifiable
+            Three angles in radians used for the successive rotations.
+        rotation_order : 3 character string or 3 digit integer
+            Order of the rotations. The order can be specified by the strings
+            ``'XZX'``, ``'131'``, or the integer ``131``. There are 12 unique
+            valid rotation orders.
+
+        Returns
+        =======
+
+        amounts : list
+            List of sympifiables corresponding to the rotation angles.
+        rot_order : list
+            List of integers corresponding to the axis of rotation.
+        rot_matrices : list
+            List of direction cosine matrices, one for each successive simple
+            rotation.
+
+        """
+        amounts = list(angles)
+        for i, v in enumerate(amounts):
+            if not isinstance(v, Vector):
+                amounts[i] = sympify(v)
+
+        approved_orders = ('123', '231', '312', '132', '213', '321', '121',
+                           '131', '212', '232', '313', '323', '')
+        # make sure XYZ => 123
+        rot_order = translate(str(rotation_order), 'XYZxyz', '123123')
+        if rot_order not in approved_orders:
+            raise TypeError('The rotation order is not a valid order.')
+
+        rot_order = [int(r) for r in rot_order]
+        if not (len(amounts) == 3 and len(rot_order) == 3):
+            raise TypeError('Body orientation takes 3 values & 3 orders')
+        rot_matrices = [self._rot(order, amount)
+                        for (order, amount) in zip(rot_order, amounts)]
+        return amounts, rot_order, rot_matrices
+
+    def orient_body_fixed(self, parent, angles, rotation_order):
+        """Rotates this reference frame relative to the parent reference frame
+        by right hand rotating through three successive body fixed simple axis
+        rotations. Each subsequent axis of rotation is about the "body fixed"
+        unit vectors of a new intermediate reference frame. This type of
+        rotation is also referred to as rotating through the `Euler and
+        Tait-Bryan Angles`_.
+
+        .. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles
+
+        The computed angular velocity in this method is by default expressed in
+        the child's frame, so it is most preferable to use ``u1 * child.x + u2 *
+        child.y + u3 * child.z`` as generalized speeds.
+
+        Parameters
+        ==========
+
+        parent : ReferenceFrame
+            Reference frame that this reference frame will be rotated relative
+            to.
+        angles : 3-tuple of sympifiable
+            Three angles in radians used for the successive rotations.
+        rotation_order : 3 character string or 3 digit integer
+            Order of the rotations about each intermediate reference frames'
+            unit vectors. The Euler rotation about the X, Z', X'' axes can be
+            specified by the strings ``'XZX'``, ``'131'``, or the integer
+            ``131``. There are 12 unique valid rotation orders (6 Euler and 6
+            Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx,
+            and yxz.
+
+        Warns
+        ======
+
+        UserWarning
+            If the orientation creates a kinematic loop.
+
+        Examples
+        ========
+
+        Setup variables for the examples:
+
+        >>> from sympy import symbols
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> q1, q2, q3 = symbols('q1, q2, q3')
+        >>> N = ReferenceFrame('N')
+        >>> B = ReferenceFrame('B')
+        >>> B1 = ReferenceFrame('B1')
+        >>> B2 = ReferenceFrame('B2')
+        >>> B3 = ReferenceFrame('B3')
+
+        For example, a classic Euler Angle rotation can be done by:
+
+        >>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX')
+        >>> B.dcm(N)
+        Matrix([
+        [        cos(q2),                            sin(q1)*sin(q2),                           -sin(q2)*cos(q1)],
+        [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3),  sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
+        [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])
+
+        This rotates reference frame B relative to reference frame N through
+        ``q1`` about ``N.x``, then rotates B again through ``q2`` about
+        ``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to
+        three successive ``orient_axis()`` calls:
+
+        >>> B1.orient_axis(N, N.x, q1)
+        >>> B2.orient_axis(B1, B1.y, q2)
+        >>> B3.orient_axis(B2, B2.x, q3)
+        >>> B3.dcm(N)
+        Matrix([
+        [        cos(q2),                            sin(q1)*sin(q2),                           -sin(q2)*cos(q1)],
+        [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3),  sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
+        [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])
+
+        Acceptable rotation orders are of length 3, expressed as a string
+        ``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis
+        twice in a row are prohibited.
+
+        >>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ')
+        >>> B.orient_body_fixed(N, (q1, q2, 0), '121')
+        >>> B.orient_body_fixed(N, (q1, q2, q3), 123)
+
+        """
+        from sympy.physics.vector.functions import dynamicsymbols
+
+        _check_frame(parent)
+
+        amounts, rot_order, rot_matrices = self._parse_consecutive_rotations(
+            angles, rotation_order)
+        self._dcm(parent, rot_matrices[0] * rot_matrices[1] * rot_matrices[2])
+
+        rot_vecs = [zeros(3, 1) for _ in range(3)]
+        for i, order in enumerate(rot_order):
+            rot_vecs[i][order - 1] = amounts[i].diff(dynamicsymbols._t)
+        u1, u2, u3 = rot_vecs[2] + rot_matrices[2].T * (
+            rot_vecs[1] + rot_matrices[1].T * rot_vecs[0])
+        # angular velocity of self in parent, expressed in self's own basis
+        wvec = u1 * self.x + u2 * self.y + u3 * self.z
+        self._ang_vel_dict.update({parent: wvec})
+        parent._ang_vel_dict.update({self: -wvec})
+        self._var_dict = {}
+
+    def orient_space_fixed(self, parent, angles, rotation_order):
+        """Rotates this reference frame relative to the parent reference frame
+        by right hand rotating through three successive space fixed simple axis
+        rotations. Each subsequent axis of rotation is about the "space fixed"
+        unit vectors of the parent reference frame.
+
+        The computed angular velocity in this method is by default expressed in
+        the child's frame, so it is most preferable to use ``u1 * child.x + u2 *
+        child.y + u3 * child.z`` as generalized speeds.
+
+        Parameters
+        ==========
+        parent : ReferenceFrame
+            Reference frame that this reference frame will be rotated relative
+            to.
+        angles : 3-tuple of sympifiable
+            Three angles in radians used for the successive rotations.
+        rotation_order : 3 character string or 3 digit integer
+            Order of the rotations about the parent reference frame's unit
+            vectors. The order can be specified by the strings ``'XZX'``,
+            ``'131'``, or the integer ``131``. There are 12 unique valid
+            rotation orders.
+
+        Warns
+        ======
+
+        UserWarning
+            If the orientation creates a kinematic loop.
+
+        Examples
+        ========
+
+        Setup variables for the examples:
+
+        >>> from sympy import symbols
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> q1, q2, q3 = symbols('q1, q2, q3')
+        >>> N = ReferenceFrame('N')
+        >>> B = ReferenceFrame('B')
+        >>> B1 = ReferenceFrame('B1')
+        >>> B2 = ReferenceFrame('B2')
+        >>> B3 = ReferenceFrame('B3')
+
+        >>> B.orient_space_fixed(N, (q1, q2, q3), '312')
+        >>> B.dcm(N)
+        Matrix([
+        [ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)],
+        [-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)],
+        [                           sin(q3)*cos(q2),        -sin(q2),                           cos(q2)*cos(q3)]])
+
+        is equivalent to:
+
+        >>> B1.orient_axis(N, N.z, q1)
+        >>> B2.orient_axis(B1, N.x, q2)
+        >>> B3.orient_axis(B2, N.y, q3)
+        >>> B3.dcm(N).simplify()
+        Matrix([
+        [ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)],
+        [-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)],
+        [                           sin(q3)*cos(q2),        -sin(q2),                           cos(q2)*cos(q3)]])
+
+        It is worth noting that space-fixed and body-fixed rotations are
+        related by the order of the rotations, i.e. the reverse order of body
+        fixed will give space fixed and vice versa.
+
+        >>> B.orient_space_fixed(N, (q1, q2, q3), '231')
+        >>> B.dcm(N)
+        Matrix([
+        [cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],
+        [       -sin(q2),                           cos(q2)*cos(q3),                            sin(q3)*cos(q2)],
+        [sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1),  sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]])
+
+        >>> B.orient_body_fixed(N, (q3, q2, q1), '132')
+        >>> B.dcm(N)
+        Matrix([
+        [cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],
+        [       -sin(q2),                           cos(q2)*cos(q3),                            sin(q3)*cos(q2)],
+        [sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1),  sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]])
+
+        """
+        from sympy.physics.vector.functions import dynamicsymbols
+
+        _check_frame(parent)
+
+        amounts, rot_order, rot_matrices = self._parse_consecutive_rotations(
+            angles, rotation_order)
+        self._dcm(parent, rot_matrices[2] * rot_matrices[1] * rot_matrices[0])
+
+        rot_vecs = [zeros(3, 1) for _ in range(3)]
+        for i, order in enumerate(rot_order):
+            rot_vecs[i][order - 1] = amounts[i].diff(dynamicsymbols._t)
+        u1, u2, u3 = rot_vecs[0] + rot_matrices[0].T * (
+            rot_vecs[1] + rot_matrices[1].T * rot_vecs[2])
+        # angular velocity of self in parent, expressed in self's own basis
+        wvec = u1 * self.x + u2 * self.y + u3 * self.z
+        self._ang_vel_dict.update({parent: wvec})
+        parent._ang_vel_dict.update({self: -wvec})
+        self._var_dict = {}
+
+    def orient_quaternion(self, parent, numbers):
+        """Sets the orientation of this reference frame relative to a parent
+        reference frame via an orientation quaternion. An orientation
+        quaternion is defined as a finite rotation about a unit vector,
+        ``(lambda_x, lambda_y, lambda_z)``, by an angle ``theta``. The
+        orientation quaternion is described by four parameters:
+
+        - ``q0 = cos(theta/2)``
+        - ``q1 = lambda_x*sin(theta/2)``
+        - ``q2 = lambda_y*sin(theta/2)``
+        - ``q3 = lambda_z*sin(theta/2)``
+
+        See `Quaternions and Spatial Rotation
+        <https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation>`_ on
+        Wikipedia for more information.
+
+        Parameters
+        ==========
+        parent : ReferenceFrame
+            Reference frame that this reference frame will be rotated relative
+            to.
+        numbers : 4-tuple of sympifiable
+            The four quaternion scalar numbers as defined above: ``q0``,
+            ``q1``, ``q2``, ``q3``.
+
+        Warns
+        ======
+
+        UserWarning
+            If the orientation creates a kinematic loop.
+
+        Examples
+        ========
+
+        Setup variables for the examples:
+
+        >>> from sympy import symbols
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
+        >>> N = ReferenceFrame('N')
+        >>> B = ReferenceFrame('B')
+
+        Set the orientation:
+
+        >>> B.orient_quaternion(N, (q0, q1, q2, q3))
+        >>> B.dcm(N)
+        Matrix([
+        [q0**2 + q1**2 - q2**2 - q3**2,             2*q0*q3 + 2*q1*q2,            -2*q0*q2 + 2*q1*q3],
+        [           -2*q0*q3 + 2*q1*q2, q0**2 - q1**2 + q2**2 - q3**2,             2*q0*q1 + 2*q2*q3],
+        [            2*q0*q2 + 2*q1*q3,            -2*q0*q1 + 2*q2*q3, q0**2 - q1**2 - q2**2 + q3**2]])
+
+        """
+
+        from sympy.physics.vector.functions import dynamicsymbols
+        _check_frame(parent)
+
+        numbers = list(numbers)
+        for i, v in enumerate(numbers):
+            if not isinstance(v, Vector):
+                numbers[i] = sympify(v)
+
+        if not (isinstance(numbers, (list, tuple)) and (len(numbers) == 4)):
+            raise TypeError('Amounts are a list or tuple of length 4')
+        q0, q1, q2, q3 = numbers
+        parent_orient_quaternion = (
+            Matrix([[q0**2 + q1**2 - q2**2 - q3**2,
+                     2 * (q1 * q2 - q0 * q3),
+                     2 * (q0 * q2 + q1 * q3)],
+                    [2 * (q1 * q2 + q0 * q3),
+                     q0**2 - q1**2 + q2**2 - q3**2,
+                     2 * (q2 * q3 - q0 * q1)],
+                    [2 * (q1 * q3 - q0 * q2),
+                     2 * (q0 * q1 + q2 * q3),
+                     q0**2 - q1**2 - q2**2 + q3**2]]))
+
+        self._dcm(parent, parent_orient_quaternion)
+
+        t = dynamicsymbols._t
+        q0, q1, q2, q3 = numbers
+        q0d = diff(q0, t)
+        q1d = diff(q1, t)
+        q2d = diff(q2, t)
+        q3d = diff(q3, t)
+        w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)
+        w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)
+        w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)
+        wvec = Vector([(Matrix([w1, w2, w3]), self)])
+
+        self._ang_vel_dict.update({parent: wvec})
+        parent._ang_vel_dict.update({self: -wvec})
+        self._var_dict = {}
+
+    def orient(self, parent, rot_type, amounts, rot_order=''):
+        """Sets the orientation of this reference frame relative to another
+        (parent) reference frame.
+
+        .. note:: It is now recommended to use the ``.orient_axis,
+           .orient_body_fixed, .orient_space_fixed, .orient_quaternion``
+           methods for the different rotation types.
+
+        Parameters
+        ==========
+
+        parent : ReferenceFrame
+            Reference frame that this reference frame will be rotated relative
+            to.
+        rot_type : str
+            The method used to generate the direction cosine matrix. Supported
+            methods are:
+
+            - ``'Axis'``: simple rotations about a single common axis
+            - ``'DCM'``: for setting the direction cosine matrix directly
+            - ``'Body'``: three successive rotations about new intermediate
+              axes, also called "Euler and Tait-Bryan angles"
+            - ``'Space'``: three successive rotations about the parent
+              frames' unit vectors
+            - ``'Quaternion'``: rotations defined by four parameters which
+              result in a singularity free direction cosine matrix
+
+        amounts :
+            Expressions defining the rotation angles or direction cosine
+            matrix. These must match the ``rot_type``. See examples below for
+            details. The input types are:
+
+            - ``'Axis'``: 2-tuple (expr/sym/func, Vector)
+            - ``'DCM'``: Matrix, shape(3,3)
+            - ``'Body'``: 3-tuple of expressions, symbols, or functions
+            - ``'Space'``: 3-tuple of expressions, symbols, or functions
+            - ``'Quaternion'``: 4-tuple of expressions, symbols, or
+              functions
+
+        rot_order : str or int, optional
+            If applicable, the order of the successive rotations.
The string + ``'123'`` and integer ``123`` are equivalent, for example. Required + for ``'Body'`` and ``'Space'``. + + Warns + ====== + + UserWarning + If the orientation creates a kinematic loop. + + """ + + _check_frame(parent) + + approved_orders = ('123', '231', '312', '132', '213', '321', '121', + '131', '212', '232', '313', '323', '') + rot_order = translate(str(rot_order), 'XYZxyz', '123123') + rot_type = rot_type.upper() + + if rot_order not in approved_orders: + raise TypeError('The supplied order is not an approved type') + + if rot_type == 'AXIS': + self.orient_axis(parent, amounts[1], amounts[0]) + + elif rot_type == 'DCM': + self.orient_explicit(parent, amounts) + + elif rot_type == 'BODY': + self.orient_body_fixed(parent, amounts, rot_order) + + elif rot_type == 'SPACE': + self.orient_space_fixed(parent, amounts, rot_order) + + elif rot_type == 'QUATERNION': + self.orient_quaternion(parent, amounts) + + else: + raise NotImplementedError('That is not an implemented rotation') + + def orientnew(self, newname, rot_type, amounts, rot_order='', + variables=None, indices=None, latexs=None): + r"""Returns a new reference frame oriented with respect to this + reference frame. + + See ``ReferenceFrame.orient()`` for detailed examples of how to orient + reference frames. + + Parameters + ========== + + newname : str + Name for the new reference frame. + rot_type : str + The method used to generate the direction cosine matrix. Supported + methods are: + + - ``'Axis'``: simple rotations about a single common axis + - ``'DCM'``: for setting the direction cosine matrix directly + - ``'Body'``: three successive rotations about new intermediate + axes, also called "Euler and Tait-Bryan angles" + - ``'Space'``: three successive rotations about the parent + frames' unit vectors + - ``'Quaternion'``: rotations defined by four parameters which + result in a singularity free direction cosine matrix + + amounts : + Expressions defining the rotation angles or direction cosine + matrix. These must match the ``rot_type``. See examples below for + details. The input types are: + + - ``'Axis'``: 2-tuple (expr/sym/func, Vector) + - ``'DCM'``: Matrix, shape(3,3) + - ``'Body'``: 3-tuple of expressions, symbols, or functions + - ``'Space'``: 3-tuple of expressions, symbols, or functions + - ``'Quaternion'``: 4-tuple of expressions, symbols, or + functions + + rot_order : str or int, optional + If applicable, the order of the successive of rotations. The string + ``'123'`` and integer ``123`` are equivalent, for example. Required + for ``'Body'`` and ``'Space'``. + indices : tuple of str + Enables the reference frame's basis unit vectors to be accessed by + Python's square bracket indexing notation using the provided three + indice strings and alters the printing of the unit vectors to + reflect this choice. + latexs : tuple of str + Alters the LaTeX printing of the reference frame's basis unit + vectors to the provided three valid LaTeX strings. + + Examples + ======== + + >>> from sympy import symbols + >>> from sympy.physics.vector import ReferenceFrame, vlatex + >>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3') + >>> N = ReferenceFrame('N') + + Create a new reference frame A rotated relative to N through a simple + rotation. + + >>> A = N.orientnew('A', 'Axis', (q0, N.x)) + + Create a new reference frame B rotated relative to N through body-fixed + rotations. 
+ + >>> B = N.orientnew('B', 'Body', (q1, q2, q3), '123') + + Create a new reference frame C rotated relative to N through a simple + rotation with unique indices and LaTeX printing. + + >>> C = N.orientnew('C', 'Axis', (q0, N.x), indices=('1', '2', '3'), + ... latexs=(r'\hat{\mathbf{c}}_1',r'\hat{\mathbf{c}}_2', + ... r'\hat{\mathbf{c}}_3')) + >>> C['1'] + C['1'] + >>> print(vlatex(C['1'])) + \hat{\mathbf{c}}_1 + + """ + + newframe = self.__class__(newname, variables=variables, + indices=indices, latexs=latexs) + + approved_orders = ('123', '231', '312', '132', '213', '321', '121', + '131', '212', '232', '313', '323', '') + rot_order = translate(str(rot_order), 'XYZxyz', '123123') + rot_type = rot_type.upper() + + if rot_order not in approved_orders: + raise TypeError('The supplied order is not an approved type') + + if rot_type == 'AXIS': + newframe.orient_axis(self, amounts[1], amounts[0]) + + elif rot_type == 'DCM': + newframe.orient_explicit(self, amounts) + + elif rot_type == 'BODY': + newframe.orient_body_fixed(self, amounts, rot_order) + + elif rot_type == 'SPACE': + newframe.orient_space_fixed(self, amounts, rot_order) + + elif rot_type == 'QUATERNION': + newframe.orient_quaternion(self, amounts) + + else: + raise NotImplementedError('That is not an implemented rotation') + return newframe + + def set_ang_acc(self, otherframe, value): + """Define the angular acceleration Vector in a ReferenceFrame. + + Defines the angular acceleration of this ReferenceFrame, in another. + Angular acceleration can be defined with respect to multiple different + ReferenceFrames. Care must be taken to not create loops which are + inconsistent. + + Parameters + ========== + + otherframe : ReferenceFrame + A ReferenceFrame to define the angular acceleration in + value : Vector + The Vector representing angular acceleration + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> N = ReferenceFrame('N') + >>> A = ReferenceFrame('A') + >>> V = 10 * N.x + >>> A.set_ang_acc(N, V) + >>> A.ang_acc_in(N) + 10*N.x + + """ + + if value == 0: + value = Vector(0) + value = _check_vector(value) + _check_frame(otherframe) + self._ang_acc_dict.update({otherframe: value}) + otherframe._ang_acc_dict.update({self: -value}) + + def set_ang_vel(self, otherframe, value): + """Define the angular velocity vector in a ReferenceFrame. + + Defines the angular velocity of this ReferenceFrame, in another. + Angular velocity can be defined with respect to multiple different + ReferenceFrames. Care must be taken to not create loops which are + inconsistent. + + Parameters + ========== + + otherframe : ReferenceFrame + A ReferenceFrame to define the angular velocity in + value : Vector + The Vector representing angular velocity + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame + >>> N = ReferenceFrame('N') + >>> A = ReferenceFrame('A') + >>> V = 10 * N.x + >>> A.set_ang_vel(N, V) + >>> A.ang_vel_in(N) + 10*N.x + + """ + + if value == 0: + value = Vector(0) + value = _check_vector(value) + _check_frame(otherframe) + self._ang_vel_dict.update({otherframe: value}) + otherframe._ang_vel_dict.update({self: -value}) + + @property + def x(self): + """The basis Vector for the ReferenceFrame, in the x direction. """ + return self._x + + @property + def y(self): + """The basis Vector for the ReferenceFrame, in the y direction. """ + return self._y + + @property + def z(self): + """The basis Vector for the ReferenceFrame, in the z direction. 
""" + return self._z + + def partial_velocity(self, frame, *gen_speeds): + """Returns the partial angular velocities of this frame in the given + frame with respect to one or more provided generalized speeds. + + Parameters + ========== + frame : ReferenceFrame + The frame with which the angular velocity is defined in. + gen_speeds : functions of time + The generalized speeds. + + Returns + ======= + partial_velocities : tuple of Vector + The partial angular velocity vectors corresponding to the provided + generalized speeds. + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols + >>> N = ReferenceFrame('N') + >>> A = ReferenceFrame('A') + >>> u1, u2 = dynamicsymbols('u1, u2') + >>> A.set_ang_vel(N, u1 * A.x + u2 * N.y) + >>> A.partial_velocity(N, u1) + A.x + >>> A.partial_velocity(N, u1, u2) + (A.x, N.y) + + """ + + partials = [self.ang_vel_in(frame).diff(speed, frame, var_in_dcm=False) + for speed in gen_speeds] + + if len(partials) == 1: + return partials[0] + else: + return tuple(partials) + + +def _check_frame(other): + from .vector import VectorTypeError + if not isinstance(other, ReferenceFrame): + raise VectorTypeError(other, ReferenceFrame('A')) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/functions.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..d80c6ad4d8cf73f1389220d6f0faa4c7f0cf0b63 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/functions.py @@ -0,0 +1,644 @@ +from functools import reduce + +from sympy.core.backend import (sympify, diff, sin, cos, Matrix, symbols, + Function, S, Symbol) +from sympy.integrals.integrals import integrate +from sympy.simplify.trigsimp import trigsimp +from .vector import Vector, _check_vector +from .frame import CoordinateSym, _check_frame +from .dyadic import Dyadic +from .printing import vprint, vsprint, vpprint, vlatex, init_vprinting +from sympy.utilities.iterables import iterable +from sympy.utilities.misc import translate + +__all__ = ['cross', 'dot', 'express', 'time_derivative', 'outer', + 'kinematic_equations', 'get_motion_params', 'partial_velocity', + 'dynamicsymbols', 'vprint', 'vsprint', 'vpprint', 'vlatex', + 'init_vprinting'] + + +def cross(vec1, vec2): + """Cross product convenience wrapper for Vector.cross(): \n""" + if not isinstance(vec1, (Vector, Dyadic)): + raise TypeError('Cross product is between two vectors') + return vec1 ^ vec2 + + +cross.__doc__ += Vector.cross.__doc__ # type: ignore + + +def dot(vec1, vec2): + """Dot product convenience wrapper for Vector.dot(): \n""" + if not isinstance(vec1, (Vector, Dyadic)): + raise TypeError('Dot product is between two vectors') + return vec1 & vec2 + + +dot.__doc__ += Vector.dot.__doc__ # type: ignore + + +def express(expr, frame, frame2=None, variables=False): + """ + Global function for 'express' functionality. + + Re-expresses a Vector, scalar(sympyfiable) or Dyadic in given frame. + + Refer to the local methods of Vector and Dyadic for details. + If 'variables' is True, then the coordinate variables (CoordinateSym + instances) of other frames present in the vector/scalar field or + dyadic expression are also substituted in terms of the base scalars of + this frame. 
+ + Parameters + ========== + + expr : Vector/Dyadic/scalar(sympyfiable) + The expression to re-express in ReferenceFrame 'frame' + + frame: ReferenceFrame + The reference frame to express expr in + + frame2 : ReferenceFrame + The other frame required for re-expression(only for Dyadic expr) + + variables : boolean + Specifies whether to substitute the coordinate variables present + in expr, in terms of those of frame + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols + >>> from sympy.physics.vector import init_vprinting + >>> init_vprinting(pretty_print=False) + >>> N = ReferenceFrame('N') + >>> q = dynamicsymbols('q') + >>> B = N.orientnew('B', 'Axis', [q, N.z]) + >>> d = outer(N.x, N.x) + >>> from sympy.physics.vector import express + >>> express(d, B, N) + cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x) + >>> express(B.x, N) + cos(q)*N.x + sin(q)*N.y + >>> express(N[0], B, variables=True) + B_x*cos(q) - B_y*sin(q) + + """ + + _check_frame(frame) + + if expr == 0: + return expr + + if isinstance(expr, Vector): + # Given expr is a Vector + if variables: + # If variables attribute is True, substitute the coordinate + # variables in the Vector + frame_list = [x[-1] for x in expr.args] + subs_dict = {} + for f in frame_list: + subs_dict.update(f.variable_map(frame)) + expr = expr.subs(subs_dict) + # Re-express in this frame + outvec = Vector([]) + for i, v in enumerate(expr.args): + if v[1] != frame: + temp = frame.dcm(v[1]) * v[0] + if Vector.simp: + temp = temp.applyfunc(lambda x: + trigsimp(x, method='fu')) + outvec += Vector([(temp, frame)]) + else: + outvec += Vector([v]) + return outvec + + if isinstance(expr, Dyadic): + if frame2 is None: + frame2 = frame + _check_frame(frame2) + ol = Dyadic(0) + for i, v in enumerate(expr.args): + ol += express(v[0], frame, variables=variables) * \ + (express(v[1], frame, variables=variables) | + express(v[2], frame2, variables=variables)) + return ol + + else: + if variables: + # Given expr is a scalar field + frame_set = set() + expr = sympify(expr) + # Substitute all the coordinate variables + for x in expr.free_symbols: + if isinstance(x, CoordinateSym) and x.frame != frame: + frame_set.add(x.frame) + subs_dict = {} + for f in frame_set: + subs_dict.update(f.variable_map(frame)) + return expr.subs(subs_dict) + return expr + + +def time_derivative(expr, frame, order=1): + """ + Calculate the time derivative of a vector/scalar field function + or dyadic expression in given frame. 
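+
+    Vectors whose measure numbers are given in a different, rotating frame
+    are handled via the kinematic transport theorem: their derivative in that
+    frame plus the angular velocity cross product term, as in the reference
+    below.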
+ + References + ========== + + https://en.wikipedia.org/wiki/Rotating_reference_frame#Time_derivatives_in_the_two_frames + + Parameters + ========== + + expr : Vector/Dyadic/sympifyable + The expression whose time derivative is to be calculated + + frame : ReferenceFrame + The reference frame to calculate the time derivative in + + order : integer + The order of the derivative to be calculated + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols + >>> from sympy.physics.vector import init_vprinting + >>> init_vprinting(pretty_print=False) + >>> from sympy import Symbol + >>> q1 = Symbol('q1') + >>> u1 = dynamicsymbols('u1') + >>> N = ReferenceFrame('N') + >>> A = N.orientnew('A', 'Axis', [q1, N.x]) + >>> v = u1 * N.x + >>> A.set_ang_vel(N, 10*A.x) + >>> from sympy.physics.vector import time_derivative + >>> time_derivative(v, N) + u1'*N.x + >>> time_derivative(u1*A[0], N) + N_x*u1' + >>> B = N.orientnew('B', 'Axis', [u1, N.z]) + >>> from sympy.physics.vector import outer + >>> d = outer(N.x, N.x) + >>> time_derivative(d, B) + - u1'*(N.y|N.x) - u1'*(N.x|N.y) + + """ + + t = dynamicsymbols._t + _check_frame(frame) + + if order == 0: + return expr + if order % 1 != 0 or order < 0: + raise ValueError("Unsupported value of order entered") + + if isinstance(expr, Vector): + outlist = [] + for i, v in enumerate(expr.args): + if v[1] == frame: + outlist += [(express(v[0], frame, variables=True).diff(t), + frame)] + else: + outlist += (time_derivative(Vector([v]), v[1]) + + (v[1].ang_vel_in(frame) ^ Vector([v]))).args + outvec = Vector(outlist) + return time_derivative(outvec, frame, order - 1) + + if isinstance(expr, Dyadic): + ol = Dyadic(0) + for i, v in enumerate(expr.args): + ol += (v[0].diff(t) * (v[1] | v[2])) + ol += (v[0] * (time_derivative(v[1], frame) | v[2])) + ol += (v[0] * (v[1] | time_derivative(v[2], frame))) + return time_derivative(ol, frame, order - 1) + + else: + return diff(express(expr, frame, variables=True), t, order) + + +def outer(vec1, vec2): + """Outer product convenience wrapper for Vector.outer():\n""" + if not isinstance(vec1, Vector): + raise TypeError('Outer product is between two Vectors') + return vec1 | vec2 + + +outer.__doc__ += Vector.outer.__doc__ # type: ignore + + +def kinematic_equations(speeds, coords, rot_type, rot_order=''): + """Gives equations relating the qdot's to u's for a rotation type. + + Supply rotation type and order as in orient. Speeds are assumed to be + body-fixed; if we are defining the orientation of B in A using by rot_type, + the angular velocity of B in A is assumed to be in the form: speed[0]*B.x + + speed[1]*B.y + speed[2]*B.z + + Parameters + ========== + + speeds : list of length 3 + The body fixed angular velocity measure numbers. + coords : list of length 3 or 4 + The coordinates used to define the orientation of the two frames. + rot_type : str + The type of rotation used to create the equations. Body, Space, or + Quaternion only + rot_order : str or int + If applicable, the order of a series of rotations. + + Examples + ======== + + >>> from sympy.physics.vector import dynamicsymbols + >>> from sympy.physics.vector import kinematic_equations, vprint + >>> u1, u2, u3 = dynamicsymbols('u1 u2 u3') + >>> q1, q2, q3 = dynamicsymbols('q1 q2 q3') + >>> vprint(kinematic_equations([u1,u2,u3], [q1,q2,q3], 'body', '313'), + ... 
order=None) + [-(u1*sin(q3) + u2*cos(q3))/sin(q2) + q1', -u1*cos(q3) + u2*sin(q3) + q2', (u1*sin(q3) + u2*cos(q3))*cos(q2)/sin(q2) - u3 + q3'] + + """ + + # Code below is checking and sanitizing input + approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131', + '212', '232', '313', '323', '1', '2', '3', '') + # make sure XYZ => 123 and rot_type is in lower case + rot_order = translate(str(rot_order), 'XYZxyz', '123123') + rot_type = rot_type.lower() + + if not isinstance(speeds, (list, tuple)): + raise TypeError('Need to supply speeds in a list') + if len(speeds) != 3: + raise TypeError('Need to supply 3 body-fixed speeds') + if not isinstance(coords, (list, tuple)): + raise TypeError('Need to supply coordinates in a list') + if rot_type in ['body', 'space']: + if rot_order not in approved_orders: + raise ValueError('Not an acceptable rotation order') + if len(coords) != 3: + raise ValueError('Need 3 coordinates for body or space') + # Actual hard-coded kinematic differential equations + w1, w2, w3 = speeds + if w1 == w2 == w3 == 0: + return [S.Zero]*3 + q1, q2, q3 = coords + q1d, q2d, q3d = [diff(i, dynamicsymbols._t) for i in coords] + s1, s2, s3 = [sin(q1), sin(q2), sin(q3)] + c1, c2, c3 = [cos(q1), cos(q2), cos(q3)] + if rot_type == 'body': + if rot_order == '123': + return [q1d - (w1 * c3 - w2 * s3) / c2, q2d - w1 * s3 - w2 * + c3, q3d - (-w1 * c3 + w2 * s3) * s2 / c2 - w3] + if rot_order == '231': + return [q1d - (w2 * c3 - w3 * s3) / c2, q2d - w2 * s3 - w3 * + c3, q3d - w1 - (- w2 * c3 + w3 * s3) * s2 / c2] + if rot_order == '312': + return [q1d - (-w1 * s3 + w3 * c3) / c2, q2d - w1 * c3 - w3 * + s3, q3d - (w1 * s3 - w3 * c3) * s2 / c2 - w2] + if rot_order == '132': + return [q1d - (w1 * c3 + w3 * s3) / c2, q2d + w1 * s3 - w3 * + c3, q3d - (w1 * c3 + w3 * s3) * s2 / c2 - w2] + if rot_order == '213': + return [q1d - (w1 * s3 + w2 * c3) / c2, q2d - w1 * c3 + w2 * + s3, q3d - (w1 * s3 + w2 * c3) * s2 / c2 - w3] + if rot_order == '321': + return [q1d - (w2 * s3 + w3 * c3) / c2, q2d - w2 * c3 + w3 * + s3, q3d - w1 - (w2 * s3 + w3 * c3) * s2 / c2] + if rot_order == '121': + return [q1d - (w2 * s3 + w3 * c3) / s2, q2d - w2 * c3 + w3 * + s3, q3d - w1 + (w2 * s3 + w3 * c3) * c2 / s2] + if rot_order == '131': + return [q1d - (-w2 * c3 + w3 * s3) / s2, q2d - w2 * s3 - w3 * + c3, q3d - w1 - (w2 * c3 - w3 * s3) * c2 / s2] + if rot_order == '212': + return [q1d - (w1 * s3 - w3 * c3) / s2, q2d - w1 * c3 - w3 * + s3, q3d - (-w1 * s3 + w3 * c3) * c2 / s2 - w2] + if rot_order == '232': + return [q1d - (w1 * c3 + w3 * s3) / s2, q2d + w1 * s3 - w3 * + c3, q3d + (w1 * c3 + w3 * s3) * c2 / s2 - w2] + if rot_order == '313': + return [q1d - (w1 * s3 + w2 * c3) / s2, q2d - w1 * c3 + w2 * + s3, q3d + (w1 * s3 + w2 * c3) * c2 / s2 - w3] + if rot_order == '323': + return [q1d - (-w1 * c3 + w2 * s3) / s2, q2d - w1 * s3 - w2 * + c3, q3d - (w1 * c3 - w2 * s3) * c2 / s2 - w3] + if rot_type == 'space': + if rot_order == '123': + return [q1d - w1 - (w2 * s1 + w3 * c1) * s2 / c2, q2d - w2 * + c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / c2] + if rot_order == '231': + return [q1d - (w1 * c1 + w3 * s1) * s2 / c2 - w2, q2d + w1 * + s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / c2] + if rot_order == '312': + return [q1d - (w1 * s1 + w2 * c1) * s2 / c2 - w3, q2d - w1 * + c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / c2] + if rot_order == '132': + return [q1d - w1 - (-w2 * c1 + w3 * s1) * s2 / c2, q2d - w2 * + s1 - w3 * c1, q3d - (w2 * c1 - w3 * s1) / c2] + if rot_order == '213': + return [q1d - (w1 * s1 - w3 * c1) 
* s2 / c2 - w2, q2d - w1 * + c1 - w3 * s1, q3d - (-w1 * s1 + w3 * c1) / c2] + if rot_order == '321': + return [q1d - (-w1 * c1 + w2 * s1) * s2 / c2 - w3, q2d - w1 * + s1 - w2 * c1, q3d - (w1 * c1 - w2 * s1) / c2] + if rot_order == '121': + return [q1d - w1 + (w2 * s1 + w3 * c1) * c2 / s2, q2d - w2 * + c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / s2] + if rot_order == '131': + return [q1d - w1 - (w2 * c1 - w3 * s1) * c2 / s2, q2d - w2 * + s1 - w3 * c1, q3d - (-w2 * c1 + w3 * s1) / s2] + if rot_order == '212': + return [q1d - (-w1 * s1 + w3 * c1) * c2 / s2 - w2, q2d - w1 * + c1 - w3 * s1, q3d - (w1 * s1 - w3 * c1) / s2] + if rot_order == '232': + return [q1d + (w1 * c1 + w3 * s1) * c2 / s2 - w2, q2d + w1 * + s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / s2] + if rot_order == '313': + return [q1d + (w1 * s1 + w2 * c1) * c2 / s2 - w3, q2d - w1 * + c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / s2] + if rot_order == '323': + return [q1d - (w1 * c1 - w2 * s1) * c2 / s2 - w3, q2d - w1 * + s1 - w2 * c1, q3d - (-w1 * c1 + w2 * s1) / s2] + elif rot_type == 'quaternion': + if rot_order != '': + raise ValueError('Cannot have rotation order for quaternion') + if len(coords) != 4: + raise ValueError('Need 4 coordinates for quaternion') + # Actual hard-coded kinematic differential equations + e0, e1, e2, e3 = coords + w = Matrix(speeds + [0]) + E = Matrix([[e0, -e3, e2, e1], + [e3, e0, -e1, e2], + [-e2, e1, e0, e3], + [-e1, -e2, -e3, e0]]) + edots = Matrix([diff(i, dynamicsymbols._t) for i in [e1, e2, e3, e0]]) + return list(edots.T - 0.5 * w.T * E.T) + else: + raise ValueError('Not an approved rotation type for this function') + + +def get_motion_params(frame, **kwargs): + """ + Returns the three motion parameters - (acceleration, velocity, and + position) as vectorial functions of time in the given frame. + + If a higher order differential function is provided, the lower order + functions are used as boundary conditions. For example, given the + acceleration, the velocity and position parameters are taken as + boundary conditions. + + The values of time at which the boundary conditions are specified + are taken from timevalue1(for position boundary condition) and + timevalue2(for velocity boundary condition). + + If any of the boundary conditions are not provided, they are taken + to be zero by default (zero vectors, in case of vectorial inputs). If + the boundary conditions are also functions of time, they are converted + to constants by substituting the time values in the dynamicsymbols._t + time Symbol. + + This function can also be used for calculating rotational motion + parameters. Have a look at the Parameters and Examples for more clarity. 
+
+    Parameters
+    ==========
+
+    frame : ReferenceFrame
+        The frame to express the motion parameters in
+
+    acceleration : Vector
+        Acceleration of the object/frame as a function of time
+
+    velocity : Vector
+        Velocity as function of time or as boundary condition
+        of velocity at time = timevalue2
+
+    position : Vector
+        Position as function of time or as boundary condition
+        of position at time = timevalue1
+
+    timevalue1 : sympyfiable
+        Value of time for position boundary condition
+
+    timevalue2 : sympyfiable
+        Value of time for velocity boundary condition
+
+    Examples
+    ========
+
+    >>> from sympy.physics.vector import ReferenceFrame, get_motion_params, dynamicsymbols
+    >>> from sympy.physics.vector import init_vprinting
+    >>> init_vprinting(pretty_print=False)
+    >>> from sympy import symbols
+    >>> R = ReferenceFrame('R')
+    >>> v1, v2, v3 = dynamicsymbols('v1 v2 v3')
+    >>> v = v1*R.x + v2*R.y + v3*R.z
+    >>> get_motion_params(R, position = v)
+    (v1''*R.x + v2''*R.y + v3''*R.z, v1'*R.x + v2'*R.y + v3'*R.z, v1*R.x + v2*R.y + v3*R.z)
+    >>> a, b, c = symbols('a b c')
+    >>> v = a*R.x + b*R.y + c*R.z
+    >>> get_motion_params(R, velocity = v)
+    (0, a*R.x + b*R.y + c*R.z, a*t*R.x + b*t*R.y + c*t*R.z)
+    >>> parameters = get_motion_params(R, acceleration = v)
+    >>> parameters[1]
+    a*t*R.x + b*t*R.y + c*t*R.z
+    >>> parameters[2]
+    a*t**2/2*R.x + b*t**2/2*R.y + c*t**2/2*R.z
+
+    """
+
+    def _process_vector_differential(vectdiff, condition, variable, ordinate,
+                                     frame):
+        """
+        Helper function for get_motion methods. Finds derivative of vectdiff
+        wrt variable, and its integral using the specified boundary condition
+        at value of variable = ordinate.
+        Returns a tuple (derivative, function, integral) of vectdiff.
+
+        """
+
+        # Make sure boundary condition is independent of 'variable'
+        if condition != 0:
+            condition = express(condition, frame, variables=True)
+        # Special case of vectdiff == 0
+        if vectdiff == Vector(0):
+            return (0, 0, condition)
+        # Express vectdiff completely in condition's frame to give vectdiff1
+        vectdiff1 = express(vectdiff, frame)
+        # Find derivative of vectdiff
+        vectdiff2 = time_derivative(vectdiff, frame)
+        # Integrate and use boundary condition
+        vectdiff0 = Vector(0)
+        lims = (variable, ordinate, variable)
+        for dim in frame:
+            function1 = vectdiff1.dot(dim)
+            abscissa = dim.dot(condition).subs({variable: ordinate})
+            # Indefinite integral of 'function1' wrt 'variable', using
+            # the given initial condition (ordinate, abscissa).
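+            # integrate(function1, (variable, ordinate, variable)) is the
+            # definite integral from the boundary instant 'ordinate' up to
+            # the current value of 'variable'; adding 'abscissa' makes the
+            # result satisfy the boundary condition at variable = ordinate.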
+ vectdiff0 += (integrate(function1, lims) + abscissa) * dim + # Return tuple + return (vectdiff2, vectdiff, vectdiff0) + + _check_frame(frame) + # Decide mode of operation based on user's input + if 'acceleration' in kwargs: + mode = 2 + elif 'velocity' in kwargs: + mode = 1 + else: + mode = 0 + # All the possible parameters in kwargs + # Not all are required for every case + # If not specified, set to default values(may or may not be used in + # calculations) + conditions = ['acceleration', 'velocity', 'position', + 'timevalue', 'timevalue1', 'timevalue2'] + for i, x in enumerate(conditions): + if x not in kwargs: + if i < 3: + kwargs[x] = Vector(0) + else: + kwargs[x] = S.Zero + elif i < 3: + _check_vector(kwargs[x]) + else: + kwargs[x] = sympify(kwargs[x]) + if mode == 2: + vel = _process_vector_differential(kwargs['acceleration'], + kwargs['velocity'], + dynamicsymbols._t, + kwargs['timevalue2'], frame)[2] + pos = _process_vector_differential(vel, kwargs['position'], + dynamicsymbols._t, + kwargs['timevalue1'], frame)[2] + return (kwargs['acceleration'], vel, pos) + elif mode == 1: + return _process_vector_differential(kwargs['velocity'], + kwargs['position'], + dynamicsymbols._t, + kwargs['timevalue1'], frame) + else: + vel = time_derivative(kwargs['position'], frame) + acc = time_derivative(vel, frame) + return (acc, vel, kwargs['position']) + + +def partial_velocity(vel_vecs, gen_speeds, frame): + """Returns a list of partial velocities with respect to the provided + generalized speeds in the given reference frame for each of the supplied + velocity vectors. + + The output is a list of lists. The outer list has a number of elements + equal to the number of supplied velocity vectors. The inner lists are, for + each velocity vector, the partial derivatives of that velocity vector with + respect to the generalized speeds supplied. + + Parameters + ========== + + vel_vecs : iterable + An iterable of velocity vectors (angular or linear). + gen_speeds : iterable + An iterable of generalized speeds. + frame : ReferenceFrame + The reference frame that the partial derivatives are going to be taken + in. + + Examples + ======== + + >>> from sympy.physics.vector import Point, ReferenceFrame + >>> from sympy.physics.vector import dynamicsymbols + >>> from sympy.physics.vector import partial_velocity + >>> u = dynamicsymbols('u') + >>> N = ReferenceFrame('N') + >>> P = Point('P') + >>> P.set_vel(N, u * N.x) + >>> vel_vecs = [P.vel(N)] + >>> gen_speeds = [u] + >>> partial_velocity(vel_vecs, gen_speeds, N) + [[N.x]] + + """ + + if not iterable(vel_vecs): + raise TypeError('Velocity vectors must be contained in an iterable.') + + if not iterable(gen_speeds): + raise TypeError('Generalized speeds must be contained in an iterable') + + vec_partials = [] + for vec in vel_vecs: + partials = [] + for speed in gen_speeds: + partials.append(vec.diff(speed, frame, var_in_dcm=False)) + vec_partials.append(partials) + + return vec_partials + + +def dynamicsymbols(names, level=0, **assumptions): + """Uses symbols and Function for functions of time. + + Creates a SymPy UndefinedFunction, which is then initialized as a function + of a variable, the default being Symbol('t'). + + Parameters + ========== + + names : str + Names of the dynamic symbols you want to create; works the same way as + inputs to symbols + level : int + Level of differentiation of the returned function; d/dt once of t, + twice of t, etc. + assumptions : + - real(bool) : This is used to set the dynamicsymbol as real, + by default is False. 
+        - positive(bool) : This is used to set the dynamicsymbol as positive,
+          by default is False.
+        - commutative(bool) : This is used to set the commutative property of
+          a dynamicsymbol, by default is True.
+        - integer(bool) : This is used to set the dynamicsymbol as integer,
+          by default is False.
+
+    Examples
+    ========
+
+    >>> from sympy.physics.vector import dynamicsymbols
+    >>> from sympy import diff, Symbol
+    >>> q1 = dynamicsymbols('q1')
+    >>> q1
+    q1(t)
+    >>> q2 = dynamicsymbols('q2', real=True)
+    >>> q2.is_real
+    True
+    >>> q3 = dynamicsymbols('q3', positive=True)
+    >>> q3.is_positive
+    True
+    >>> q4, q5 = dynamicsymbols('q4,q5', commutative=False)
+    >>> bool(q4*q5 != q5*q4)
+    True
+    >>> q6 = dynamicsymbols('q6', integer=True)
+    >>> q6.is_integer
+    True
+    >>> diff(q1, Symbol('t'))
+    Derivative(q1(t), t)
+
+    """
+    esses = symbols(names, cls=Function, **assumptions)
+    t = dynamicsymbols._t
+    if iterable(esses):
+        esses = [reduce(diff, [t] * level, e(t)) for e in esses]
+        return esses
+    else:
+        return reduce(diff, [t] * level, esses(t))
+
+
+dynamicsymbols._t = Symbol('t')  # type: ignore
+dynamicsymbols._str = '\''  # type: ignore
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/point.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/point.py
new file mode 100644
index 0000000000000000000000000000000000000000..14e5db08da9b7bb0cc9a10291422c3d17c6ea87b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/point.py
@@ -0,0 +1,629 @@
+from .vector import Vector, _check_vector
+from .frame import _check_frame
+from warnings import warn
+
+__all__ = ['Point']
+
+
+class Point:
+    """This object represents a point in a dynamic system.
+
+    It stores the position, velocity, and acceleration of a point.
+    The position is a vector defined as the vector distance from a parent
+    point to this point.
+
+    Parameters
+    ==========
+
+    name : string
+        The display name of the Point
+
+    Examples
+    ========
+
+    >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
+    >>> from sympy.physics.vector import init_vprinting
+    >>> init_vprinting(pretty_print=False)
+    >>> N = ReferenceFrame('N')
+    >>> O = Point('O')
+    >>> P = Point('P')
+    >>> u1, u2, u3 = dynamicsymbols('u1 u2 u3')
+    >>> O.set_vel(N, u1 * N.x + u2 * N.y + u3 * N.z)
+    >>> O.acc(N)
+    u1'*N.x + u2'*N.y + u3'*N.z
+
+    ``symbols()`` can be used to create multiple Points in a single step, for
+    example:
+
+    >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
+    >>> from sympy.physics.vector import init_vprinting
+    >>> init_vprinting(pretty_print=False)
+    >>> from sympy import symbols
+    >>> N = ReferenceFrame('N')
+    >>> u1, u2 = dynamicsymbols('u1 u2')
+    >>> A, B = symbols('A B', cls=Point)
+    >>> type(A)
+    <class 'sympy.physics.vector.point.Point'>
+    >>> A.set_vel(N, u1 * N.x + u2 * N.y)
+    >>> B.set_vel(N, u2 * N.x + u1 * N.y)
+    >>> A.acc(N) - B.acc(N)
+    (u1' - u2')*N.x + (-u1' + u2')*N.y
+
+    """
+
+    def __init__(self, name):
+        """Initialization of a Point object. """
+        self.name = name
+        self._pos_dict = {}
+        self._vel_dict = {}
+        self._acc_dict = {}
+        self._pdlist = [self._pos_dict, self._vel_dict, self._acc_dict]
+
+    def __str__(self):
+        return self.name
+
+    __repr__ = __str__
+
+    def _check_point(self, other):
+        if not isinstance(other, Point):
+            raise TypeError('A Point must be supplied')
+
+    def _pdict_list(self, other, num):
+        """Returns a list of points that gives the shortest path with respect
+        to position, velocity, or acceleration from this point to the provided
+        point.
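+
+        The search is an exhaustive breadth-first expansion of the relevant
+        relationship dictionary, so the shortest available path is returned.
+        For example, with points connected as O--P--Q, a hypothetical call
+        ``Q._pdict_list(O, 0)`` returns ``[Q, P, O]``.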
+
+        Parameters
+        ==========
+        other : Point
+            A point that may be related to this point by position, velocity,
+            or acceleration.
+        num : integer
+            0 for searching the position tree, 1 for searching the velocity
+            tree, and 2 for searching the acceleration tree.
+
+        Returns
+        =======
+        list of Points
+            A sequence of points from self to other.
+
+        Notes
+        =====
+
+        It is not clear if num = 1 or num = 2 actually works because the keys
+        to ``_vel_dict`` and ``_acc_dict`` are :class:`ReferenceFrame` objects
+        which do not have the ``_pdlist`` attribute.
+
+        """
+        outlist = [[self]]
+        oldlist = [[]]
+        while outlist != oldlist:
+            oldlist = outlist[:]
+            for v in outlist:
+                templist = v[-1]._pdlist[num].keys()
+                for v2 in templist:
+                    if v2 not in v:
+                        littletemplist = v + [v2]
+                        if littletemplist not in outlist:
+                            outlist.append(littletemplist)
+            for v in oldlist:
+                if v[-1] != other:
+                    outlist.remove(v)
+        outlist.sort(key=len)
+        if len(outlist) != 0:
+            return outlist[0]
+        raise ValueError('No Connecting Path found between ' + other.name +
+                         ' and ' + self.name)
+
+    def a1pt_theory(self, otherpoint, outframe, interframe):
+        """Sets the acceleration of this point with the 1-point theory.
+
+        The 1-point theory for point acceleration looks like this:
+
+        ^N a^P = ^B a^P + ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B
+        x r^OP) + 2 ^N omega^B x ^B v^P
+
+        where O is a point fixed in B, P is a point moving in B, and B is
+        rotating in frame N.
+
+        Parameters
+        ==========
+
+        otherpoint : Point
+            The first point of the 1-point theory (O)
+        outframe : ReferenceFrame
+            The frame we want this point's acceleration defined in (N)
+        interframe : ReferenceFrame
+            The intermediate frame in this calculation (B)
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import Point, ReferenceFrame
+        >>> from sympy.physics.vector import dynamicsymbols
+        >>> from sympy.physics.vector import init_vprinting
+        >>> init_vprinting(pretty_print=False)
+        >>> q = dynamicsymbols('q')
+        >>> q2 = dynamicsymbols('q2')
+        >>> qd = dynamicsymbols('q', 1)
+        >>> q2d = dynamicsymbols('q2', 1)
+        >>> N = ReferenceFrame('N')
+        >>> B = ReferenceFrame('B')
+        >>> B.set_ang_vel(N, 5 * B.y)
+        >>> O = Point('O')
+        >>> P = O.locatenew('P', q * B.x)
+        >>> P.set_vel(B, qd * B.x + q2d * B.y)
+        >>> O.set_vel(N, 0)
+        >>> P.a1pt_theory(O, N, B)
+        (-25*q + q'')*B.x + q2''*B.y - 10*q'*B.z
+
+        """
+
+        _check_frame(outframe)
+        _check_frame(interframe)
+        self._check_point(otherpoint)
+        dist = self.pos_from(otherpoint)
+        v = self.vel(interframe)
+        a1 = otherpoint.acc(outframe)
+        a2 = self.acc(interframe)
+        omega = interframe.ang_vel_in(outframe)
+        alpha = interframe.ang_acc_in(outframe)
+        self.set_acc(outframe, a2 + 2 * (omega ^ v) + a1 + (alpha ^ dist) +
+                     (omega ^ (omega ^ dist)))
+        return self.acc(outframe)
+
+    def a2pt_theory(self, otherpoint, outframe, fixedframe):
+        """Sets the acceleration of this point with the 2-point theory.
+
+        The 2-point theory for point acceleration looks like this:
+
+        ^N a^P = ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP)
+
+        where O and P are both points fixed in frame B, which is rotating in
+        frame N.
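+
+        This is the 1-point theory with the ^B a^P and ^B v^P terms dropped,
+        since P is fixed in B.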
+
+        Parameters
+        ==========
+
+        otherpoint : Point
+            The first point of the 2-point theory (O)
+        outframe : ReferenceFrame
+            The frame we want this point's acceleration defined in (N)
+        fixedframe : ReferenceFrame
+            The frame in which both points are fixed (B)
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
+        >>> from sympy.physics.vector import init_vprinting
+        >>> init_vprinting(pretty_print=False)
+        >>> q = dynamicsymbols('q')
+        >>> qd = dynamicsymbols('q', 1)
+        >>> N = ReferenceFrame('N')
+        >>> B = N.orientnew('B', 'Axis', [q, N.z])
+        >>> O = Point('O')
+        >>> P = O.locatenew('P', 10 * B.x)
+        >>> O.set_vel(N, 5 * N.x)
+        >>> P.a2pt_theory(O, N, B)
+        - 10*q'**2*B.x + 10*q''*B.y
+
+        """
+
+        _check_frame(outframe)
+        _check_frame(fixedframe)
+        self._check_point(otherpoint)
+        dist = self.pos_from(otherpoint)
+        a = otherpoint.acc(outframe)
+        omega = fixedframe.ang_vel_in(outframe)
+        alpha = fixedframe.ang_acc_in(outframe)
+        self.set_acc(outframe, a + (alpha ^ dist) + (omega ^ (omega ^ dist)))
+        return self.acc(outframe)
+
+    def acc(self, frame):
+        """The acceleration Vector of this Point in a ReferenceFrame.
+
+        Parameters
+        ==========
+
+        frame : ReferenceFrame
+            The frame in which the returned acceleration vector is defined.
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import Point, ReferenceFrame
+        >>> N = ReferenceFrame('N')
+        >>> p1 = Point('p1')
+        >>> p1.set_acc(N, 10 * N.x)
+        >>> p1.acc(N)
+        10*N.x
+
+        """
+
+        _check_frame(frame)
+        if frame not in self._acc_dict:
+            if self.vel(frame) != 0:
+                return (self._vel_dict[frame]).dt(frame)
+            else:
+                return Vector(0)
+        return self._acc_dict[frame]
+
+    def locatenew(self, name, value):
+        """Creates a new point with a position defined from this point.
+
+        Parameters
+        ==========
+
+        name : str
+            The name for the new point
+        value : Vector
+            The position of the new point relative to this point
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame, Point
+        >>> N = ReferenceFrame('N')
+        >>> P1 = Point('P1')
+        >>> P2 = P1.locatenew('P2', 10 * N.x)
+
+        """
+
+        if not isinstance(name, str):
+            raise TypeError('Must supply a valid name')
+        if value == 0:
+            value = Vector(0)
+        value = _check_vector(value)
+        p = Point(name)
+        p.set_pos(self, value)
+        self.set_pos(p, -value)
+        return p
+
+    def pos_from(self, otherpoint):
+        """Returns the Vector distance between this Point and the other Point.
+
+        Parameters
+        ==========
+
+        otherpoint : Point
+            The other point that this point's position is defined relative to
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import Point, ReferenceFrame
+        >>> N = ReferenceFrame('N')
+        >>> p1 = Point('p1')
+        >>> p2 = Point('p2')
+        >>> p1.set_pos(p2, 10 * N.x)
+        >>> p1.pos_from(p2)
+        10*N.x
+
+        """
+
+        outvec = Vector(0)
+        plist = self._pdict_list(otherpoint, 0)
+        for i in range(len(plist) - 1):
+            outvec += plist[i]._pos_dict[plist[i + 1]]
+        return outvec
+
+    def set_acc(self, frame, value):
+        """Used to set the acceleration of this Point in a ReferenceFrame.
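+
+        The value is stored per frame; once set, a later call to ``acc`` in
+        that frame returns the stored value instead of differentiating the
+        velocity.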
+ + Parameters + ========== + + frame : ReferenceFrame + The frame in which this point's acceleration is defined + value : Vector + The vector value of this point's acceleration in the frame + + Examples + ======== + + >>> from sympy.physics.vector import Point, ReferenceFrame + >>> N = ReferenceFrame('N') + >>> p1 = Point('p1') + >>> p1.set_acc(N, 10 * N.x) + >>> p1.acc(N) + 10*N.x + + """ + + if value == 0: + value = Vector(0) + value = _check_vector(value) + _check_frame(frame) + self._acc_dict.update({frame: value}) + + def set_pos(self, otherpoint, value): + """Used to set the position of this point w.r.t. another point. + + Parameters + ========== + + otherpoint : Point + The other point which this point's location is defined relative to + value : Vector + The vector which defines the location of this point + + Examples + ======== + + >>> from sympy.physics.vector import Point, ReferenceFrame + >>> N = ReferenceFrame('N') + >>> p1 = Point('p1') + >>> p2 = Point('p2') + >>> p1.set_pos(p2, 10 * N.x) + >>> p1.pos_from(p2) + 10*N.x + + """ + + if value == 0: + value = Vector(0) + value = _check_vector(value) + self._check_point(otherpoint) + self._pos_dict.update({otherpoint: value}) + otherpoint._pos_dict.update({self: -value}) + + def set_vel(self, frame, value): + """Sets the velocity Vector of this Point in a ReferenceFrame. + + Parameters + ========== + + frame : ReferenceFrame + The frame in which this point's velocity is defined + value : Vector + The vector value of this point's velocity in the frame + + Examples + ======== + + >>> from sympy.physics.vector import Point, ReferenceFrame + >>> N = ReferenceFrame('N') + >>> p1 = Point('p1') + >>> p1.set_vel(N, 10 * N.x) + >>> p1.vel(N) + 10*N.x + + """ + + if value == 0: + value = Vector(0) + value = _check_vector(value) + _check_frame(frame) + self._vel_dict.update({frame: value}) + + def v1pt_theory(self, otherpoint, outframe, interframe): + """Sets the velocity of this point with the 1-point theory. + + The 1-point theory for point velocity looks like this: + + ^N v^P = ^B v^P + ^N v^O + ^N omega^B x r^OP + + where O is a point fixed in B, P is a point moving in B, and B is + rotating in frame N. + + Parameters + ========== + + otherpoint : Point + The first point of the 1-point theory (O) + outframe : ReferenceFrame + The frame we want this point's velocity defined in (N) + interframe : ReferenceFrame + The intermediate frame in this calculation (B) + + Examples + ======== + + >>> from sympy.physics.vector import Point, ReferenceFrame + >>> from sympy.physics.vector import dynamicsymbols + >>> from sympy.physics.vector import init_vprinting + >>> init_vprinting(pretty_print=False) + >>> q = dynamicsymbols('q') + >>> q2 = dynamicsymbols('q2') + >>> qd = dynamicsymbols('q', 1) + >>> q2d = dynamicsymbols('q2', 1) + >>> N = ReferenceFrame('N') + >>> B = ReferenceFrame('B') + >>> B.set_ang_vel(N, 5 * B.y) + >>> O = Point('O') + >>> P = O.locatenew('P', q * B.x) + >>> P.set_vel(B, qd * B.x + q2d * B.y) + >>> O.set_vel(N, 0) + >>> P.v1pt_theory(O, N, B) + q'*B.x + q2'*B.y - 5*q*B.z + + """ + + _check_frame(outframe) + _check_frame(interframe) + self._check_point(otherpoint) + dist = self.pos_from(otherpoint) + v1 = self.vel(interframe) + v2 = otherpoint.vel(outframe) + omega = interframe.ang_vel_in(outframe) + self.set_vel(outframe, v1 + v2 + (omega ^ dist)) + return self.vel(outframe) + + def v2pt_theory(self, otherpoint, outframe, fixedframe): + """Sets the velocity of this point with the 2-point theory. 
+
+        The 2-point theory for point velocity looks like this:
+
+        ^N v^P = ^N v^O + ^N omega^B x r^OP
+
+        where O and P are both points fixed in frame B, which is rotating in
+        frame N.
+
+        Parameters
+        ==========
+
+        otherpoint : Point
+            The first point of the 2-point theory (O)
+        outframe : ReferenceFrame
+            The frame we want this point's velocity defined in (N)
+        fixedframe : ReferenceFrame
+            The frame in which both points are fixed (B)
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
+        >>> from sympy.physics.vector import init_vprinting
+        >>> init_vprinting(pretty_print=False)
+        >>> q = dynamicsymbols('q')
+        >>> qd = dynamicsymbols('q', 1)
+        >>> N = ReferenceFrame('N')
+        >>> B = N.orientnew('B', 'Axis', [q, N.z])
+        >>> O = Point('O')
+        >>> P = O.locatenew('P', 10 * B.x)
+        >>> O.set_vel(N, 5 * N.x)
+        >>> P.v2pt_theory(O, N, B)
+        5*N.x + 10*q'*B.y
+
+        """
+
+        _check_frame(outframe)
+        _check_frame(fixedframe)
+        self._check_point(otherpoint)
+        dist = self.pos_from(otherpoint)
+        v = otherpoint.vel(outframe)
+        omega = fixedframe.ang_vel_in(outframe)
+        self.set_vel(outframe, v + (omega ^ dist))
+        return self.vel(outframe)
+
+    def vel(self, frame):
+        """The velocity Vector of this Point in the ReferenceFrame.
+
+        Parameters
+        ==========
+
+        frame : ReferenceFrame
+            The frame in which the returned velocity vector is defined.
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
+        >>> N = ReferenceFrame('N')
+        >>> p1 = Point('p1')
+        >>> p1.set_vel(N, 10 * N.x)
+        >>> p1.vel(N)
+        10*N.x
+
+        Velocities will be automatically calculated if possible, otherwise a
+        ``ValueError`` will be raised. If it is possible to calculate
+        multiple different velocities from the relative points, the points
+        defined most directly relative to this point will be used. In the case
+        of inconsistent relative positions of points, incorrect velocities may
+        be returned. It is up to the user to define prior relative positions
+        and velocities of points in a self-consistent way.
+
+        >>> p = Point('p')
+        >>> q = dynamicsymbols('q')
+        >>> p.set_vel(N, 10 * N.x)
+        >>> p2 = Point('p2')
+        >>> p2.set_pos(p, q*N.x)
+        >>> p2.vel(N)
+        (Derivative(q(t), t) + 10)*N.x
+
+        """
+
+        _check_frame(frame)
+        if frame not in self._vel_dict:
+            valid_neighbor_found = False
+            is_cyclic = False
+            visited = []
+            queue = [self]
+            candidate_neighbor = []
+            while queue:  # BFS to find nearest point
+                node = queue.pop(0)
+                if node not in visited:
+                    visited.append(node)
+                    for neighbor, neighbor_pos in node._pos_dict.items():
+                        if neighbor in visited:
+                            continue
+                        try:
+                            # Checks if pos vector is valid
+                            neighbor_pos.express(frame)
+                        except ValueError:
+                            continue
+                        if neighbor in queue:
+                            is_cyclic = True
+                        try:
+                            # Checks if point has its vel defined in req frame
+                            neighbor_velocity = neighbor._vel_dict[frame]
+                        except KeyError:
+                            queue.append(neighbor)
+                            continue
+                        candidate_neighbor.append(neighbor)
+                        if not valid_neighbor_found:
+                            self.set_vel(frame, self.pos_from(neighbor).dt(frame) + neighbor_velocity)
+                            valid_neighbor_found = True
+            if is_cyclic:
+                warn('Kinematic loops are defined among the positions of '
+                     'points. This is likely not desired and may cause errors '
+                     'in your calculations.')
+            if len(candidate_neighbor) > 1:
+                warn('Velocity automatically calculated based on point ' +
+                     candidate_neighbor[0].name +
+                     ' but it is also possible from point(s): ' +
+                     str(candidate_neighbor[1:]) +
+                     '. Velocities from these points are not necessarily the '
+                     'same. This may cause errors in your calculations.')
+            if valid_neighbor_found:
+                return self._vel_dict[frame]
+            else:
+                raise ValueError('Velocity of point ' + self.name +
+                                 ' has not been defined in ReferenceFrame ' +
+                                 frame.name)
+
+        return self._vel_dict[frame]
+
+    def partial_velocity(self, frame, *gen_speeds):
+        """Returns the partial velocities of the linear velocity vector of
+        this point in the given frame with respect to one or more provided
+        generalized speeds.
+
+        Parameters
+        ==========
+        frame : ReferenceFrame
+            The frame in which the velocity is defined.
+        gen_speeds : functions of time
+            The generalized speeds.
+
+        Returns
+        =======
+        partial_velocities : tuple of Vector
+            The partial velocity vectors corresponding to the provided
+            generalized speeds.
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame, Point
+        >>> from sympy.physics.vector import dynamicsymbols
+        >>> N = ReferenceFrame('N')
+        >>> A = ReferenceFrame('A')
+        >>> p = Point('p')
+        >>> u1, u2 = dynamicsymbols('u1, u2')
+        >>> p.set_vel(N, u1 * N.x + u2 * A.y)
+        >>> p.partial_velocity(N, u1)
+        N.x
+        >>> p.partial_velocity(N, u1, u2)
+        (N.x, A.y)
+
+        """
+        partials = [self.vel(frame).diff(speed, frame, var_in_dcm=False) for
+                    speed in gen_speeds]
+
+        if len(partials) == 1:
+            return partials[0]
+        else:
+            return tuple(partials)
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/printing.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/printing.py
new file mode 100644
index 0000000000000000000000000000000000000000..59efcd90e3e5bad64a2180efab4ce31d3ae94ad3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/printing.py
@@ -0,0 +1,371 @@
+from sympy.core.function import Derivative
+from sympy.core.function import UndefinedFunction, AppliedUndef
+from sympy.core.symbol import Symbol
+from sympy.interactive.printing import init_printing
+from sympy.printing.latex import LatexPrinter
+from sympy.printing.pretty.pretty import PrettyPrinter
+from sympy.printing.pretty.pretty_symbology import center_accent
+from sympy.printing.str import StrPrinter
+from sympy.printing.precedence import PRECEDENCE
+
+__all__ = ['vprint', 'vsstrrepr', 'vsprint', 'vpprint', 'vlatex',
+           'init_vprinting']
+
+
+class VectorStrPrinter(StrPrinter):
+    """String Printer for vector expressions. """
+
+    def _print_Derivative(self, e):
+        from sympy.physics.vector.functions import dynamicsymbols
+        t = dynamicsymbols._t
+        if (bool(sum([i == t for i in e.variables])) and
+                isinstance(type(e.args[0]), UndefinedFunction)):
+            ol = str(e.args[0].func)
+            for i, v in enumerate(e.variables):
+                ol += dynamicsymbols._str
+            return ol
+        else:
+            return StrPrinter().doprint(e)
+
+    def _print_Function(self, e):
+        from sympy.physics.vector.functions import dynamicsymbols
+        t = dynamicsymbols._t
+        if isinstance(type(e), UndefinedFunction):
+            return StrPrinter().doprint(e).replace("(%s)" % t, '')
+        return e.func.__name__ + "(%s)" % self.stringify(e.args, ", ")
+
+
+class VectorStrReprPrinter(VectorStrPrinter):
+    """String repr printer for vector expressions."""
+    def _print_str(self, s):
+        return repr(s)
+
+
+class VectorLatexPrinter(LatexPrinter):
+    """Latex Printer for vector expressions.
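+
+    Applied undefined functions of time are printed as plain symbols, and
+    their time derivatives are printed with LaTeX dot accents up to the
+    fourth derivative.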
""" + + def _print_Function(self, expr, exp=None): + from sympy.physics.vector.functions import dynamicsymbols + func = expr.func.__name__ + t = dynamicsymbols._t + + if (hasattr(self, '_print_' + func) and not + isinstance(type(expr), UndefinedFunction)): + return getattr(self, '_print_' + func)(expr, exp) + elif isinstance(type(expr), UndefinedFunction) and (expr.args == (t,)): + # treat this function like a symbol + expr = Symbol(func) + if exp is not None: + # copied from LatexPrinter._helper_print_standard_power, which + # we can't call because we only have exp as a string. + base = self.parenthesize(expr, PRECEDENCE['Pow']) + base = self.parenthesize_super(base) + return r"%s^{%s}" % (base, exp) + else: + return super()._print(expr) + else: + return super()._print_Function(expr, exp) + + def _print_Derivative(self, der_expr): + from sympy.physics.vector.functions import dynamicsymbols + # make sure it is in the right form + der_expr = der_expr.doit() + if not isinstance(der_expr, Derivative): + return r"\left(%s\right)" % self.doprint(der_expr) + + # check if expr is a dynamicsymbol + t = dynamicsymbols._t + expr = der_expr.expr + red = expr.atoms(AppliedUndef) + syms = der_expr.variables + test1 = not all(True for i in red if i.free_symbols == {t}) + test2 = not all(t == i for i in syms) + if test1 or test2: + return super()._print_Derivative(der_expr) + + # done checking + dots = len(syms) + base = self._print_Function(expr) + base_split = base.split('_', 1) + base = base_split[0] + if dots == 1: + base = r"\dot{%s}" % base + elif dots == 2: + base = r"\ddot{%s}" % base + elif dots == 3: + base = r"\dddot{%s}" % base + elif dots == 4: + base = r"\ddddot{%s}" % base + else: # Fallback to standard printing + return super()._print_Derivative(der_expr) + if len(base_split) != 1: + base += '_' + base_split[1] + return base + + +class VectorPrettyPrinter(PrettyPrinter): + """Pretty Printer for vectorialexpressions. """ + + def _print_Derivative(self, deriv): + from sympy.physics.vector.functions import dynamicsymbols + # XXX use U('PARTIAL DIFFERENTIAL') here ? 
+        t = dynamicsymbols._t
+        dot_i = 0
+        syms = list(reversed(deriv.variables))
+
+        while len(syms) > 0:
+            if syms[-1] == t:
+                syms.pop()
+                dot_i += 1
+            else:
+                return super()._print_Derivative(deriv)
+
+        if not (isinstance(type(deriv.expr), UndefinedFunction) and
+                (deriv.expr.args == (t,))):
+            return super()._print_Derivative(deriv)
+        else:
+            pform = self._print_Function(deriv.expr)
+
+        # a multi-line pretty form indicates some sort of non-standard
+        # dynamic symbol, so print it the default SymPy way instead
+        if len(pform.picture) > 1:
+            return super()._print_Derivative(deriv)
+
+        # There are only special symbols up to fourth-order derivatives
+        if dot_i >= 5:
+            return super()._print_Derivative(deriv)
+
+        # Deal with special symbols
+        dots = {0: "",
+                1: "\N{COMBINING DOT ABOVE}",
+                2: "\N{COMBINING DIAERESIS}",
+                3: "\N{COMBINING THREE DOTS ABOVE}",
+                4: "\N{COMBINING FOUR DOTS ABOVE}"}
+
+        d = pform.__dict__
+        # if unicode is false then calculate number of apostrophes needed
+        # and add to output
+        if not self._use_unicode:
+            apostrophes = ""
+            for i in range(0, dot_i):
+                apostrophes += "'"
+            d['picture'][0] += apostrophes + "(t)"
+        else:
+            d['picture'] = [center_accent(d['picture'][0], dots[dot_i])]
+        return pform
+
+    def _print_Function(self, e):
+        from sympy.physics.vector.functions import dynamicsymbols
+        t = dynamicsymbols._t
+        # XXX works only for applied functions
+        func = e.func
+        args = e.args
+        func_name = func.__name__
+        pform = self._print_Symbol(Symbol(func_name))
+        # If this function is an Undefined function of t, it is probably a
+        # dynamic symbol, so we'll skip the (t). The rest of the code is
+        # identical to the normal PrettyPrinter code
+        if not (isinstance(func, UndefinedFunction) and (args == (t,))):
+            return super()._print_Function(e)
+        return pform
+
+
+def vprint(expr, **settings):
+    r"""Function for printing of expressions generated in the
+    sympy.physics vector package.
+
+    Extends SymPy's StrPrinter, takes the same settings accepted by SymPy's
+    :func:`~.sstr`, and is equivalent to ``print(sstr(foo))``.
+
+    Parameters
+    ==========
+
+    expr : valid SymPy object
+        SymPy expression to print.
+    settings : args
+        Same as the settings accepted by SymPy's sstr().
+
+    Examples
+    ========
+
+    >>> from sympy.physics.vector import vprint, dynamicsymbols
+    >>> u1 = dynamicsymbols('u1')
+    >>> print(u1)
+    u1(t)
+    >>> vprint(u1)
+    u1
+
+    """
+
+    outstr = vsprint(expr, **settings)
+
+    import builtins
+    if outstr != 'None':
+        builtins._ = outstr
+        print(outstr)
+
+
+def vsstrrepr(expr, **settings):
+    """Function for displaying expression representations with vector
+    printing enabled.
+
+    Parameters
+    ==========
+
+    expr : valid SymPy object
+        SymPy expression to print.
+    settings : args
+        Same as the settings accepted by SymPy's sstrrepr().
+
+    """
+    p = VectorStrReprPrinter(settings)
+    return p.doprint(expr)
+
+
+def vsprint(expr, **settings):
+    r"""Function for displaying expressions generated in the
+    sympy.physics vector package.
+
+    Returns the output of vprint() as a string.
+
+    Parameters
+    ==========
+
+    expr : valid SymPy object
+        SymPy expression to print
+    settings : args
+        Same as the settings accepted by SymPy's sstr().
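+
+    Unlike ``vprint``, nothing is printed to the screen; the formatted
+    string is simply returned.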
+ + Examples + ======== + + >>> from sympy.physics.vector import vsprint, dynamicsymbols + >>> u1, u2 = dynamicsymbols('u1 u2') + >>> u2d = dynamicsymbols('u2', level=1) + >>> print("%s = %s" % (u1, u2 + u2d)) + u1(t) = u2(t) + Derivative(u2(t), t) + >>> print("%s = %s" % (vsprint(u1), vsprint(u2 + u2d))) + u1 = u2 + u2' + + """ + + string_printer = VectorStrPrinter(settings) + return string_printer.doprint(expr) + + +def vpprint(expr, **settings): + r"""Function for pretty printing of expressions generated in the + sympy.physics vector package. + + Mainly used for expressions not inside a vector; the output of running + scripts and generating equations of motion. Takes the same options as + SymPy's :func:`~.pretty_print`; see that function for more information. + + Parameters + ========== + + expr : valid SymPy object + SymPy expression to pretty print + settings : args + Same as those accepted by SymPy's pretty_print. + + + """ + + pp = VectorPrettyPrinter(settings) + + # Note that this is copied from sympy.printing.pretty.pretty_print: + + # XXX: this is an ugly hack, but at least it works + use_unicode = pp._settings['use_unicode'] + from sympy.printing.pretty.pretty_symbology import pretty_use_unicode + uflag = pretty_use_unicode(use_unicode) + + try: + return pp.doprint(expr) + finally: + pretty_use_unicode(uflag) + + +def vlatex(expr, **settings): + r"""Function for printing latex representation of sympy.physics.vector + objects. + + For latex representation of Vectors, Dyadics, and dynamicsymbols. Takes the + same options as SymPy's :func:`~.latex`; see that function for more + information; + + Parameters + ========== + + expr : valid SymPy object + SymPy expression to represent in LaTeX form + settings : args + Same as latex() + + Examples + ======== + + >>> from sympy.physics.vector import vlatex, ReferenceFrame, dynamicsymbols + >>> N = ReferenceFrame('N') + >>> q1, q2 = dynamicsymbols('q1 q2') + >>> q1d, q2d = dynamicsymbols('q1 q2', 1) + >>> q1dd, q2dd = dynamicsymbols('q1 q2', 2) + >>> vlatex(N.x + N.y) + '\\mathbf{\\hat{n}_x} + \\mathbf{\\hat{n}_y}' + >>> vlatex(q1 + q2) + 'q_{1} + q_{2}' + >>> vlatex(q1d) + '\\dot{q}_{1}' + >>> vlatex(q1 * q2d) + 'q_{1} \\dot{q}_{2}' + >>> vlatex(q1dd * q1 / q1d) + '\\frac{q_{1} \\ddot{q}_{1}}{\\dot{q}_{1}}' + + """ + latex_printer = VectorLatexPrinter(settings) + + return latex_printer.doprint(expr) + + +def init_vprinting(**kwargs): + """Initializes time derivative printing for all SymPy objects, i.e. any + functions of time will be displayed in a more compact notation. The main + benefit of this is for printing of time derivatives; instead of + displaying as ``Derivative(f(t),t)``, it will display ``f'``. This is + only actually needed for when derivatives are present and are not in a + physics.vector.Vector or physics.vector.Dyadic object. This function is a + light wrapper to :func:`~.init_printing`. Any keyword + arguments for it are valid here. 
+ + {0} + + Examples + ======== + + >>> from sympy import Function, symbols + >>> t, x = symbols('t, x') + >>> omega = Function('omega') + >>> omega(x).diff() + Derivative(omega(x), x) + >>> omega(t).diff() + Derivative(omega(t), t) + + Now use the string printer: + + >>> from sympy.physics.vector import init_vprinting + >>> init_vprinting(pretty_print=False) + >>> omega(x).diff() + Derivative(omega(x), x) + >>> omega(t).diff() + omega' + + """ + kwargs['str_printer'] = vsstrrepr + kwargs['pretty_printer'] = vpprint + kwargs['latex_printer'] = vlatex + init_printing(**kwargs) + + +params = init_printing.__doc__.split('Examples\n ========')[0] # type: ignore +init_vprinting.__doc__ = init_vprinting.__doc__.format(params) # type: ignore diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e1c51bdb8d403f71c87a117ada79b95f58a9298 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_dyadic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_dyadic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..275bf4a23e3efd61e085a7e64c4ac7a404f31791 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_dyadic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_fieldfunctions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_fieldfunctions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1291379420728ac375adad7f7f8c8324107b9d5a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_fieldfunctions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_frame.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92dd437e8b5fca664c0819be28fdfd8d1eb23ae3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_frame.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_functions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78fa5c2f1f07e68d4ecd6dbbbe91c7ff6089ff43 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_functions.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_output.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_output.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b31681fd6e22d7d8f325746888f33cff0b496bec Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_output.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_point.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_point.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b53a5354aee2dcd263b6bd2ef029010a2f9bffd4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_point.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_printing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_printing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e2d84964a66fed8530fe0ca4fad06e79431ab7f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_printing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_vector.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_vector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e3da9a8d973eadb7ab633d0f44a49873677cef2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/__pycache__/test_vector.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_dyadic.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_dyadic.py new file mode 100644 index 0000000000000000000000000000000000000000..e10f8a7293df7d8818dd1ef87aa19b075904c859 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_dyadic.py @@ -0,0 +1,124 @@ +from sympy.core.numbers import (Float, pi) +from sympy.core.symbol import symbols +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix +from sympy.physics.vector import ReferenceFrame, Vector, dynamicsymbols, outer +from sympy.physics.vector.dyadic import _check_dyadic +from sympy.testing.pytest import raises + +Vector.simp = True +A = ReferenceFrame('A') + + +def test_dyadic(): + d1 = A.x | A.x + d2 = A.y | A.y + d3 = A.x | A.y + assert d1 * 0 == 0 + assert d1 != 0 + assert d1 * 2 == 2 * A.x | A.x + assert d1 / 2. 
== 0.5 * d1 + assert d1 & (0 * d1) == 0 + assert d1 & d2 == 0 + assert d1 & A.x == A.x + assert d1 ^ A.x == 0 + assert d1 ^ A.y == A.x | A.z + assert d1 ^ A.z == - A.x | A.y + assert d2 ^ A.x == - A.y | A.z + assert A.x ^ d1 == 0 + assert A.y ^ d1 == - A.z | A.x + assert A.z ^ d1 == A.y | A.x + assert A.x & d1 == A.x + assert A.y & d1 == 0 + assert A.y & d2 == A.y + assert d1 & d3 == A.x | A.y + assert d3 & d1 == 0 + assert d1.dt(A) == 0 + q = dynamicsymbols('q') + qd = dynamicsymbols('q', 1) + B = A.orientnew('B', 'Axis', [q, A.z]) + assert d1.express(B) == d1.express(B, B) + assert d1.express(B) == ((cos(q)**2) * (B.x | B.x) + (-sin(q) * cos(q)) * + (B.x | B.y) + (-sin(q) * cos(q)) * (B.y | B.x) + (sin(q)**2) * + (B.y | B.y)) + assert d1.express(B, A) == (cos(q)) * (B.x | A.x) + (-sin(q)) * (B.y | A.x) + assert d1.express(A, B) == (cos(q)) * (A.x | B.x) + (-sin(q)) * (A.x | B.y) + assert d1.dt(B) == (-qd) * (A.y | A.x) + (-qd) * (A.x | A.y) + + assert d1.to_matrix(A) == Matrix([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert d1.to_matrix(A, B) == Matrix([[cos(q), -sin(q), 0], + [0, 0, 0], + [0, 0, 0]]) + assert d3.to_matrix(A) == Matrix([[0, 1, 0], [0, 0, 0], [0, 0, 0]]) + a, b, c, d, e, f = symbols('a, b, c, d, e, f') + v1 = a * A.x + b * A.y + c * A.z + v2 = d * A.x + e * A.y + f * A.z + d4 = v1.outer(v2) + assert d4.to_matrix(A) == Matrix([[a * d, a * e, a * f], + [b * d, b * e, b * f], + [c * d, c * e, c * f]]) + d5 = v1.outer(v1) + C = A.orientnew('C', 'Axis', [q, A.x]) + for expected, actual in zip(C.dcm(A) * d5.to_matrix(A) * C.dcm(A).T, + d5.to_matrix(C)): + assert (expected - actual).simplify() == 0 + + raises(TypeError, lambda: d1.applyfunc(0)) + + +def test_dyadic_simplify(): + x, y, z, k, n, m, w, f, s, A = symbols('x, y, z, k, n, m, w, f, s, A') + N = ReferenceFrame('N') + + dy = N.x | N.x + test1 = (1 / x + 1 / y) * dy + assert (N.x & test1 & N.x) != (x + y) / (x * y) + test1 = test1.simplify() + assert (N.x & test1 & N.x) == (x + y) / (x * y) + + test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * dy + test2 = test2.simplify() + assert (N.x & test2 & N.x) == (A**2 * s**4 / (4 * pi * k * m**3)) + + test3 = ((4 + 4 * x - 2 * (2 + 2 * x)) / (2 + 2 * x)) * dy + test3 = test3.simplify() + assert (N.x & test3 & N.x) == 0 + + test4 = ((-4 * x * y**2 - 2 * y**3 - 2 * x**2 * y) / (x + y)**2) * dy + test4 = test4.simplify() + assert (N.x & test4 & N.x) == -2 * y + + +def test_dyadic_subs(): + N = ReferenceFrame('N') + s = symbols('s') + a = s*(N.x | N.x) + assert a.subs({s: 2}) == 2*(N.x | N.x) + + +def test_check_dyadic(): + raises(TypeError, lambda: _check_dyadic(0)) + + +def test_dyadic_evalf(): + N = ReferenceFrame('N') + a = pi * (N.x | N.x) + assert a.evalf(3) == Float('3.1416', 3) * (N.x | N.x) + s = symbols('s') + a = 5 * s * pi* (N.x | N.x) + assert a.evalf(2) == Float('5', 2) * Float('3.1416', 2) * s * (N.x | N.x) + assert a.evalf(9, subs={s: 5.124}) == Float('80.48760378', 9) * (N.x | N.x) + + +def test_dyadic_xreplace(): + x, y, z = symbols('x y z') + N = ReferenceFrame('N') + D = outer(N.x, N.x) + v = x*y * D + assert v.xreplace({x : cos(x)}) == cos(x)*y * D + assert v.xreplace({x*y : pi}) == pi * D + v = (x*y)**z * D + assert v.xreplace({(x*y)**z : 1}) == D + assert v.xreplace({x:1, z:0}) == D + raises(TypeError, lambda: v.xreplace()) + raises(TypeError, lambda: v.xreplace([x, y])) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_fieldfunctions.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_fieldfunctions.py new 
file mode 100644 index 0000000000000000000000000000000000000000..4e5c67aad44ca972dac6e455c57b60a74bae207a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_fieldfunctions.py @@ -0,0 +1,133 @@ +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.physics.vector import ReferenceFrame, Vector, Point, \ + dynamicsymbols +from sympy.physics.vector.fieldfunctions import divergence, \ + gradient, curl, is_conservative, is_solenoidal, \ + scalar_potential, scalar_potential_difference +from sympy.testing.pytest import raises + +R = ReferenceFrame('R') +q = dynamicsymbols('q') +P = R.orientnew('P', 'Axis', [q, R.z]) + + +def test_curl(): + assert curl(Vector(0), R) == Vector(0) + assert curl(R.x, R) == Vector(0) + assert curl(2*R[1]**2*R.y, R) == Vector(0) + assert curl(R[0]*R[1]*R.z, R) == R[0]*R.x - R[1]*R.y + assert curl(R[0]*R[1]*R[2] * (R.x+R.y+R.z), R) == \ + (-R[0]*R[1] + R[0]*R[2])*R.x + (R[0]*R[1] - R[1]*R[2])*R.y + \ + (-R[0]*R[2] + R[1]*R[2])*R.z + assert curl(2*R[0]**2*R.y, R) == 4*R[0]*R.z + assert curl(P[0]**2*R.x + P.y, R) == \ + - 2*(R[0]*cos(q) + R[1]*sin(q))*sin(q)*R.z + assert curl(P[0]*R.y, P) == cos(q)*P.z + + +def test_divergence(): + assert divergence(Vector(0), R) is S.Zero + assert divergence(R.x, R) is S.Zero + assert divergence(R[0]**2*R.x, R) == 2*R[0] + assert divergence(R[0]*R[1]*R[2] * (R.x+R.y+R.z), R) == \ + R[0]*R[1] + R[0]*R[2] + R[1]*R[2] + assert divergence((1/(R[0]*R[1]*R[2])) * (R.x+R.y+R.z), R) == \ + -1/(R[0]*R[1]*R[2]**2) - 1/(R[0]*R[1]**2*R[2]) - \ + 1/(R[0]**2*R[1]*R[2]) + v = P[0]*P.x + P[1]*P.y + P[2]*P.z + assert divergence(v, P) == 3 + assert divergence(v, R).simplify() == 3 + assert divergence(P[0]*R.x + R[0]*P.x, R) == 2*cos(q) + + +def test_gradient(): + a = Symbol('a') + assert gradient(0, R) == Vector(0) + assert gradient(R[0], R) == R.x + assert gradient(R[0]*R[1]*R[2], R) == \ + R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z + assert gradient(2*R[0]**2, R) == 4*R[0]*R.x + assert gradient(a*sin(R[1])/R[0], R) == \ + - a*sin(R[1])/R[0]**2*R.x + a*cos(R[1])/R[0]*R.y + assert gradient(P[0]*P[1], R) == \ + ((-R[0]*sin(q) + R[1]*cos(q))*cos(q) - (R[0]*cos(q) + R[1]*sin(q))*sin(q))*R.x + \ + ((-R[0]*sin(q) + R[1]*cos(q))*sin(q) + (R[0]*cos(q) + R[1]*sin(q))*cos(q))*R.y + assert gradient(P[0]*R[2], P) == P[2]*P.x + P[0]*P.z + + +scalar_field = 2*R[0]**2*R[1]*R[2] +grad_field = gradient(scalar_field, R) +vector_field = R[1]**2*R.x + 3*R[0]*R.y + 5*R[1]*R[2]*R.z +curl_field = curl(vector_field, R) + + +def test_conservative(): + assert is_conservative(0) is True + assert is_conservative(R.x) is True + assert is_conservative(2 * R.x + 3 * R.y + 4 * R.z) is True + assert is_conservative(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z) is \ + True + assert is_conservative(R[0] * R.y) is False + assert is_conservative(grad_field) is True + assert is_conservative(curl_field) is False + assert is_conservative(4*R[0]*R[1]*R[2]*R.x + 2*R[0]**2*R[2]*R.y) is \ + False + assert is_conservative(R[2]*P.x + P[0]*R.z) is True + + +def test_solenoidal(): + assert is_solenoidal(0) is True + assert is_solenoidal(R.x) is True + assert is_solenoidal(2 * R.x + 3 * R.y + 4 * R.z) is True + assert is_solenoidal(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z) is \ + True + assert is_solenoidal(R[1] * R.y) is False + assert is_solenoidal(grad_field) is False + assert is_solenoidal(curl_field) is True + assert is_solenoidal((-2*R[1] + 3)*R.z) is 
True + assert is_solenoidal(cos(q)*R.x + sin(q)*R.y + cos(q)*P.z) is True + assert is_solenoidal(R[2]*P.x + P[0]*R.z) is True + + +def test_scalar_potential(): + assert scalar_potential(0, R) == 0 + assert scalar_potential(R.x, R) == R[0] + assert scalar_potential(R.y, R) == R[1] + assert scalar_potential(R.z, R) == R[2] + assert scalar_potential(R[1]*R[2]*R.x + R[0]*R[2]*R.y + \ + R[0]*R[1]*R.z, R) == R[0]*R[1]*R[2] + assert scalar_potential(grad_field, R) == scalar_field + assert scalar_potential(R[2]*P.x + P[0]*R.z, R) == \ + R[0]*R[2]*cos(q) + R[1]*R[2]*sin(q) + assert scalar_potential(R[2]*P.x + P[0]*R.z, P) == P[0]*P[2] + raises(ValueError, lambda: scalar_potential(R[0] * R.y, R)) + + +def test_scalar_potential_difference(): + origin = Point('O') + point1 = origin.locatenew('P1', 1*R.x + 2*R.y + 3*R.z) + point2 = origin.locatenew('P2', 4*R.x + 5*R.y + 6*R.z) + genericpointR = origin.locatenew('RP', R[0]*R.x + R[1]*R.y + R[2]*R.z) + genericpointP = origin.locatenew('PP', P[0]*P.x + P[1]*P.y + P[2]*P.z) + assert scalar_potential_difference(S.Zero, R, point1, point2, \ + origin) == 0 + assert scalar_potential_difference(scalar_field, R, origin, \ + genericpointR, origin) == \ + scalar_field + assert scalar_potential_difference(grad_field, R, origin, \ + genericpointR, origin) == \ + scalar_field + assert scalar_potential_difference(grad_field, R, point1, point2, + origin) == 948 + assert scalar_potential_difference(R[1]*R[2]*R.x + R[0]*R[2]*R.y + \ + R[0]*R[1]*R.z, R, point1, + genericpointR, origin) == \ + R[0]*R[1]*R[2] - 6 + potential_diff_P = 2*P[2]*(P[0]*sin(q) + P[1]*cos(q))*\ + (P[0]*cos(q) - P[1]*sin(q))**2 + assert scalar_potential_difference(grad_field, P, origin, \ + genericpointP, \ + origin).simplify() == \ + potential_diff_P diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_frame.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..69cbc97d745139038ee255b48ec2c0a4e88ce67c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_frame.py @@ -0,0 +1,660 @@ +from sympy.core.numbers import pi +from sympy.core.symbol import symbols +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.matrices.dense import (eye, zeros) +from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix +from sympy.simplify.simplify import simplify +from sympy.physics.vector import (ReferenceFrame, Vector, CoordinateSym, + dynamicsymbols, time_derivative, express, + dot) +from sympy.physics.vector.frame import _check_frame +from sympy.physics.vector.vector import VectorTypeError +from sympy.testing.pytest import raises +import warnings + +Vector.simp = True + + +def test_dict_list(): + + A = ReferenceFrame('A') + B = ReferenceFrame('B') + C = ReferenceFrame('C') + D = ReferenceFrame('D') + E = ReferenceFrame('E') + F = ReferenceFrame('F') + + B.orient_axis(A, A.x, 1.0) + C.orient_axis(B, B.x, 1.0) + D.orient_axis(C, C.x, 1.0) + + assert D._dict_list(A, 0) == [D, C, B, A] + + E.orient_axis(D, D.x, 1.0) + + assert C._dict_list(A, 0) == [C, B, A] + assert C._dict_list(E, 0) == [C, D, E] + + # only 0, 1, 2 permitted for second argument + raises(ValueError, lambda: C._dict_list(E, 5)) + # no connecting path + raises(ValueError, lambda: F._dict_list(A, 0)) + + +def test_coordinate_vars(): + """Tests the coordinate variables functionality""" + A = ReferenceFrame('A') + assert CoordinateSym('Ax', A, 0) == 
A[0] + assert CoordinateSym('Ax', A, 1) == A[1] + assert CoordinateSym('Ax', A, 2) == A[2] + raises(ValueError, lambda: CoordinateSym('Ax', A, 3)) + q = dynamicsymbols('q') + qd = dynamicsymbols('q', 1) + assert isinstance(A[0], CoordinateSym) and \ + isinstance(A[0], CoordinateSym) and \ + isinstance(A[0], CoordinateSym) + assert A.variable_map(A) == {A[0]:A[0], A[1]:A[1], A[2]:A[2]} + assert A[0].frame == A + B = A.orientnew('B', 'Axis', [q, A.z]) + assert B.variable_map(A) == {B[2]: A[2], B[1]: -A[0]*sin(q) + A[1]*cos(q), + B[0]: A[0]*cos(q) + A[1]*sin(q)} + assert A.variable_map(B) == {A[0]: B[0]*cos(q) - B[1]*sin(q), + A[1]: B[0]*sin(q) + B[1]*cos(q), A[2]: B[2]} + assert time_derivative(B[0], A) == -A[0]*sin(q)*qd + A[1]*cos(q)*qd + assert time_derivative(B[1], A) == -A[0]*cos(q)*qd - A[1]*sin(q)*qd + assert time_derivative(B[2], A) == 0 + assert express(B[0], A, variables=True) == A[0]*cos(q) + A[1]*sin(q) + assert express(B[1], A, variables=True) == -A[0]*sin(q) + A[1]*cos(q) + assert express(B[2], A, variables=True) == A[2] + assert time_derivative(A[0]*A.x + A[1]*A.y + A[2]*A.z, B) == A[1]*qd*A.x - A[0]*qd*A.y + assert time_derivative(B[0]*B.x + B[1]*B.y + B[2]*B.z, A) == - B[1]*qd*B.x + B[0]*qd*B.y + assert express(B[0]*B[1]*B[2], A, variables=True) == \ + A[2]*(-A[0]*sin(q) + A[1]*cos(q))*(A[0]*cos(q) + A[1]*sin(q)) + assert (time_derivative(B[0]*B[1]*B[2], A) - + (A[2]*(-A[0]**2*cos(2*q) - + 2*A[0]*A[1]*sin(2*q) + + A[1]**2*cos(2*q))*qd)).trigsimp() == 0 + assert express(B[0]*B.x + B[1]*B.y + B[2]*B.z, A) == \ + (B[0]*cos(q) - B[1]*sin(q))*A.x + (B[0]*sin(q) + \ + B[1]*cos(q))*A.y + B[2]*A.z + assert express(B[0]*B.x + B[1]*B.y + B[2]*B.z, A, variables=True) == \ + A[0]*A.x + A[1]*A.y + A[2]*A.z + assert express(A[0]*A.x + A[1]*A.y + A[2]*A.z, B) == \ + (A[0]*cos(q) + A[1]*sin(q))*B.x + \ + (-A[0]*sin(q) + A[1]*cos(q))*B.y + A[2]*B.z + assert express(A[0]*A.x + A[1]*A.y + A[2]*A.z, B, variables=True) == \ + B[0]*B.x + B[1]*B.y + B[2]*B.z + N = B.orientnew('N', 'Axis', [-q, B.z]) + assert N.variable_map(A) == {N[0]: A[0], N[2]: A[2], N[1]: A[1]} + C = A.orientnew('C', 'Axis', [q, A.x + A.y + A.z]) + mapping = A.variable_map(C) + assert mapping[A[0]] == 2*C[0]*cos(q)/3 + C[0]/3 - 2*C[1]*sin(q + pi/6)/3 +\ + C[1]/3 - 2*C[2]*cos(q + pi/3)/3 + C[2]/3 + assert mapping[A[1]] == -2*C[0]*cos(q + pi/3)/3 + \ + C[0]/3 + 2*C[1]*cos(q)/3 + C[1]/3 - 2*C[2]*sin(q + pi/6)/3 + C[2]/3 + assert mapping[A[2]] == -2*C[0]*sin(q + pi/6)/3 + C[0]/3 - \ + 2*C[1]*cos(q + pi/3)/3 + C[1]/3 + 2*C[2]*cos(q)/3 + C[2]/3 + + +def test_ang_vel(): + q1, q2, q3, q4 = dynamicsymbols('q1 q2 q3 q4') + q1d, q2d, q3d, q4d = dynamicsymbols('q1 q2 q3 q4', 1) + N = ReferenceFrame('N') + A = N.orientnew('A', 'Axis', [q1, N.z]) + B = A.orientnew('B', 'Axis', [q2, A.x]) + C = B.orientnew('C', 'Axis', [q3, B.y]) + D = N.orientnew('D', 'Axis', [q4, N.y]) + u1, u2, u3 = dynamicsymbols('u1 u2 u3') + assert A.ang_vel_in(N) == (q1d)*A.z + assert B.ang_vel_in(N) == (q2d)*B.x + (q1d)*A.z + assert C.ang_vel_in(N) == (q3d)*C.y + (q2d)*B.x + (q1d)*A.z + + A2 = N.orientnew('A2', 'Axis', [q4, N.y]) + assert N.ang_vel_in(N) == 0 + assert N.ang_vel_in(A) == -q1d*N.z + assert N.ang_vel_in(B) == -q1d*A.z - q2d*B.x + assert N.ang_vel_in(C) == -q1d*A.z - q2d*B.x - q3d*B.y + assert N.ang_vel_in(A2) == -q4d*N.y + + assert A.ang_vel_in(N) == q1d*N.z + assert A.ang_vel_in(A) == 0 + assert A.ang_vel_in(B) == - q2d*B.x + assert A.ang_vel_in(C) == - q2d*B.x - q3d*B.y + assert A.ang_vel_in(A2) == q1d*N.z - q4d*N.y + + assert B.ang_vel_in(N) == 
q1d*A.z + q2d*A.x + assert B.ang_vel_in(A) == q2d*A.x + assert B.ang_vel_in(B) == 0 + assert B.ang_vel_in(C) == -q3d*B.y + assert B.ang_vel_in(A2) == q1d*A.z + q2d*A.x - q4d*N.y + + assert C.ang_vel_in(N) == q1d*A.z + q2d*A.x + q3d*B.y + assert C.ang_vel_in(A) == q2d*A.x + q3d*C.y + assert C.ang_vel_in(B) == q3d*B.y + assert C.ang_vel_in(C) == 0 + assert C.ang_vel_in(A2) == q1d*A.z + q2d*A.x + q3d*B.y - q4d*N.y + + assert A2.ang_vel_in(N) == q4d*A2.y + assert A2.ang_vel_in(A) == q4d*A2.y - q1d*N.z + assert A2.ang_vel_in(B) == q4d*N.y - q1d*A.z - q2d*A.x + assert A2.ang_vel_in(C) == q4d*N.y - q1d*A.z - q2d*A.x - q3d*B.y + assert A2.ang_vel_in(A2) == 0 + + C.set_ang_vel(N, u1*C.x + u2*C.y + u3*C.z) + assert C.ang_vel_in(N) == (u1)*C.x + (u2)*C.y + (u3)*C.z + assert N.ang_vel_in(C) == (-u1)*C.x + (-u2)*C.y + (-u3)*C.z + assert C.ang_vel_in(D) == (u1)*C.x + (u2)*C.y + (u3)*C.z + (-q4d)*D.y + assert D.ang_vel_in(C) == (-u1)*C.x + (-u2)*C.y + (-u3)*C.z + (q4d)*D.y + + q0 = dynamicsymbols('q0') + q0d = dynamicsymbols('q0', 1) + E = N.orientnew('E', 'Quaternion', (q0, q1, q2, q3)) + assert E.ang_vel_in(N) == ( + 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1) * E.x + + 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2) * E.y + + 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3) * E.z) + + F = N.orientnew('F', 'Body', (q1, q2, q3), 313) + assert F.ang_vel_in(N) == ((sin(q2)*sin(q3)*q1d + cos(q3)*q2d)*F.x + + (sin(q2)*cos(q3)*q1d - sin(q3)*q2d)*F.y + (cos(q2)*q1d + q3d)*F.z) + G = N.orientnew('G', 'Axis', (q1, N.x + N.y)) + assert G.ang_vel_in(N) == q1d * (N.x + N.y).normalize() + assert N.ang_vel_in(G) == -q1d * (N.x + N.y).normalize() + + +def test_dcm(): + q1, q2, q3, q4 = dynamicsymbols('q1 q2 q3 q4') + N = ReferenceFrame('N') + A = N.orientnew('A', 'Axis', [q1, N.z]) + B = A.orientnew('B', 'Axis', [q2, A.x]) + C = B.orientnew('C', 'Axis', [q3, B.y]) + D = N.orientnew('D', 'Axis', [q4, N.y]) + E = N.orientnew('E', 'Space', [q1, q2, q3], '123') + assert N.dcm(C) == Matrix([ + [- sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), - sin(q1) * + cos(q2), sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)], [sin(q1) * + cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2), sin(q1) * + sin(q3) - sin(q2) * cos(q1) * cos(q3)], [- sin(q3) * cos(q2), sin(q2), + cos(q2) * cos(q3)]]) + # This is a little touchy. Is it ok to use simplify in assert? + test_mat = D.dcm(C) - Matrix( + [[cos(q1) * cos(q3) * cos(q4) - sin(q3) * (- sin(q4) * cos(q2) + + sin(q1) * sin(q2) * cos(q4)), - sin(q2) * sin(q4) - sin(q1) * + cos(q2) * cos(q4), sin(q3) * cos(q1) * cos(q4) + cos(q3) * (- sin(q4) * + cos(q2) + sin(q1) * sin(q2) * cos(q4))], [sin(q1) * cos(q3) + + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2), sin(q1) * sin(q3) - + sin(q2) * cos(q1) * cos(q3)], [sin(q4) * cos(q1) * cos(q3) - + sin(q3) * (cos(q2) * cos(q4) + sin(q1) * sin(q2) * sin(q4)), sin(q2) * + cos(q4) - sin(q1) * sin(q4) * cos(q2), sin(q3) * sin(q4) * cos(q1) + + cos(q3) * (cos(q2) * cos(q4) + sin(q1) * sin(q2) * sin(q4))]]) + assert test_mat.expand() == zeros(3, 3) + assert E.dcm(N) == Matrix( + [[cos(q2)*cos(q3), sin(q3)*cos(q2), -sin(q2)], + [sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + + cos(q1)*cos(q3), sin(q1)*cos(q2)], [sin(q1)*sin(q3) + + sin(q2)*cos(q1)*cos(q3), - sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), + cos(q1)*cos(q2)]]) + +def test_w_diff_dcm1(): + # Ref: + # Dynamics Theory and Applications, Kane 1985 + # Sec. 
2.1 ANGULAR VELOCITY + A = ReferenceFrame('A') + B = ReferenceFrame('B') + + c11, c12, c13 = dynamicsymbols('C11 C12 C13') + c21, c22, c23 = dynamicsymbols('C21 C22 C23') + c31, c32, c33 = dynamicsymbols('C31 C32 C33') + + c11d, c12d, c13d = dynamicsymbols('C11 C12 C13', level=1) + c21d, c22d, c23d = dynamicsymbols('C21 C22 C23', level=1) + c31d, c32d, c33d = dynamicsymbols('C31 C32 C33', level=1) + + DCM = Matrix([ + [c11, c12, c13], + [c21, c22, c23], + [c31, c32, c33] + ]) + + B.orient(A, 'DCM', DCM) + b1a = (B.x).express(A) + b2a = (B.y).express(A) + b3a = (B.z).express(A) + + # Equation (2.1.1) + B.set_ang_vel(A, B.x*(dot((b3a).dt(A), B.y)) + + B.y*(dot((b1a).dt(A), B.z)) + + B.z*(dot((b2a).dt(A), B.x))) + + # Equation (2.1.21) + expr = ( (c12*c13d + c22*c23d + c32*c33d)*B.x + + (c13*c11d + c23*c21d + c33*c31d)*B.y + + (c11*c12d + c21*c22d + c31*c32d)*B.z) + assert B.ang_vel_in(A) - expr == 0 + +def test_w_diff_dcm2(): + q1, q2, q3 = dynamicsymbols('q1:4') + N = ReferenceFrame('N') + A = N.orientnew('A', 'axis', [q1, N.x]) + B = A.orientnew('B', 'axis', [q2, A.y]) + C = B.orientnew('C', 'axis', [q3, B.z]) + + DCM = C.dcm(N).T + D = N.orientnew('D', 'DCM', DCM) + + # Frames D and C are the same ReferenceFrame, + # since they have equal DCM respect to frame N. + # Therefore, D and C should have same angle velocity in N. + assert D.dcm(N) == C.dcm(N) == Matrix([ + [cos(q2)*cos(q3), sin(q1)*sin(q2)*cos(q3) + + sin(q3)*cos(q1), sin(q1)*sin(q3) - + sin(q2)*cos(q1)*cos(q3)], [-sin(q3)*cos(q2), + -sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), + sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)], + [sin(q2), -sin(q1)*cos(q2), cos(q1)*cos(q2)]]) + assert (D.ang_vel_in(N) - C.ang_vel_in(N)).express(N).simplify() == 0 + +def test_orientnew_respects_parent_class(): + class MyReferenceFrame(ReferenceFrame): + pass + B = MyReferenceFrame('B') + C = B.orientnew('C', 'Axis', [0, B.x]) + assert isinstance(C, MyReferenceFrame) + + +def test_orientnew_respects_input_indices(): + N = ReferenceFrame('N') + q1 = dynamicsymbols('q1') + A = N.orientnew('a', 'Axis', [q1, N.z]) + #modify default indices: + minds = [x+'1' for x in N.indices] + B = N.orientnew('b', 'Axis', [q1, N.z], indices=minds) + + assert N.indices == A.indices + assert B.indices == minds + +def test_orientnew_respects_input_latexs(): + N = ReferenceFrame('N') + q1 = dynamicsymbols('q1') + A = N.orientnew('a', 'Axis', [q1, N.z]) + + #build default and alternate latex_vecs: + def_latex_vecs = [(r"\mathbf{\hat{%s}_%s}" % (A.name.lower(), + A.indices[0])), (r"\mathbf{\hat{%s}_%s}" % + (A.name.lower(), A.indices[1])), + (r"\mathbf{\hat{%s}_%s}" % (A.name.lower(), + A.indices[2]))] + + name = 'b' + indices = [x+'1' for x in N.indices] + new_latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(), + indices[0])), (r"\mathbf{\hat{%s}_{%s}}" % + (name.lower(), indices[1])), + (r"\mathbf{\hat{%s}_{%s}}" % (name.lower(), + indices[2]))] + + B = N.orientnew(name, 'Axis', [q1, N.z], latexs=new_latex_vecs) + + assert A.latex_vecs == def_latex_vecs + assert B.latex_vecs == new_latex_vecs + assert B.indices != indices + +def test_orientnew_respects_input_variables(): + N = ReferenceFrame('N') + q1 = dynamicsymbols('q1') + A = N.orientnew('a', 'Axis', [q1, N.z]) + + #build non-standard variable names + name = 'b' + new_variables = ['notb_'+x+'1' for x in N.indices] + B = N.orientnew(name, 'Axis', [q1, N.z], variables=new_variables) + + for j,var in enumerate(A.varlist): + assert var.name == A.name + '_' + A.indices[j] + + for j,var in enumerate(B.varlist): + assert 
var.name == new_variables[j] + +def test_issue_10348(): + u = dynamicsymbols('u:3') + I = ReferenceFrame('I') + I.orientnew('A', 'space', u, 'XYZ') + + +def test_issue_11503(): + A = ReferenceFrame("A") + A.orientnew("B", "Axis", [35, A.y]) + C = ReferenceFrame("C") + A.orient(C, "Axis", [70, C.z]) + + +def test_partial_velocity(): + + N = ReferenceFrame('N') + A = ReferenceFrame('A') + + u1, u2 = dynamicsymbols('u1, u2') + + A.set_ang_vel(N, u1 * A.x + u2 * N.y) + + assert N.partial_velocity(A, u1) == -A.x + assert N.partial_velocity(A, u1, u2) == (-A.x, -N.y) + + assert A.partial_velocity(N, u1) == A.x + assert A.partial_velocity(N, u1, u2) == (A.x, N.y) + + assert N.partial_velocity(N, u1) == 0 + assert A.partial_velocity(A, u1) == 0 + + +def test_issue_11498(): + A = ReferenceFrame('A') + B = ReferenceFrame('B') + + # Identity transformation + A.orient(B, 'DCM', eye(3)) + assert A.dcm(B) == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert B.dcm(A) == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + + # x -> y + # y -> -z + # z -> -x + A.orient(B, 'DCM', Matrix([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])) + assert B.dcm(A) == Matrix([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]) + assert A.dcm(B) == Matrix([[0, 0, -1], [1, 0, 0], [0, -1, 0]]) + assert B.dcm(A).T == A.dcm(B) + + +def test_reference_frame(): + raises(TypeError, lambda: ReferenceFrame(0)) + raises(TypeError, lambda: ReferenceFrame('N', 0)) + raises(ValueError, lambda: ReferenceFrame('N', [0, 1])) + raises(TypeError, lambda: ReferenceFrame('N', [0, 1, 2])) + raises(TypeError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], 0)) + raises(ValueError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], [0, 1])) + raises(TypeError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], [0, 1, 2])) + raises(TypeError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], + ['a', 'b', 'c'], 0)) + raises(ValueError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], + ['a', 'b', 'c'], [0, 1])) + raises(TypeError, lambda: ReferenceFrame('N', ['a', 'b', 'c'], + ['a', 'b', 'c'], [0, 1, 2])) + N = ReferenceFrame('N') + assert N[0] == CoordinateSym('N_x', N, 0) + assert N[1] == CoordinateSym('N_y', N, 1) + assert N[2] == CoordinateSym('N_z', N, 2) + raises(ValueError, lambda: N[3]) + N = ReferenceFrame('N', ['a', 'b', 'c']) + assert N['a'] == N.x + assert N['b'] == N.y + assert N['c'] == N.z + raises(ValueError, lambda: N['d']) + assert str(N) == 'N' + + A = ReferenceFrame('A') + B = ReferenceFrame('B') + q0, q1, q2, q3 = symbols('q0 q1 q2 q3') + raises(TypeError, lambda: A.orient(B, 'DCM', 0)) + raises(TypeError, lambda: B.orient(N, 'Space', [q1, q2, q3], '222')) + raises(TypeError, lambda: B.orient(N, 'Axis', [q1, N.x + 2 * N.y], '222')) + raises(TypeError, lambda: B.orient(N, 'Axis', q1)) + raises(IndexError, lambda: B.orient(N, 'Axis', [q1])) + raises(TypeError, lambda: B.orient(N, 'Quaternion', [q0, q1, q2, q3], '222')) + raises(TypeError, lambda: B.orient(N, 'Quaternion', q0)) + raises(TypeError, lambda: B.orient(N, 'Quaternion', [q0, q1, q2])) + raises(NotImplementedError, lambda: B.orient(N, 'Foo', [q0, q1, q2])) + raises(TypeError, lambda: B.orient(N, 'Body', [q1, q2], '232')) + raises(TypeError, lambda: B.orient(N, 'Space', [q1, q2], '232')) + + N.set_ang_acc(B, 0) + assert N.ang_acc_in(B) == Vector(0) + N.set_ang_vel(B, 0) + assert N.ang_vel_in(B) == Vector(0) + + +def test_check_frame(): + raises(VectorTypeError, lambda: _check_frame(0)) + + +def test_dcm_diff_16824(): + # NOTE : This is a regression test for the bug introduced in PR 14758, + # identified in 16824, and solved by 
PR 16828.
+
+    # This is the solution to Problem 2.2 on page 264 in Kane & Levinson's
+    # 1985 book.
+
+    q1, q2, q3 = dynamicsymbols('q1:4')
+
+    s1 = sin(q1)
+    c1 = cos(q1)
+    s2 = sin(q2)
+    c2 = cos(q2)
+    s3 = sin(q3)
+    c3 = cos(q3)
+
+    dcm = Matrix([[c2*c3, s1*s2*c3 - s3*c1, c1*s2*c3 + s3*s1],
+                  [c2*s3, s1*s2*s3 + c3*c1, c1*s2*s3 - c3*s1],
+                  [-s2, s1*c2, c1*c2]])
+
+    A = ReferenceFrame('A')
+    B = ReferenceFrame('B')
+    B.orient(A, 'DCM', dcm)
+
+    AwB = B.ang_vel_in(A)
+
+    alpha2 = s3*c2*q1.diff() + c3*q2.diff()
+    beta2 = s1*c2*q3.diff() + c1*q2.diff()
+
+    assert simplify(AwB.dot(A.y) - alpha2) == 0
+    assert simplify(AwB.dot(B.y) - beta2) == 0
+
+def test_orient_explicit():
+    A = ReferenceFrame('A')
+    B = ReferenceFrame('B')
+    A.orient_explicit(B, eye(3))
+    assert A.dcm(B) == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+
+def test_orient_axis():
+    A = ReferenceFrame('A')
+    B = ReferenceFrame('B')
+    A.orient_axis(B, -B.x, 1)
+    A1 = A.dcm(B)
+    A.orient_axis(B, B.x, -1)
+    A2 = A.dcm(B)
+    A.orient_axis(B, 1, -B.x)
+    A3 = A.dcm(B)
+    assert A1 == A2
+    assert A2 == A3
+    raises(TypeError, lambda: A.orient_axis(B, 1, 1))
+
+def test_orient_body():
+    A = ReferenceFrame('A')
+    B = ReferenceFrame('B')
+    B.orient_body_fixed(A, (1, 1, 0), 'XYX')
+    assert B.dcm(A) == Matrix([[cos(1), sin(1)**2, -sin(1)*cos(1)], [0, cos(1), sin(1)], [sin(1), -sin(1)*cos(1), cos(1)**2]])
+
+
+def test_orient_body_advanced():
+    q1, q2, q3 = dynamicsymbols('q1:4')
+    c1, c2, c3 = symbols('c1:4')
+    u1, u2, u3 = dynamicsymbols('q1:4', 1)
+
+    # Test with everything as dynamicsymbols
+    A, B = ReferenceFrame('A'), ReferenceFrame('B')
+    B.orient_body_fixed(A, (q1, q2, q3), 'zxy')
+    assert A.dcm(B) == Matrix([
+        [-sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), -sin(q1) * cos(q2),
+         sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)],
+        [sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2),
+         sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)],
+        [-sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]])
+    assert B.ang_vel_in(A).to_matrix(B) == Matrix([
+        [-sin(q3) * cos(q2) * u1 + cos(q3) * u2],
+        [sin(q2) * u1 + u3],
+        [sin(q3) * u2 + cos(q2) * cos(q3) * u1]])
+
+    # Test with constant symbol
+    A, B = ReferenceFrame('A'), ReferenceFrame('B')
+    B.orient_body_fixed(A, (q1, c2, q3), 131)
+    assert A.dcm(B) == Matrix([
+        [cos(c2), -sin(c2) * cos(q3), sin(c2) * sin(q3)],
+        [sin(c2) * cos(q1), -sin(q1) * sin(q3) + cos(c2) * cos(q1) * cos(q3),
+         -sin(q1) * cos(q3) - sin(q3) * cos(c2) * cos(q1)],
+        [sin(c2) * sin(q1), sin(q1) * cos(c2) * cos(q3) + sin(q3) * cos(q1),
+         -sin(q1) * sin(q3) * cos(c2) + cos(q1) * cos(q3)]])
+    assert B.ang_vel_in(A).to_matrix(B) == Matrix([
+        [cos(c2) * u1 + u3],
+        [-sin(c2) * cos(q3) * u1],
+        [sin(c2) * sin(q3) * u1]])
+
+    # Test all symbols not time dependent
+    A, B = ReferenceFrame('A'), ReferenceFrame('B')
+    B.orient_body_fixed(A, (c1, c2, c3), 123)
+    assert B.ang_vel_in(A) == Vector(0)
+
+
+def test_orient_space_advanced():
+    # A space-fixed rotation sequence is equivalent to a body-fixed rotation
+    # sequence applied in the opposite order.
+    q1, q2, q3 = dynamicsymbols('q1:4')
+    c1, c2, c3 = symbols('c1:4')
+    u1, u2, u3 = dynamicsymbols('q1:4', 1)
+
+    # Test with everything as dynamicsymbols
+    A, B = ReferenceFrame('A'), ReferenceFrame('B')
+    B.orient_space_fixed(A, (q3, q2, q1), 'yxz')
+    assert A.dcm(B) == Matrix([
+        [-sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), -sin(q1) * cos(q2),
+         sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)],
+        [sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2),
+         sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)],
+
[-sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]]) + assert B.ang_vel_in(A).to_matrix(B) == Matrix([ + [-sin(q3) * cos(q2) * u1 + cos(q3) * u2], + [sin(q2) * u1 + u3], + [sin(q3) * u2 + cos(q2) * cos(q3) * u1]]) + + # Test with constant symbol + A, B = ReferenceFrame('A'), ReferenceFrame('B') + B.orient_space_fixed(A, (q3, c2, q1), 131) + assert A.dcm(B) == Matrix([ + [cos(c2), -sin(c2) * cos(q3), sin(c2) * sin(q3)], + [sin(c2) * cos(q1), -sin(q1) * sin(q3) + cos(c2) * cos(q1) * cos(q3), + -sin(q1) * cos(q3) - sin(q3) * cos(c2) * cos(q1)], + [sin(c2) * sin(q1), sin(q1) * cos(c2) * cos(q3) + sin(q3) * cos(q1), + -sin(q1) * sin(q3) * cos(c2) + cos(q1) * cos(q3)]]) + assert B.ang_vel_in(A).to_matrix(B) == Matrix([ + [cos(c2) * u1 + u3], + [-sin(c2) * cos(q3) * u1], + [sin(c2) * sin(q3) * u1]]) + + # Test all symbols not time dependent + A, B = ReferenceFrame('A'), ReferenceFrame('B') + B.orient_space_fixed(A, (c1, c2, c3), 123) + assert B.ang_vel_in(A) == Vector(0) + + +def test_orient_body_simple_ang_vel(): + """This test ensures that the simplest form of that linear system solution + is returned, thus the == for the expression comparison.""" + + psi, theta, phi = dynamicsymbols('psi, theta, varphi') + t = dynamicsymbols._t + A = ReferenceFrame('A') + B = ReferenceFrame('B') + B.orient_body_fixed(A, (psi, theta, phi), 'ZXZ') + A_w_B = B.ang_vel_in(A) + assert A_w_B.args[0][1] == B + assert A_w_B.args[0][0][0] == (sin(theta)*sin(phi)*psi.diff(t) + + cos(phi)*theta.diff(t)) + assert A_w_B.args[0][0][1] == (sin(theta)*cos(phi)*psi.diff(t) - + sin(phi)*theta.diff(t)) + assert A_w_B.args[0][0][2] == cos(theta)*psi.diff(t) + phi.diff(t) + + +def test_orient_space(): + A = ReferenceFrame('A') + B = ReferenceFrame('B') + B.orient_space_fixed(A, (0,0,0), '123') + assert B.dcm(A) == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + +def test_orient_quaternion(): + A = ReferenceFrame('A') + B = ReferenceFrame('B') + B.orient_quaternion(A, (0,0,0,0)) + assert B.dcm(A) == Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + +def test_looped_frame_warning(): + A = ReferenceFrame('A') + B = ReferenceFrame('B') + C = ReferenceFrame('C') + + a, b, c = symbols('a b c') + B.orient_axis(A, A.x, a) + C.orient_axis(B, B.x, b) + + with warnings.catch_warnings(record = True) as w: + warnings.simplefilter("always") + A.orient_axis(C, C.x, c) + assert issubclass(w[-1].category, UserWarning) + assert 'Loops are defined among the orientation of frames. ' + \ + 'This is likely not desired and may cause errors in your calculations.' 
in str(w[-1].message) + +def test_frame_dict(): + A = ReferenceFrame('A') + B = ReferenceFrame('B') + C = ReferenceFrame('C') + + a, b, c = symbols('a b c') + + B.orient_axis(A, A.x, a) + assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(a), -sin(a)],[0, sin(a), cos(a)]])} + assert B._dcm_dict == {A: Matrix([[1, 0, 0],[0, cos(a), sin(a)],[0, -sin(a), cos(a)]])} + assert C._dcm_dict == {} + + B.orient_axis(C, C.x, b) + # Previous relation is not wiped + assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(a), -sin(a)],[0, sin(a), cos(a)]])} + assert B._dcm_dict == {A: Matrix([[1, 0, 0],[0, cos(a), sin(a)],[0, -sin(a), cos(a)]]), \ + C: Matrix([[1, 0, 0],[0, cos(b), sin(b)],[0, -sin(b), cos(b)]])} + assert C._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]])} + + A.orient_axis(B, B.x, c) + # Previous relation is updated + assert B._dcm_dict == {C: Matrix([[1, 0, 0],[0, cos(b), sin(b)],[0, -sin(b), cos(b)]]),\ + A: Matrix([[1, 0, 0],[0, cos(c), -sin(c)],[0, sin(c), cos(c)]])} + assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(c), sin(c)],[0, -sin(c), cos(c)]])} + assert C._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]])} + +def test_dcm_cache_dict(): + A = ReferenceFrame('A') + B = ReferenceFrame('B') + C = ReferenceFrame('C') + D = ReferenceFrame('D') + + a, b, c = symbols('a b c') + + B.orient_axis(A, A.x, a) + C.orient_axis(B, B.x, b) + D.orient_axis(C, C.x, c) + + assert D._dcm_dict == {C: Matrix([[1, 0, 0],[0, cos(c), sin(c)],[0, -sin(c), cos(c)]])} + assert C._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(b), sin(b)],[0, -sin(b), cos(b)]]), \ + D: Matrix([[1, 0, 0],[0, cos(c), -sin(c)],[0, sin(c), cos(c)]])} + assert B._dcm_dict == {A: Matrix([[1, 0, 0],[0, cos(a), sin(a)],[0, -sin(a), cos(a)]]), \ + C: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]])} + assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(a), -sin(a)],[0, sin(a), cos(a)]])} + + assert D._dcm_dict == D._dcm_cache + + D.dcm(A) # Check calculated dcm relation is stored in _dcm_cache and not in _dcm_dict + assert list(A._dcm_cache.keys()) == [A, B, D] + assert list(D._dcm_cache.keys()) == [C, A] + assert list(A._dcm_dict.keys()) == [B] + assert list(D._dcm_dict.keys()) == [C] + assert A._dcm_dict != A._dcm_cache + + A.orient_axis(B, B.x, b) # _dcm_cache of A is wiped out and new relation is stored. 
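+    # A sketch of the expected bookkeeping, inferred from the assertions
+    # below: reorienting A drops the A-D relation that the earlier dcm()
+    # call cached, so A's _dcm_cache collapses back to its single direct
+    # relation with B, while B keeps its C entry and gains the updated A entry.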
+ assert A._dcm_dict == {B: Matrix([[1, 0, 0],[0, cos(b), sin(b)],[0, -sin(b), cos(b)]])} + assert A._dcm_dict == A._dcm_cache + assert B._dcm_dict == {C: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]]), \ + A: Matrix([[1, 0, 0],[0, cos(b), -sin(b)],[0, sin(b), cos(b)]])} diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_functions.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..4c37a1890c2448995a4e5e8b7ea2dddda040f518 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_functions.py @@ -0,0 +1,508 @@ +from sympy.core.numbers import pi +from sympy.core.singleton import S +from sympy.core.symbol import symbols +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.integrals.integrals import Integral +from sympy.physics.vector import Dyadic, Point, ReferenceFrame, Vector +from sympy.physics.vector.functions import (cross, dot, express, + time_derivative, + kinematic_equations, outer, + partial_velocity, + get_motion_params, dynamicsymbols) +from sympy.testing.pytest import raises + +Vector.simp = True +q1, q2, q3, q4, q5 = symbols('q1 q2 q3 q4 q5') +N = ReferenceFrame('N') +A = N.orientnew('A', 'Axis', [q1, N.z]) +B = A.orientnew('B', 'Axis', [q2, A.x]) +C = B.orientnew('C', 'Axis', [q3, B.y]) + + +def test_dot(): + assert dot(A.x, A.x) == 1 + assert dot(A.x, A.y) == 0 + assert dot(A.x, A.z) == 0 + + assert dot(A.y, A.x) == 0 + assert dot(A.y, A.y) == 1 + assert dot(A.y, A.z) == 0 + + assert dot(A.z, A.x) == 0 + assert dot(A.z, A.y) == 0 + assert dot(A.z, A.z) == 1 + + +def test_dot_different_frames(): + assert dot(N.x, A.x) == cos(q1) + assert dot(N.x, A.y) == -sin(q1) + assert dot(N.x, A.z) == 0 + assert dot(N.y, A.x) == sin(q1) + assert dot(N.y, A.y) == cos(q1) + assert dot(N.y, A.z) == 0 + assert dot(N.z, A.x) == 0 + assert dot(N.z, A.y) == 0 + assert dot(N.z, A.z) == 1 + + assert dot(N.x, A.x + A.y) == sqrt(2)*cos(q1 + pi/4) == dot(A.x + A.y, N.x) + + assert dot(A.x, C.x) == cos(q3) + assert dot(A.x, C.y) == 0 + assert dot(A.x, C.z) == sin(q3) + assert dot(A.y, C.x) == sin(q2)*sin(q3) + assert dot(A.y, C.y) == cos(q2) + assert dot(A.y, C.z) == -sin(q2)*cos(q3) + assert dot(A.z, C.x) == -cos(q2)*sin(q3) + assert dot(A.z, C.y) == sin(q2) + assert dot(A.z, C.z) == cos(q2)*cos(q3) + + +def test_cross(): + assert cross(A.x, A.x) == 0 + assert cross(A.x, A.y) == A.z + assert cross(A.x, A.z) == -A.y + + assert cross(A.y, A.x) == -A.z + assert cross(A.y, A.y) == 0 + assert cross(A.y, A.z) == A.x + + assert cross(A.z, A.x) == A.y + assert cross(A.z, A.y) == -A.x + assert cross(A.z, A.z) == 0 + + +def test_cross_different_frames(): + assert cross(N.x, A.x) == sin(q1)*A.z + assert cross(N.x, A.y) == cos(q1)*A.z + assert cross(N.x, A.z) == -sin(q1)*A.x - cos(q1)*A.y + assert cross(N.y, A.x) == -cos(q1)*A.z + assert cross(N.y, A.y) == sin(q1)*A.z + assert cross(N.y, A.z) == cos(q1)*A.x - sin(q1)*A.y + assert cross(N.z, A.x) == A.y + assert cross(N.z, A.y) == -A.x + assert cross(N.z, A.z) == 0 + + assert cross(N.x, A.x) == sin(q1)*A.z + assert cross(N.x, A.y) == cos(q1)*A.z + assert cross(N.x, A.x + A.y) == sin(q1)*A.z + cos(q1)*A.z + assert cross(A.x + A.y, N.x) == -sin(q1)*A.z - cos(q1)*A.z + + assert cross(A.x, C.x) == sin(q3)*C.y + assert cross(A.x, C.y) == -sin(q3)*C.x + cos(q3)*C.z + assert cross(A.x, C.z) == 
-cos(q3)*C.y + assert cross(C.x, A.x) == -sin(q3)*C.y + assert cross(C.y, A.x) == sin(q3)*C.x - cos(q3)*C.z + assert cross(C.z, A.x) == cos(q3)*C.y + +def test_operator_match(): + """Test that the output of dot, cross, outer functions match + operator behavior. + """ + A = ReferenceFrame('A') + v = A.x + A.y + d = v | v + zerov = Vector(0) + zerod = Dyadic(0) + + # dot products + assert d & d == dot(d, d) + assert d & zerod == dot(d, zerod) + assert zerod & d == dot(zerod, d) + assert d & v == dot(d, v) + assert v & d == dot(v, d) + assert d & zerov == dot(d, zerov) + assert zerov & d == dot(zerov, d) + raises(TypeError, lambda: dot(d, S.Zero)) + raises(TypeError, lambda: dot(S.Zero, d)) + raises(TypeError, lambda: dot(d, 0)) + raises(TypeError, lambda: dot(0, d)) + assert v & v == dot(v, v) + assert v & zerov == dot(v, zerov) + assert zerov & v == dot(zerov, v) + raises(TypeError, lambda: dot(v, S.Zero)) + raises(TypeError, lambda: dot(S.Zero, v)) + raises(TypeError, lambda: dot(v, 0)) + raises(TypeError, lambda: dot(0, v)) + + # cross products + raises(TypeError, lambda: cross(d, d)) + raises(TypeError, lambda: cross(d, zerod)) + raises(TypeError, lambda: cross(zerod, d)) + assert d ^ v == cross(d, v) + assert v ^ d == cross(v, d) + assert d ^ zerov == cross(d, zerov) + assert zerov ^ d == cross(zerov, d) + assert zerov ^ d == cross(zerov, d) + raises(TypeError, lambda: cross(d, S.Zero)) + raises(TypeError, lambda: cross(S.Zero, d)) + raises(TypeError, lambda: cross(d, 0)) + raises(TypeError, lambda: cross(0, d)) + assert v ^ v == cross(v, v) + assert v ^ zerov == cross(v, zerov) + assert zerov ^ v == cross(zerov, v) + raises(TypeError, lambda: cross(v, S.Zero)) + raises(TypeError, lambda: cross(S.Zero, v)) + raises(TypeError, lambda: cross(v, 0)) + raises(TypeError, lambda: cross(0, v)) + + # outer products + raises(TypeError, lambda: outer(d, d)) + raises(TypeError, lambda: outer(d, zerod)) + raises(TypeError, lambda: outer(zerod, d)) + raises(TypeError, lambda: outer(d, v)) + raises(TypeError, lambda: outer(v, d)) + raises(TypeError, lambda: outer(d, zerov)) + raises(TypeError, lambda: outer(zerov, d)) + raises(TypeError, lambda: outer(zerov, d)) + raises(TypeError, lambda: outer(d, S.Zero)) + raises(TypeError, lambda: outer(S.Zero, d)) + raises(TypeError, lambda: outer(d, 0)) + raises(TypeError, lambda: outer(0, d)) + assert v | v == outer(v, v) + assert v | zerov == outer(v, zerov) + assert zerov | v == outer(zerov, v) + raises(TypeError, lambda: outer(v, S.Zero)) + raises(TypeError, lambda: outer(S.Zero, v)) + raises(TypeError, lambda: outer(v, 0)) + raises(TypeError, lambda: outer(0, v)) + + +def test_express(): + assert express(Vector(0), N) == Vector(0) + assert express(S.Zero, N) is S.Zero + assert express(A.x, C) == cos(q3)*C.x + sin(q3)*C.z + assert express(A.y, C) == sin(q2)*sin(q3)*C.x + cos(q2)*C.y - \ + sin(q2)*cos(q3)*C.z + assert express(A.z, C) == -sin(q3)*cos(q2)*C.x + sin(q2)*C.y + \ + cos(q2)*cos(q3)*C.z + assert express(A.x, N) == cos(q1)*N.x + sin(q1)*N.y + assert express(A.y, N) == -sin(q1)*N.x + cos(q1)*N.y + assert express(A.z, N) == N.z + assert express(A.x, A) == A.x + assert express(A.y, A) == A.y + assert express(A.z, A) == A.z + assert express(A.x, B) == B.x + assert express(A.y, B) == cos(q2)*B.y - sin(q2)*B.z + assert express(A.z, B) == sin(q2)*B.y + cos(q2)*B.z + assert express(A.x, C) == cos(q3)*C.x + sin(q3)*C.z + assert express(A.y, C) == sin(q2)*sin(q3)*C.x + cos(q2)*C.y - \ + sin(q2)*cos(q3)*C.z + assert express(A.z, C) == -sin(q3)*cos(q2)*C.x + 
sin(q2)*C.y + \
+        cos(q2)*cos(q3)*C.z
+    # Check to make sure UnitVectors get converted properly
+    assert express(N.x, N) == N.x
+    assert express(N.y, N) == N.y
+    assert express(N.z, N) == N.z
+    assert express(N.x, A) == (cos(q1)*A.x - sin(q1)*A.y)
+    assert express(N.y, A) == (sin(q1)*A.x + cos(q1)*A.y)
+    assert express(N.z, A) == A.z
+    assert express(N.x, B) == (cos(q1)*B.x - sin(q1)*cos(q2)*B.y +
+                               sin(q1)*sin(q2)*B.z)
+    assert express(N.y, B) == (sin(q1)*B.x + cos(q1)*cos(q2)*B.y -
+                               sin(q2)*cos(q1)*B.z)
+    assert express(N.z, B) == (sin(q2)*B.y + cos(q2)*B.z)
+    assert express(N.x, C) == (
+        (cos(q1)*cos(q3) - sin(q1)*sin(q2)*sin(q3))*C.x -
+        sin(q1)*cos(q2)*C.y +
+        (sin(q3)*cos(q1) + sin(q1)*sin(q2)*cos(q3))*C.z)
+    assert express(N.y, C) == (
+        (sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C.x +
+        cos(q1)*cos(q2)*C.y +
+        (sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C.z)
+    assert express(N.z, C) == (-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
+                               cos(q2)*cos(q3)*C.z)
+
+    assert express(A.x, N) == (cos(q1)*N.x + sin(q1)*N.y)
+    assert express(A.y, N) == (-sin(q1)*N.x + cos(q1)*N.y)
+    assert express(A.z, N) == N.z
+    assert express(A.x, A) == A.x
+    assert express(A.y, A) == A.y
+    assert express(A.z, A) == A.z
+    assert express(A.x, B) == B.x
+    assert express(A.y, B) == (cos(q2)*B.y - sin(q2)*B.z)
+    assert express(A.z, B) == (sin(q2)*B.y + cos(q2)*B.z)
+    assert express(A.x, C) == (cos(q3)*C.x + sin(q3)*C.z)
+    assert express(A.y, C) == (sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
+                               sin(q2)*cos(q3)*C.z)
+    assert express(A.z, C) == (-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
+                               cos(q2)*cos(q3)*C.z)
+
+    assert express(B.x, N) == (cos(q1)*N.x + sin(q1)*N.y)
+    assert express(B.y, N) == (-sin(q1)*cos(q2)*N.x +
+                               cos(q1)*cos(q2)*N.y + sin(q2)*N.z)
+    assert express(B.z, N) == (sin(q1)*sin(q2)*N.x -
+                               sin(q2)*cos(q1)*N.y + cos(q2)*N.z)
+    assert express(B.x, A) == A.x
+    assert express(B.y, A) == (cos(q2)*A.y + sin(q2)*A.z)
+    assert express(B.z, A) == (-sin(q2)*A.y + cos(q2)*A.z)
+    assert express(B.x, B) == B.x
+    assert express(B.y, B) == B.y
+    assert express(B.z, B) == B.z
+    assert express(B.x, C) == (cos(q3)*C.x + sin(q3)*C.z)
+    assert express(B.y, C) == C.y
+    assert express(B.z, C) == (-sin(q3)*C.x + cos(q3)*C.z)
+
+    assert express(C.x, N) == (
+        (cos(q1)*cos(q3) - sin(q1)*sin(q2)*sin(q3))*N.x +
+        (sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*N.y -
+        sin(q3)*cos(q2)*N.z)
+    assert express(C.y, N) == (
+        -sin(q1)*cos(q2)*N.x + cos(q1)*cos(q2)*N.y + sin(q2)*N.z)
+    assert express(C.z, N) == (
+        (sin(q3)*cos(q1) + sin(q1)*sin(q2)*cos(q3))*N.x +
+        (sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*N.y +
+        cos(q2)*cos(q3)*N.z)
+    assert express(C.x, A) == (cos(q3)*A.x + sin(q2)*sin(q3)*A.y -
+                               sin(q3)*cos(q2)*A.z)
+    assert express(C.y, A) == (cos(q2)*A.y + sin(q2)*A.z)
+    assert express(C.z, A) == (sin(q3)*A.x - sin(q2)*cos(q3)*A.y +
+                               cos(q2)*cos(q3)*A.z)
+    assert express(C.x, B) == (cos(q3)*B.x - sin(q3)*B.z)
+    assert express(C.y, B) == B.y
+    assert express(C.z, B) == (sin(q3)*B.x + cos(q3)*B.z)
+    assert express(C.x, C) == C.x
+    assert express(C.y, C) == C.y
+    assert express(C.z, C) == C.z
+
+    # Check to make sure Vectors get converted back to UnitVectors
+    assert N.x == express((cos(q1)*A.x - sin(q1)*A.y), N)
+    assert N.y == express((sin(q1)*A.x + cos(q1)*A.y), N)
+    assert N.x == express((cos(q1)*B.x - sin(q1)*cos(q2)*B.y +
+                           sin(q1)*sin(q2)*B.z), N)
+    assert N.y == express((sin(q1)*B.x + cos(q1)*cos(q2)*B.y -
+                           sin(q2)*cos(q1)*B.z), N)
+    assert N.z == express((sin(q2)*B.y + cos(q2)*B.z), N)
+
+    """
+    These don't really test our code; they instead test the auto
+    simplification (or lack thereof) of SymPy.
+    assert N.x == express((
+        (cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*C.x -
+        sin(q1)*cos(q2)*C.y +
+        (sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*C.z), N)
+    assert N.y == express((
+        (sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C.x +
+        cos(q1)*cos(q2)*C.y +
+        (sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C.z), N)
+    assert N.z == express((-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
+                           cos(q2)*cos(q3)*C.z), N)
+    """
+
+    assert A.x == express((cos(q1)*N.x + sin(q1)*N.y), A)
+    assert A.y == express((-sin(q1)*N.x + cos(q1)*N.y), A)
+
+    assert A.y == express((cos(q2)*B.y - sin(q2)*B.z), A)
+    assert A.z == express((sin(q2)*B.y + cos(q2)*B.z), A)
+
+    assert A.x == express((cos(q3)*C.x + sin(q3)*C.z), A)
+
+    # Trigsimp messes up here too.
+    #print express((sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
+    #        sin(q2)*cos(q3)*C.z), A)
+    assert A.y == express((sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
+                           sin(q2)*cos(q3)*C.z), A)
+
+    assert A.z == express((-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
+                           cos(q2)*cos(q3)*C.z), A)
+    assert B.x == express((cos(q1)*N.x + sin(q1)*N.y), B)
+    assert B.y == express((-sin(q1)*cos(q2)*N.x +
+                           cos(q1)*cos(q2)*N.y + sin(q2)*N.z), B)
+
+    assert B.z == express((sin(q1)*sin(q2)*N.x -
+                           sin(q2)*cos(q1)*N.y + cos(q2)*N.z), B)
+
+    assert B.y == express((cos(q2)*A.y + sin(q2)*A.z), B)
+    assert B.z == express((-sin(q2)*A.y + cos(q2)*A.z), B)
+    assert B.x == express((cos(q3)*C.x + sin(q3)*C.z), B)
+    assert B.z == express((-sin(q3)*C.x + cos(q3)*C.z), B)
+
+    """
+    assert C.x == express((
+        (cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*N.x +
+        (sin(q1)*cos(q3)+sin(q2)*sin(q3)*cos(q1))*N.y -
+        sin(q3)*cos(q2)*N.z), C)
+    assert C.y == express((
+        -sin(q1)*cos(q2)*N.x + cos(q1)*cos(q2)*N.y + sin(q2)*N.z), C)
+    assert C.z == express((
+        (sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*N.x +
+        (sin(q1)*sin(q3)-sin(q2)*cos(q1)*cos(q3))*N.y +
+        cos(q2)*cos(q3)*N.z), C)
+    """
+    assert C.x == express((cos(q3)*A.x + sin(q2)*sin(q3)*A.y -
+                           sin(q3)*cos(q2)*A.z), C)
+    assert C.y == express((cos(q2)*A.y + sin(q2)*A.z), C)
+    assert C.z == express((sin(q3)*A.x - sin(q2)*cos(q3)*A.y +
+                           cos(q2)*cos(q3)*A.z), C)
+    assert C.x == express((cos(q3)*B.x - sin(q3)*B.z), C)
+    assert C.z == express((sin(q3)*B.x + cos(q3)*B.z), C)
+
+
+def test_time_derivative():
+    # The use of time_derivative for calculations pertaining to scalar
+    # fields has been tested in test_coordinate_vars in test_essential.py
+    A = ReferenceFrame('A')
+    q = dynamicsymbols('q')
+    qd = dynamicsymbols('q', 1)
+    B = A.orientnew('B', 'Axis', [q, A.z])
+    d = A.x | A.x
+    assert time_derivative(d, B) == (-qd) * (A.y | A.x) + \
+        (-qd) * (A.x | A.y)
+    d1 = A.x | B.y
+    assert time_derivative(d1, A) == - qd*(A.x|B.x)
+    assert time_derivative(d1, B) == - qd*(A.y|B.y)
+    d2 = A.x | B.x
+    assert time_derivative(d2, A) == qd*(A.x|B.y)
+    assert time_derivative(d2, B) == - qd*(A.y|B.x)
+    d3 = A.x | B.z
+    assert time_derivative(d3, A) == 0
+    assert time_derivative(d3, B) == - qd*(A.y|B.z)
+    q1, q2, q3, q4 = dynamicsymbols('q1 q2 q3 q4')
+    q1d, q2d, q3d, q4d = dynamicsymbols('q1 q2 q3 q4', 1)
+    q1dd, q2dd, q3dd, q4dd = dynamicsymbols('q1 q2 q3 q4', 2)
+    C = B.orientnew('C', 'Axis', [q4, B.x])
+    v1 = q1 * A.z
+    v2 = q2*A.x + q3*B.y
+    v3 = q1*A.x + q2*A.y + q3*A.z
+    assert time_derivative(B.x, C) == 0
+    assert time_derivative(B.y, C) == - q4d*B.z
+    assert time_derivative(B.z, C) == q4d*B.y
+    assert time_derivative(v1, B) == q1d*A.z
+    assert time_derivative(v1, C) == - q1*sin(q)*q4d*A.x + \
+        q1*cos(q)*q4d*A.y + q1d*A.z
+    assert
time_derivative(v2, A) == q2d*A.x - q3*qd*B.x + q3d*B.y + assert time_derivative(v2, C) == q2d*A.x - q2*qd*A.y + \ + q2*sin(q)*q4d*A.z + q3d*B.y - q3*q4d*B.z + assert time_derivative(v3, B) == (q2*qd + q1d)*A.x + \ + (-q1*qd + q2d)*A.y + q3d*A.z + assert time_derivative(d, C) == - qd*(A.y|A.x) + \ + sin(q)*q4d*(A.z|A.x) - qd*(A.x|A.y) + sin(q)*q4d*(A.x|A.z) + raises(ValueError, lambda: time_derivative(B.x, C, order=0.5)) + raises(ValueError, lambda: time_derivative(B.x, C, order=-1)) + + +def test_get_motion_methods(): + #Initialization + t = dynamicsymbols._t + s1, s2, s3 = symbols('s1 s2 s3') + S1, S2, S3 = symbols('S1 S2 S3') + S4, S5, S6 = symbols('S4 S5 S6') + t1, t2 = symbols('t1 t2') + a, b, c = dynamicsymbols('a b c') + ad, bd, cd = dynamicsymbols('a b c', 1) + a2d, b2d, c2d = dynamicsymbols('a b c', 2) + v0 = S1*N.x + S2*N.y + S3*N.z + v01 = S4*N.x + S5*N.y + S6*N.z + v1 = s1*N.x + s2*N.y + s3*N.z + v2 = a*N.x + b*N.y + c*N.z + v2d = ad*N.x + bd*N.y + cd*N.z + v2dd = a2d*N.x + b2d*N.y + c2d*N.z + #Test position parameter + assert get_motion_params(frame = N) == (0, 0, 0) + assert get_motion_params(N, position=v1) == (0, 0, v1) + assert get_motion_params(N, position=v2) == (v2dd, v2d, v2) + #Test velocity parameter + assert get_motion_params(N, velocity=v1) == (0, v1, v1 * t) + assert get_motion_params(N, velocity=v1, position=v0, timevalue1=t1) == \ + (0, v1, v0 + v1*(t - t1)) + answer = get_motion_params(N, velocity=v1, position=v2, timevalue1=t1) + answer_expected = (0, v1, v1*t - v1*t1 + v2.subs(t, t1)) + assert answer == answer_expected + + answer = get_motion_params(N, velocity=v2, position=v0, timevalue1=t1) + integral_vector = Integral(a, (t, t1, t))*N.x + Integral(b, (t, t1, t))*N.y \ + + Integral(c, (t, t1, t))*N.z + answer_expected = (v2d, v2, v0 + integral_vector) + assert answer == answer_expected + + #Test acceleration parameter + assert get_motion_params(N, acceleration=v1) == \ + (v1, v1 * t, v1 * t**2/2) + assert get_motion_params(N, acceleration=v1, velocity=v0, + position=v2, timevalue1=t1, timevalue2=t2) == \ + (v1, (v0 + v1*t - v1*t2), + -v0*t1 + v1*t**2/2 + v1*t2*t1 - \ + v1*t1**2/2 + t*(v0 - v1*t2) + \ + v2.subs(t, t1)) + assert get_motion_params(N, acceleration=v1, velocity=v0, + position=v01, timevalue1=t1, timevalue2=t2) == \ + (v1, v0 + v1*t - v1*t2, + -v0*t1 + v01 + v1*t**2/2 + \ + v1*t2*t1 - v1*t1**2/2 + \ + t*(v0 - v1*t2)) + answer = get_motion_params(N, acceleration=a*N.x, velocity=S1*N.x, + position=S2*N.x, timevalue1=t1, timevalue2=t2) + i1 = Integral(a, (t, t2, t)) + answer_expected = (a*N.x, (S1 + i1)*N.x, \ + (S2 + Integral(S1 + i1, (t, t1, t)))*N.x) + assert answer == answer_expected + + +def test_kin_eqs(): + q0, q1, q2, q3 = dynamicsymbols('q0 q1 q2 q3') + q0d, q1d, q2d, q3d = dynamicsymbols('q0 q1 q2 q3', 1) + u1, u2, u3 = dynamicsymbols('u1 u2 u3') + ke = kinematic_equations([u1,u2,u3], [q1,q2,q3], 'body', 313) + assert ke == kinematic_equations([u1,u2,u3], [q1,q2,q3], 'body', '313') + kds = kinematic_equations([u1, u2, u3], [q0, q1, q2, q3], 'quaternion') + assert kds == [-0.5 * q0 * u1 - 0.5 * q2 * u3 + 0.5 * q3 * u2 + q1d, + -0.5 * q0 * u2 + 0.5 * q1 * u3 - 0.5 * q3 * u1 + q2d, + -0.5 * q0 * u3 - 0.5 * q1 * u2 + 0.5 * q2 * u1 + q3d, + 0.5 * q1 * u1 + 0.5 * q2 * u2 + 0.5 * q3 * u3 + q0d] + raises(ValueError, lambda: kinematic_equations([u1, u2, u3], [q0, q1, q2], 'quaternion')) + raises(ValueError, lambda: kinematic_equations([u1, u2, u3], [q0, q1, q2, q3], 'quaternion', '123')) + raises(ValueError, lambda: kinematic_equations([u1, u2, 
u3], [q0, q1, q2, q3], 'foo')) + raises(TypeError, lambda: kinematic_equations(u1, [q0, q1, q2, q3], 'quaternion')) + raises(TypeError, lambda: kinematic_equations([u1], [q0, q1, q2, q3], 'quaternion')) + raises(TypeError, lambda: kinematic_equations([u1, u2, u3], q0, 'quaternion')) + raises(ValueError, lambda: kinematic_equations([u1, u2, u3], [q0, q1, q2, q3], 'body')) + raises(ValueError, lambda: kinematic_equations([u1, u2, u3], [q0, q1, q2, q3], 'space')) + raises(ValueError, lambda: kinematic_equations([u1, u2, u3], [q0, q1, q2], 'body', '222')) + assert kinematic_equations([0, 0, 0], [q0, q1, q2], 'space') == [S.Zero, S.Zero, S.Zero] + + +def test_partial_velocity(): + q1, q2, q3, u1, u2, u3 = dynamicsymbols('q1 q2 q3 u1 u2 u3') + u4, u5 = dynamicsymbols('u4, u5') + r = symbols('r') + + N = ReferenceFrame('N') + Y = N.orientnew('Y', 'Axis', [q1, N.z]) + L = Y.orientnew('L', 'Axis', [q2, Y.x]) + R = L.orientnew('R', 'Axis', [q3, L.y]) + R.set_ang_vel(N, u1 * L.x + u2 * L.y + u3 * L.z) + + C = Point('C') + C.set_vel(N, u4 * L.x + u5 * (Y.z ^ L.x)) + Dmc = C.locatenew('Dmc', r * L.z) + Dmc.v2pt_theory(C, N, R) + + vel_list = [Dmc.vel(N), C.vel(N), R.ang_vel_in(N)] + u_list = [u1, u2, u3, u4, u5] + assert (partial_velocity(vel_list, u_list, N) == + [[- r*L.y, r*L.x, 0, L.x, cos(q2)*L.y - sin(q2)*L.z], + [0, 0, 0, L.x, cos(q2)*L.y - sin(q2)*L.z], + [L.x, L.y, L.z, 0, 0]]) + + # Make sure that partial velocities can be computed regardless if the + # orientation between frames is defined or not. + A = ReferenceFrame('A') + B = ReferenceFrame('B') + v = u4 * A.x + u5 * B.y + assert partial_velocity((v, ), (u4, u5), A) == [[A.x, B.y]] + + raises(TypeError, lambda: partial_velocity(Dmc.vel(N), u_list, N)) + raises(TypeError, lambda: partial_velocity(vel_list, u1, N)) + +def test_dynamicsymbols(): + #Tests to check the assumptions applied to dynamicsymbols + f1 = dynamicsymbols('f1') + f2 = dynamicsymbols('f2', real=True) + f3 = dynamicsymbols('f3', positive=True) + f4, f5 = dynamicsymbols('f4,f5', commutative=False) + f6 = dynamicsymbols('f6', integer=True) + assert f1.is_real is None + assert f2.is_real + assert f3.is_positive + assert f4*f5 != f5*f4 + assert f6.is_integer diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_output.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_output.py new file mode 100644 index 0000000000000000000000000000000000000000..162bfa30ec6a9944e9ef9faf545cc44b2534b509 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_output.py @@ -0,0 +1,76 @@ +from sympy.core.singleton import S +from sympy.physics.vector import Vector, ReferenceFrame, Dyadic +from sympy.testing.pytest import raises + +Vector.simp = True +A = ReferenceFrame('A') + + +def test_output_type(): + A = ReferenceFrame('A') + v = A.x + A.y + d = v | v + zerov = Vector(0) + zerod = Dyadic(0) + + # dot products + assert isinstance(d & d, Dyadic) + assert isinstance(d & zerod, Dyadic) + assert isinstance(zerod & d, Dyadic) + assert isinstance(d & v, Vector) + assert isinstance(v & d, Vector) + assert isinstance(d & zerov, Vector) + assert isinstance(zerov & d, Vector) + raises(TypeError, lambda: d & S.Zero) + raises(TypeError, lambda: S.Zero & d) + raises(TypeError, lambda: d & 0) + raises(TypeError, lambda: 0 & d) + assert not isinstance(v & v, (Vector, Dyadic)) + assert not isinstance(v & zerov, (Vector, Dyadic)) + assert not isinstance(zerov & v, (Vector, Dyadic)) + raises(TypeError, lambda: v & 
S.Zero) + raises(TypeError, lambda: S.Zero & v) + raises(TypeError, lambda: v & 0) + raises(TypeError, lambda: 0 & v) + + # cross products + raises(TypeError, lambda: d ^ d) + raises(TypeError, lambda: d ^ zerod) + raises(TypeError, lambda: zerod ^ d) + assert isinstance(d ^ v, Dyadic) + assert isinstance(v ^ d, Dyadic) + assert isinstance(d ^ zerov, Dyadic) + assert isinstance(zerov ^ d, Dyadic) + assert isinstance(zerov ^ d, Dyadic) + raises(TypeError, lambda: d ^ S.Zero) + raises(TypeError, lambda: S.Zero ^ d) + raises(TypeError, lambda: d ^ 0) + raises(TypeError, lambda: 0 ^ d) + assert isinstance(v ^ v, Vector) + assert isinstance(v ^ zerov, Vector) + assert isinstance(zerov ^ v, Vector) + raises(TypeError, lambda: v ^ S.Zero) + raises(TypeError, lambda: S.Zero ^ v) + raises(TypeError, lambda: v ^ 0) + raises(TypeError, lambda: 0 ^ v) + + # outer products + raises(TypeError, lambda: d | d) + raises(TypeError, lambda: d | zerod) + raises(TypeError, lambda: zerod | d) + raises(TypeError, lambda: d | v) + raises(TypeError, lambda: v | d) + raises(TypeError, lambda: d | zerov) + raises(TypeError, lambda: zerov | d) + raises(TypeError, lambda: zerov | d) + raises(TypeError, lambda: d | S.Zero) + raises(TypeError, lambda: S.Zero | d) + raises(TypeError, lambda: d | 0) + raises(TypeError, lambda: 0 | d) + assert isinstance(v | v, Dyadic) + assert isinstance(v | zerov, Dyadic) + assert isinstance(zerov | v, Dyadic) + raises(TypeError, lambda: v | S.Zero) + raises(TypeError, lambda: S.Zero | v) + raises(TypeError, lambda: v | 0) + raises(TypeError, lambda: 0 | v) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_point.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_point.py new file mode 100644 index 0000000000000000000000000000000000000000..e12edd925f22d77d2fdba7ec7e9b6a037e03eff4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_point.py @@ -0,0 +1,379 @@ +from sympy.physics.vector import dynamicsymbols, Point, ReferenceFrame +from sympy.testing.pytest import raises, ignore_warnings +import warnings + +def test_point_v1pt_theorys(): + q, q2 = dynamicsymbols('q q2') + qd, q2d = dynamicsymbols('q q2', 1) + qdd, q2dd = dynamicsymbols('q q2', 2) + N = ReferenceFrame('N') + B = ReferenceFrame('B') + B.set_ang_vel(N, qd * B.z) + O = Point('O') + P = O.locatenew('P', B.x) + P.set_vel(B, 0) + O.set_vel(N, 0) + assert P.v1pt_theory(O, N, B) == qd * B.y + O.set_vel(N, N.x) + assert P.v1pt_theory(O, N, B) == N.x + qd * B.y + P.set_vel(B, B.z) + assert P.v1pt_theory(O, N, B) == B.z + N.x + qd * B.y + + +def test_point_a1pt_theorys(): + q, q2 = dynamicsymbols('q q2') + qd, q2d = dynamicsymbols('q q2', 1) + qdd, q2dd = dynamicsymbols('q q2', 2) + N = ReferenceFrame('N') + B = ReferenceFrame('B') + B.set_ang_vel(N, qd * B.z) + O = Point('O') + P = O.locatenew('P', B.x) + P.set_vel(B, 0) + O.set_vel(N, 0) + assert P.a1pt_theory(O, N, B) == -(qd**2) * B.x + qdd * B.y + P.set_vel(B, q2d * B.z) + assert P.a1pt_theory(O, N, B) == -(qd**2) * B.x + qdd * B.y + q2dd * B.z + O.set_vel(N, q2d * B.x) + assert P.a1pt_theory(O, N, B) == ((q2dd - qd**2) * B.x + (q2d * qd + qdd) * B.y + + q2dd * B.z) + + +def test_point_v2pt_theorys(): + q = dynamicsymbols('q') + qd = dynamicsymbols('q', 1) + N = ReferenceFrame('N') + B = N.orientnew('B', 'Axis', [q, N.z]) + O = Point('O') + P = O.locatenew('P', 0) + O.set_vel(N, 0) + assert P.v2pt_theory(O, N, B) == 0 + P = O.locatenew('P', B.x) + assert P.v2pt_theory(O, N, B) == 
(qd * B.z ^ B.x) + O.set_vel(N, N.x) + assert P.v2pt_theory(O, N, B) == N.x + qd * B.y + + +def test_point_a2pt_theorys(): + q = dynamicsymbols('q') + qd = dynamicsymbols('q', 1) + qdd = dynamicsymbols('q', 2) + N = ReferenceFrame('N') + B = N.orientnew('B', 'Axis', [q, N.z]) + O = Point('O') + P = O.locatenew('P', 0) + O.set_vel(N, 0) + assert P.a2pt_theory(O, N, B) == 0 + P.set_pos(O, B.x) + assert P.a2pt_theory(O, N, B) == (-qd**2) * B.x + (qdd) * B.y + + +def test_point_funcs(): + q, q2 = dynamicsymbols('q q2') + qd, q2d = dynamicsymbols('q q2', 1) + qdd, q2dd = dynamicsymbols('q q2', 2) + N = ReferenceFrame('N') + B = ReferenceFrame('B') + B.set_ang_vel(N, 5 * B.y) + O = Point('O') + P = O.locatenew('P', q * B.x) + assert P.pos_from(O) == q * B.x + P.set_vel(B, qd * B.x + q2d * B.y) + assert P.vel(B) == qd * B.x + q2d * B.y + O.set_vel(N, 0) + assert O.vel(N) == 0 + assert P.a1pt_theory(O, N, B) == ((-25 * q + qdd) * B.x + (q2dd) * B.y + + (-10 * qd) * B.z) + + B = N.orientnew('B', 'Axis', [q, N.z]) + O = Point('O') + P = O.locatenew('P', 10 * B.x) + O.set_vel(N, 5 * N.x) + assert O.vel(N) == 5 * N.x + assert P.a2pt_theory(O, N, B) == (-10 * qd**2) * B.x + (10 * qdd) * B.y + + B.set_ang_vel(N, 5 * B.y) + O = Point('O') + P = O.locatenew('P', q * B.x) + P.set_vel(B, qd * B.x + q2d * B.y) + O.set_vel(N, 0) + assert P.v1pt_theory(O, N, B) == qd * B.x + q2d * B.y - 5 * q * B.z + + +def test_point_pos(): + q = dynamicsymbols('q') + N = ReferenceFrame('N') + B = N.orientnew('B', 'Axis', [q, N.z]) + O = Point('O') + P = O.locatenew('P', 10 * N.x + 5 * B.x) + assert P.pos_from(O) == 10 * N.x + 5 * B.x + Q = P.locatenew('Q', 10 * N.y + 5 * B.y) + assert Q.pos_from(P) == 10 * N.y + 5 * B.y + assert Q.pos_from(O) == 10 * N.x + 10 * N.y + 5 * B.x + 5 * B.y + assert O.pos_from(Q) == -10 * N.x - 10 * N.y - 5 * B.x - 5 * B.y + +def test_point_partial_velocity(): + + N = ReferenceFrame('N') + A = ReferenceFrame('A') + + p = Point('p') + + u1, u2 = dynamicsymbols('u1, u2') + + p.set_vel(N, u1 * A.x + u2 * N.y) + + assert p.partial_velocity(N, u1) == A.x + assert p.partial_velocity(N, u1, u2) == (A.x, N.y) + raises(ValueError, lambda: p.partial_velocity(A, u1)) + +def test_point_vel(): #Basic functionality + q1, q2 = dynamicsymbols('q1 q2') + N = ReferenceFrame('N') + B = ReferenceFrame('B') + Q = Point('Q') + O = Point('O') + Q.set_pos(O, q1 * N.x) + raises(ValueError , lambda: Q.vel(N)) # Velocity of O in N is not defined + O.set_vel(N, q2 * N.y) + assert O.vel(N) == q2 * N.y + raises(ValueError , lambda : O.vel(B)) #Velocity of O is not defined in B + +def test_auto_point_vel(): + t = dynamicsymbols._t + q1, q2 = dynamicsymbols('q1 q2') + N = ReferenceFrame('N') + B = ReferenceFrame('B') + O = Point('O') + Q = Point('Q') + Q.set_pos(O, q1 * N.x) + O.set_vel(N, q2 * N.y) + assert Q.vel(N) == q1.diff(t) * N.x + q2 * N.y # Velocity of Q using O + P1 = Point('P1') + P1.set_pos(O, q1 * B.x) + P2 = Point('P2') + P2.set_pos(P1, q2 * B.z) + raises(ValueError, lambda : P2.vel(B)) # O's velocity is defined in different frame, and no + #point in between has its velocity defined + raises(ValueError, lambda: P2.vel(N)) # Velocity of O not defined in N + +def test_auto_point_vel_multiple_point_path(): + t = dynamicsymbols._t + q1, q2 = dynamicsymbols('q1 q2') + B = ReferenceFrame('B') + P = Point('P') + P.set_vel(B, q1 * B.x) + P1 = Point('P1') + P1.set_pos(P, q2 * B.y) + P1.set_vel(B, q1 * B.z) + P2 = Point('P2') + P2.set_pos(P1, q1 * B.z) + P3 = Point('P3') + P3.set_pos(P2, 10 * q1 * B.y) + assert P3.vel(B) == 
10 * q1.diff(t) * B.y + (q1 + q1.diff(t)) * B.z + +def test_auto_vel_dont_overwrite(): + t = dynamicsymbols._t + q1, q2, u1 = dynamicsymbols('q1, q2, u1') + N = ReferenceFrame('N') + P = Point('P1') + P.set_vel(N, u1 * N.x) + P1 = Point('P1') + P1.set_pos(P, q2 * N.y) + assert P1.vel(N) == q2.diff(t) * N.y + u1 * N.x + assert P.vel(N) == u1 * N.x + P1.set_vel(N, u1 * N.z) + assert P1.vel(N) == u1 * N.z + +def test_auto_point_vel_if_tree_has_vel_but_inappropriate_pos_vector(): + q1, q2 = dynamicsymbols('q1 q2') + B = ReferenceFrame('B') + S = ReferenceFrame('S') + P = Point('P') + P.set_vel(B, q1 * B.x) + P1 = Point('P1') + P1.set_pos(P, S.y) + raises(ValueError, lambda : P1.vel(B)) # P1.pos_from(P) can't be expressed in B + raises(ValueError, lambda : P1.vel(S)) # P.vel(S) not defined + +def test_auto_point_vel_shortest_path(): + t = dynamicsymbols._t + q1, q2, u1, u2 = dynamicsymbols('q1 q2 u1 u2') + B = ReferenceFrame('B') + P = Point('P') + P.set_vel(B, u1 * B.x) + P1 = Point('P1') + P1.set_pos(P, q2 * B.y) + P1.set_vel(B, q1 * B.z) + P2 = Point('P2') + P2.set_pos(P1, q1 * B.z) + P3 = Point('P3') + P3.set_pos(P2, 10 * q1 * B.y) + P4 = Point('P4') + P4.set_pos(P3, q1 * B.x) + O = Point('O') + O.set_vel(B, u2 * B.y) + O1 = Point('O1') + O1.set_pos(O, q2 * B.z) + P4.set_pos(O1, q1 * B.x + q2 * B.z) + with warnings.catch_warnings(): #There are two possible paths in this point tree, thus a warning is raised + warnings.simplefilter('error') + with ignore_warnings(UserWarning): + assert P4.vel(B) == q1.diff(t) * B.x + u2 * B.y + 2 * q2.diff(t) * B.z + +def test_auto_point_vel_connected_frames(): + t = dynamicsymbols._t + q, q1, q2, u = dynamicsymbols('q q1 q2 u') + N = ReferenceFrame('N') + B = ReferenceFrame('B') + O = Point('O') + O.set_vel(N, u * N.x) + P = Point('P') + P.set_pos(O, q1 * N.x + q2 * B.y) + raises(ValueError, lambda: P.vel(N)) + N.orient(B, 'Axis', (q, B.x)) + assert P.vel(N) == (u + q1.diff(t)) * N.x + q2.diff(t) * B.y - q2 * q.diff(t) * B.z + +def test_auto_point_vel_multiple_paths_warning_arises(): + q, u = dynamicsymbols('q u') + N = ReferenceFrame('N') + O = Point('O') + P = Point('P') + Q = Point('Q') + R = Point('R') + P.set_vel(N, u * N.x) + Q.set_vel(N, u *N.y) + R.set_vel(N, u * N.z) + O.set_pos(P, q * N.z) + O.set_pos(Q, q * N.y) + O.set_pos(R, q * N.x) + with warnings.catch_warnings(): #There are two possible paths in this point tree, thus a warning is raised + warnings.simplefilter("error") + raises(UserWarning ,lambda: O.vel(N)) + +def test_auto_vel_cyclic_warning_arises(): + P = Point('P') + P1 = Point('P1') + P2 = Point('P2') + P3 = Point('P3') + N = ReferenceFrame('N') + P.set_vel(N, N.x) + P1.set_pos(P, N.x) + P2.set_pos(P1, N.y) + P3.set_pos(P2, N.z) + P1.set_pos(P3, N.x + N.y) + with warnings.catch_warnings(): #The path is cyclic at P1, thus a warning is raised + warnings.simplefilter("error") + raises(UserWarning ,lambda: P2.vel(N)) + +def test_auto_vel_cyclic_warning_msg(): + P = Point('P') + P1 = Point('P1') + P2 = Point('P2') + P3 = Point('P3') + N = ReferenceFrame('N') + P.set_vel(N, N.x) + P1.set_pos(P, N.x) + P2.set_pos(P1, N.y) + P3.set_pos(P2, N.z) + P1.set_pos(P3, N.x + N.y) + with warnings.catch_warnings(record = True) as w: #The path is cyclic at P1, thus a warning is raised + warnings.simplefilter("always") + P2.vel(N) + assert issubclass(w[-1].category, UserWarning) + assert 'Kinematic loops are defined among the positions of points. This is likely not desired and may cause errors in your calculations.' 
in str(w[-1].message) + +def test_auto_vel_multiple_path_warning_msg(): + N = ReferenceFrame('N') + O = Point('O') + P = Point('P') + Q = Point('Q') + P.set_vel(N, N.x) + Q.set_vel(N, N.y) + O.set_pos(P, N.z) + O.set_pos(Q, N.y) + with warnings.catch_warnings(record = True) as w: #There are two possible paths in this point tree, thus a warning is raised + warnings.simplefilter("always") + O.vel(N) + assert issubclass(w[-1].category, UserWarning) + assert 'Velocity automatically calculated based on point' in str(w[-1].message) + assert 'Velocities from these points are not necessarily the same. This may cause errors in your calculations.' in str(w[-1].message) + +def test_auto_vel_derivative(): + q1, q2 = dynamicsymbols('q1:3') + u1, u2 = dynamicsymbols('u1:3', 1) + A = ReferenceFrame('A') + B = ReferenceFrame('B') + C = ReferenceFrame('C') + B.orient_axis(A, A.z, q1) + B.set_ang_vel(A, u1 * A.z) + C.orient_axis(B, B.z, q2) + C.set_ang_vel(B, u2 * B.z) + + Am = Point('Am') + Am.set_vel(A, 0) + Bm = Point('Bm') + Bm.set_pos(Am, B.x) + Bm.set_vel(B, 0) + Bm.set_vel(C, 0) + Cm = Point('Cm') + Cm.set_pos(Bm, C.x) + Cm.set_vel(C, 0) + temp = Cm._vel_dict.copy() + assert Cm.vel(A) == (u1 * B.y + (u1 + u2) * C.y) + Cm._vel_dict = temp + Cm.v2pt_theory(Bm, B, C) + assert Cm.vel(A) == (u1 * B.y + (u1 + u2) * C.y) + +def test_auto_point_acc_zero_vel(): + N = ReferenceFrame('N') + O = Point('O') + O.set_vel(N, 0) + assert O.acc(N) == 0 * N.x + +def test_auto_point_acc_compute_vel(): + t = dynamicsymbols._t + q1 = dynamicsymbols('q1') + N = ReferenceFrame('N') + A = ReferenceFrame('A') + A.orient_axis(N, N.z, q1) + + O = Point('O') + O.set_vel(N, 0) + P = Point('P') + P.set_pos(O, A.x) + assert P.acc(N) == -q1.diff(t) ** 2 * A.x + q1.diff(t, 2) * A.y + +def test_auto_acc_derivative(): + # Tests whether the Point.acc method gives the correct acceleration of the + # end point of two linkages in series, while getting minimal information. 
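+    # A sketch of the expected result for two unit-length links rotating
+    # about parallel z axes, applying the two-point acceleration formula
+    # link by link (with u = q' and v = q''):
+    #   a(Bm) = -u1**2 * B.x + v1 * B.y
+    #   a(Cm) = a(Bm) - (u1 + u2)**2 * C.x + (v1 + v2) * C.y
+    # which is what `check` below encodes.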
+ q1, q2 = dynamicsymbols('q1:3') + u1, u2 = dynamicsymbols('q1:3', 1) + v1, v2 = dynamicsymbols('q1:3', 2) + A = ReferenceFrame('A') + B = ReferenceFrame('B') + C = ReferenceFrame('C') + B.orient_axis(A, A.z, q1) + C.orient_axis(B, B.z, q2) + + Am = Point('Am') + Am.set_vel(A, 0) + Bm = Point('Bm') + Bm.set_pos(Am, B.x) + Bm.set_vel(B, 0) + Bm.set_vel(C, 0) + Cm = Point('Cm') + Cm.set_pos(Bm, C.x) + Cm.set_vel(C, 0) + + # Copy dictionaries to later check the calculation using the 2pt_theories + Bm_vel_dict, Cm_vel_dict = Bm._vel_dict.copy(), Cm._vel_dict.copy() + Bm_acc_dict, Cm_acc_dict = Bm._acc_dict.copy(), Cm._acc_dict.copy() + check = -u1 ** 2 * B.x + v1 * B.y - (u1 + u2) ** 2 * C.x + (v1 + v2) * C.y + assert Cm.acc(A) == check + Bm._vel_dict, Cm._vel_dict = Bm_vel_dict, Cm_vel_dict + Bm._acc_dict, Cm._acc_dict = Bm_acc_dict, Cm_acc_dict + Bm.v2pt_theory(Am, A, B) + Cm.v2pt_theory(Bm, A, C) + Bm.a2pt_theory(Am, A, B) + assert Cm.a2pt_theory(Bm, A, C) == check diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_printing.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_printing.py new file mode 100644 index 0000000000000000000000000000000000000000..b0c60c5d17ab30e8c9d4c42e7ed70bd74e30e20a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_printing.py @@ -0,0 +1,331 @@ +# -*- coding: utf-8 -*- + +from sympy.core.function import Function +from sympy.core.symbol import symbols +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import (asin, cos, sin) +from sympy.physics.vector import ReferenceFrame, dynamicsymbols, Dyadic +from sympy.physics.vector.printing import (VectorLatexPrinter, vpprint, + vsprint, vsstrrepr, vlatex) + + +a, b, c = symbols('a, b, c') +alpha, omega, beta = dynamicsymbols('alpha, omega, beta') + +A = ReferenceFrame('A') +N = ReferenceFrame('N') + +v = a ** 2 * N.x + b * N.y + c * sin(alpha) * N.z +w = alpha * N.x + sin(omega) * N.y + alpha * beta * N.z +ww = alpha * N.x + asin(omega) * N.y - alpha.diff() * beta * N.z +o = a/b * N.x + (c+b)/a * N.y + c**2/b * N.z + +y = a ** 2 * (N.x | N.y) + b * (N.y | N.y) + c * sin(alpha) * (N.z | N.y) +x = alpha * (N.x | N.x) + sin(omega) * (N.y | N.z) + alpha * beta * (N.z | N.x) +xx = N.x | (-N.y - N.z) +xx2 = N.x | (N.y + N.z) + +def ascii_vpretty(expr): + return vpprint(expr, use_unicode=False, wrap_line=False) + + +def unicode_vpretty(expr): + return vpprint(expr, use_unicode=True, wrap_line=False) + + +def test_latex_printer(): + r = Function('r')('t') + assert VectorLatexPrinter().doprint(r ** 2) == "r^{2}" + r2 = Function('r^2')('t') + assert VectorLatexPrinter().doprint(r2.diff()) == r'\dot{r^{2}}' + ra = Function('r__a')('t') + assert VectorLatexPrinter().doprint(ra.diff().diff()) == r'\ddot{r^{a}}' + + +def test_vector_pretty_print(): + + # TODO : The unit vectors should print with subscripts but they just + # print as `n_x` instead of making `x` a subscript with unicode. 
+ + # TODO : The pretty print division does not print correctly here: + # w = alpha * N.x + sin(omega) * N.y + alpha / beta * N.z + + expected = """\ + 2 +a n_x + b n_y + c*sin(alpha) n_z\ +""" + uexpected = """\ + 2 +a n_x + b n_y + c⋅sin(α) n_z\ +""" + + assert ascii_vpretty(v) == expected + assert unicode_vpretty(v) == uexpected + + expected = 'alpha n_x + sin(omega) n_y + alpha*beta n_z' + uexpected = 'α n_x + sin(ω) n_y + α⋅β n_z' + + assert ascii_vpretty(w) == expected + assert unicode_vpretty(w) == uexpected + + expected = """\ + 2 +a b + c c +- n_x + ----- n_y + -- n_z +b a b\ +""" + uexpected = """\ + 2 +a b + c c +─ n_x + ───── n_y + ── n_z +b a b\ +""" + + assert ascii_vpretty(o) == expected + assert unicode_vpretty(o) == uexpected + + +def test_vector_latex(): + + a, b, c, d, omega = symbols('a, b, c, d, omega') + + v = (a ** 2 + b / c) * A.x + sqrt(d) * A.y + cos(omega) * A.z + + assert vlatex(v) == (r'(a^{2} + \frac{b}{c})\mathbf{\hat{a}_x} + ' + r'\sqrt{d}\mathbf{\hat{a}_y} + ' + r'\cos{\left(\omega \right)}' + r'\mathbf{\hat{a}_z}') + + theta, omega, alpha, q = dynamicsymbols('theta, omega, alpha, q') + + v = theta * A.x + omega * omega * A.y + (q * alpha) * A.z + + assert vlatex(v) == (r'\theta\mathbf{\hat{a}_x} + ' + r'\omega^{2}\mathbf{\hat{a}_y} + ' + r'\alpha q\mathbf{\hat{a}_z}') + + phi1, phi2, phi3 = dynamicsymbols('phi1, phi2, phi3') + theta1, theta2, theta3 = symbols('theta1, theta2, theta3') + + v = (sin(theta1) * A.x + + cos(phi1) * cos(phi2) * A.y + + cos(theta1 + phi3) * A.z) + + assert vlatex(v) == (r'\sin{\left(\theta_{1} \right)}' + r'\mathbf{\hat{a}_x} + \cos{' + r'\left(\phi_{1} \right)} \cos{' + r'\left(\phi_{2} \right)}\mathbf{\hat{a}_y} + ' + r'\cos{\left(\theta_{1} + ' + r'\phi_{3} \right)}\mathbf{\hat{a}_z}') + + N = ReferenceFrame('N') + + a, b, c, d, omega = symbols('a, b, c, d, omega') + + v = (a ** 2 + b / c) * N.x + sqrt(d) * N.y + cos(omega) * N.z + + expected = (r'(a^{2} + \frac{b}{c})\mathbf{\hat{n}_x} + ' + r'\sqrt{d}\mathbf{\hat{n}_y} + ' + r'\cos{\left(\omega \right)}' + r'\mathbf{\hat{n}_z}') + + assert vlatex(v) == expected + + # Try custom unit vectors. 
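+    # ReferenceFrame's `latexs` argument supplies one LaTeX string per basis
+    # vector, replacing the default \mathbf{\hat{n}_x}-style symbols; the
+    # expected string below relies on that substitution.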
+ + N = ReferenceFrame('N', latexs=(r'\hat{i}', r'\hat{j}', r'\hat{k}')) + + v = (a ** 2 + b / c) * N.x + sqrt(d) * N.y + cos(omega) * N.z + + expected = (r'(a^{2} + \frac{b}{c})\hat{i} + ' + r'\sqrt{d}\hat{j} + ' + r'\cos{\left(\omega \right)}\hat{k}') + assert vlatex(v) == expected + + expected = r'\alpha\mathbf{\hat{n}_x} + \operatorname{asin}{\left(\omega ' \ + r'\right)}\mathbf{\hat{n}_y} - \beta \dot{\alpha}\mathbf{\hat{n}_z}' + assert vlatex(ww) == expected + + expected = r'- \mathbf{\hat{n}_x}\otimes \mathbf{\hat{n}_y} - ' \ + r'\mathbf{\hat{n}_x}\otimes \mathbf{\hat{n}_z}' + assert vlatex(xx) == expected + + expected = r'\mathbf{\hat{n}_x}\otimes \mathbf{\hat{n}_y} + ' \ + r'\mathbf{\hat{n}_x}\otimes \mathbf{\hat{n}_z}' + assert vlatex(xx2) == expected + + +def test_vector_latex_arguments(): + assert vlatex(N.x * 3.0, full_prec=False) == r'3.0\mathbf{\hat{n}_x}' + assert vlatex(N.x * 3.0, full_prec=True) == r'3.00000000000000\mathbf{\hat{n}_x}' + + +def test_vector_latex_with_functions(): + + N = ReferenceFrame('N') + + omega, alpha = dynamicsymbols('omega, alpha') + + v = omega.diff() * N.x + + assert vlatex(v) == r'\dot{\omega}\mathbf{\hat{n}_x}' + + v = omega.diff() ** alpha * N.x + + assert vlatex(v) == (r'\dot{\omega}^{\alpha}' + r'\mathbf{\hat{n}_x}') + + +def test_dyadic_pretty_print(): + + expected = """\ + 2 +a n_x|n_y + b n_y|n_y + c*sin(alpha) n_z|n_y\ +""" + + uexpected = """\ + 2 +a n_x⊗n_y + b n_y⊗n_y + c⋅sin(α) n_z⊗n_y\ +""" + assert ascii_vpretty(y) == expected + assert unicode_vpretty(y) == uexpected + + expected = 'alpha n_x|n_x + sin(omega) n_y|n_z + alpha*beta n_z|n_x' + uexpected = 'α n_x⊗n_x + sin(ω) n_y⊗n_z + α⋅β n_z⊗n_x' + assert ascii_vpretty(x) == expected + assert unicode_vpretty(x) == uexpected + + assert ascii_vpretty(Dyadic([])) == '0' + assert unicode_vpretty(Dyadic([])) == '0' + + assert ascii_vpretty(xx) == '- n_x|n_y - n_x|n_z' + assert unicode_vpretty(xx) == '- n_x⊗n_y - n_x⊗n_z' + + assert ascii_vpretty(xx2) == 'n_x|n_y + n_x|n_z' + assert unicode_vpretty(xx2) == 'n_x⊗n_y + n_x⊗n_z' + + +def test_dyadic_latex(): + + expected = (r'a^{2}\mathbf{\hat{n}_x}\otimes \mathbf{\hat{n}_y} + ' + r'b\mathbf{\hat{n}_y}\otimes \mathbf{\hat{n}_y} + ' + r'c \sin{\left(\alpha \right)}' + r'\mathbf{\hat{n}_z}\otimes \mathbf{\hat{n}_y}') + + assert vlatex(y) == expected + + expected = (r'\alpha\mathbf{\hat{n}_x}\otimes \mathbf{\hat{n}_x} + ' + r'\sin{\left(\omega \right)}\mathbf{\hat{n}_y}' + r'\otimes \mathbf{\hat{n}_z} + ' + r'\alpha \beta\mathbf{\hat{n}_z}\otimes \mathbf{\hat{n}_x}') + + assert vlatex(x) == expected + + assert vlatex(Dyadic([])) == '0' + + +def test_dyadic_str(): + assert vsprint(Dyadic([])) == '0' + assert vsprint(y) == 'a**2*(N.x|N.y) + b*(N.y|N.y) + c*sin(alpha)*(N.z|N.y)' + assert vsprint(x) == 'alpha*(N.x|N.x) + sin(omega)*(N.y|N.z) + alpha*beta*(N.z|N.x)' + assert vsprint(ww) == "alpha*N.x + asin(omega)*N.y - beta*alpha'*N.z" + assert vsprint(xx) == '- (N.x|N.y) - (N.x|N.z)' + assert vsprint(xx2) == '(N.x|N.y) + (N.x|N.z)' + + +def test_vlatex(): # vlatex is broken #12078 + from sympy.physics.vector import vlatex + + x = symbols('x') + J = symbols('J') + + f = Function('f') + g = Function('g') + h = Function('h') + + expected = r'J \left(\frac{d}{d x} g{\left(x \right)} - \frac{d}{d x} h{\left(x \right)}\right)' + + expr = J*f(x).diff(x).subs(f(x), g(x)-h(x)) + + assert vlatex(expr) == expected + + +def test_issue_13354(): + """ + Test for proper pretty printing of physics vectors with ADD + instances in arguments. 
+ + Test is exactly the one suggested in the original bug report by + @moorepants. + """ + + a, b, c = symbols('a, b, c') + A = ReferenceFrame('A') + v = a * A.x + b * A.y + c * A.z + w = b * A.x + c * A.y + a * A.z + z = w + v + + expected = """(a + b) a_x + (b + c) a_y + (a + c) a_z""" + + assert ascii_vpretty(z) == expected + + +def test_vector_derivative_printing(): + # First order + v = omega.diff() * N.x + assert unicode_vpretty(v) == 'ω̇ n_x' + assert ascii_vpretty(v) == "omega'(t) n_x" + + # Second order + v = omega.diff().diff() * N.x + + assert vlatex(v) == r'\ddot{\omega}\mathbf{\hat{n}_x}' + assert unicode_vpretty(v) == 'ω̈ n_x' + assert ascii_vpretty(v) == "omega''(t) n_x" + + # Third order + v = omega.diff().diff().diff() * N.x + + assert vlatex(v) == r'\dddot{\omega}\mathbf{\hat{n}_x}' + assert unicode_vpretty(v) == 'ω⃛ n_x' + assert ascii_vpretty(v) == "omega'''(t) n_x" + + # Fourth order + v = omega.diff().diff().diff().diff() * N.x + + assert vlatex(v) == r'\ddddot{\omega}\mathbf{\hat{n}_x}' + assert unicode_vpretty(v) == 'ω⃜ n_x' + assert ascii_vpretty(v) == "omega''''(t) n_x" + + # Fifth order + v = omega.diff().diff().diff().diff().diff() * N.x + + assert vlatex(v) == r'\frac{d^{5}}{d t^{5}} \omega\mathbf{\hat{n}_x}' + assert unicode_vpretty(v) == ' 5\n d\n───(ω) n_x\n 5\ndt' + assert ascii_vpretty(v) == ' 5\n d\n---(omega) n_x\n 5\ndt' + + +def test_vector_str_printing(): + assert vsprint(w) == 'alpha*N.x + sin(omega)*N.y + alpha*beta*N.z' + assert vsprint(omega.diff() * N.x) == "omega'*N.x" + assert vsstrrepr(w) == 'alpha*N.x + sin(omega)*N.y + alpha*beta*N.z' + + +def test_vector_str_arguments(): + assert vsprint(N.x * 3.0, full_prec=False) == '3.0*N.x' + assert vsprint(N.x * 3.0, full_prec=True) == '3.00000000000000*N.x' + + +def test_issue_14041(): + import sympy.physics.mechanics as me + + A_frame = me.ReferenceFrame('A') + thetad, phid = me.dynamicsymbols('theta, phi', 1) + L = symbols('L') + + assert vlatex(L*(phid + thetad)**2*A_frame.x) == \ + r"L \left(\dot{\phi} + \dot{\theta}\right)^{2}\mathbf{\hat{a}_x}" + assert vlatex((phid + thetad)**2*A_frame.x) == \ + r"\left(\dot{\phi} + \dot{\theta}\right)^{2}\mathbf{\hat{a}_x}" + assert vlatex((phid*thetad)**a*A_frame.x) == \ + r"\left(\dot{\phi} \dot{\theta}\right)^{a}\mathbf{\hat{a}_x}" diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_vector.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_vector.py new file mode 100644 index 0000000000000000000000000000000000000000..19701ddf250a97649758a0994ad3c49c742fe800 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/tests/test_vector.py @@ -0,0 +1,247 @@ +from sympy.core.numbers import (Float, pi) +from sympy.core.symbol import symbols +from sympy.core.sorting import ordered +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix +from sympy.physics.vector import ReferenceFrame, Vector, dynamicsymbols, dot +from sympy.physics.vector.vector import VectorTypeError +from sympy.abc import x, y, z +from sympy.testing.pytest import raises + + +Vector.simp = True +A = ReferenceFrame('A') + + +def test_free_dynamicsymbols(): + A, B, C, D = symbols('A, B, C, D', cls=ReferenceFrame) + a, b, c, d, e, f = dynamicsymbols('a, b, c, d, e, f') + B.orient_axis(A, a, A.x) + C.orient_axis(B, b, B.y) + D.orient_axis(C, c, C.x) + + v = d*D.x + e*D.y + f*D.z + + assert set(ordered(v.free_dynamicsymbols(A))) == {a, b, c, 
d, e, f} + assert set(ordered(v.free_dynamicsymbols(B))) == {b, c, d, e, f} + assert set(ordered(v.free_dynamicsymbols(C))) == {c, d, e, f} + assert set(ordered(v.free_dynamicsymbols(D))) == {d, e, f} + + +def test_Vector(): + assert A.x != A.y + assert A.y != A.z + assert A.z != A.x + + assert A.x + 0 == A.x + + v1 = x*A.x + y*A.y + z*A.z + v2 = x**2*A.x + y**2*A.y + z**2*A.z + v3 = v1 + v2 + v4 = v1 - v2 + + assert isinstance(v1, Vector) + assert dot(v1, A.x) == x + assert dot(v1, A.y) == y + assert dot(v1, A.z) == z + + assert isinstance(v2, Vector) + assert dot(v2, A.x) == x**2 + assert dot(v2, A.y) == y**2 + assert dot(v2, A.z) == z**2 + + assert isinstance(v3, Vector) + # We probably shouldn't be using simplify in dot... + assert dot(v3, A.x) == x**2 + x + assert dot(v3, A.y) == y**2 + y + assert dot(v3, A.z) == z**2 + z + + assert isinstance(v4, Vector) + # We probably shouldn't be using simplify in dot... + assert dot(v4, A.x) == x - x**2 + assert dot(v4, A.y) == y - y**2 + assert dot(v4, A.z) == z - z**2 + + assert v1.to_matrix(A) == Matrix([[x], [y], [z]]) + q = symbols('q') + B = A.orientnew('B', 'Axis', (q, A.x)) + assert v1.to_matrix(B) == Matrix([[x], + [ y * cos(q) + z * sin(q)], + [-y * sin(q) + z * cos(q)]]) + + #Test the separate method + B = ReferenceFrame('B') + v5 = x*A.x + y*A.y + z*B.z + assert Vector(0).separate() == {} + assert v1.separate() == {A: v1} + assert v5.separate() == {A: x*A.x + y*A.y, B: z*B.z} + + #Test the free_symbols property + v6 = x*A.x + y*A.y + z*A.z + assert v6.free_symbols(A) == {x,y,z} + + raises(TypeError, lambda: v3.applyfunc(v1)) + + +def test_Vector_diffs(): + q1, q2, q3, q4 = dynamicsymbols('q1 q2 q3 q4') + q1d, q2d, q3d, q4d = dynamicsymbols('q1 q2 q3 q4', 1) + q1dd, q2dd, q3dd, q4dd = dynamicsymbols('q1 q2 q3 q4', 2) + N = ReferenceFrame('N') + A = N.orientnew('A', 'Axis', [q3, N.z]) + B = A.orientnew('B', 'Axis', [q2, A.x]) + v1 = q2 * A.x + q3 * N.y + v2 = q3 * B.x + v1 + v3 = v1.dt(B) + v4 = v2.dt(B) + v5 = q1*A.x + q2*A.y + q3*A.z + + assert v1.dt(N) == q2d * A.x + q2 * q3d * A.y + q3d * N.y + assert v1.dt(A) == q2d * A.x + q3 * q3d * N.x + q3d * N.y + assert v1.dt(B) == (q2d * A.x + q3 * q3d * N.x + q3d * + N.y - q3 * cos(q3) * q2d * N.z) + assert v2.dt(N) == (q2d * A.x + (q2 + q3) * q3d * A.y + q3d * B.x + q3d * + N.y) + assert v2.dt(A) == q2d * A.x + q3d * B.x + q3 * q3d * N.x + q3d * N.y + assert v2.dt(B) == (q2d * A.x + q3d * B.x + q3 * q3d * N.x + q3d * N.y - + q3 * cos(q3) * q2d * N.z) + assert v3.dt(N) == (q2dd * A.x + q2d * q3d * A.y + (q3d**2 + q3 * q3dd) * + N.x + q3dd * N.y + (q3 * sin(q3) * q2d * q3d - + cos(q3) * q2d * q3d - q3 * cos(q3) * q2dd) * N.z) + assert v3.dt(A) == (q2dd * A.x + (2 * q3d**2 + q3 * q3dd) * N.x + (q3dd - + q3 * q3d**2) * N.y + (q3 * sin(q3) * q2d * q3d - + cos(q3) * q2d * q3d - q3 * cos(q3) * q2dd) * N.z) + assert v3.dt(B) == (q2dd * A.x - q3 * cos(q3) * q2d**2 * A.y + (2 * + q3d**2 + q3 * q3dd) * N.x + (q3dd - q3 * q3d**2) * + N.y + (2 * q3 * sin(q3) * q2d * q3d - 2 * cos(q3) * + q2d * q3d - q3 * cos(q3) * q2dd) * N.z) + assert v4.dt(N) == (q2dd * A.x + q3d * (q2d + q3d) * A.y + q3dd * B.x + + (q3d**2 + q3 * q3dd) * N.x + q3dd * N.y + (q3 * + sin(q3) * q2d * q3d - cos(q3) * q2d * q3d - q3 * + cos(q3) * q2dd) * N.z) + assert v4.dt(A) == (q2dd * A.x + q3dd * B.x + (2 * q3d**2 + q3 * q3dd) * + N.x + (q3dd - q3 * q3d**2) * N.y + (q3 * sin(q3) * + q2d * q3d - cos(q3) * q2d * q3d - q3 * cos(q3) * + q2dd) * N.z) + assert v4.dt(B) == (q2dd * A.x - q3 * cos(q3) * q2d**2 * A.y + q3dd * B.x + + (2 * 
q3d**2 + q3 * q3dd) * N.x + (q3dd - q3 * q3d**2) * + N.y + (2 * q3 * sin(q3) * q2d * q3d - 2 * cos(q3) * + q2d * q3d - q3 * cos(q3) * q2dd) * N.z) + assert v5.dt(B) == q1d*A.x + (q3*q2d + q2d)*A.y + (-q2*q2d + q3d)*A.z + assert v5.dt(A) == q1d*A.x + q2d*A.y + q3d*A.z + assert v5.dt(N) == (-q2*q3d + q1d)*A.x + (q1*q3d + q2d)*A.y + q3d*A.z + assert v3.diff(q1d, N) == 0 + assert v3.diff(q2d, N) == A.x - q3 * cos(q3) * N.z + assert v3.diff(q3d, N) == q3 * N.x + N.y + assert v3.diff(q1d, A) == 0 + assert v3.diff(q2d, A) == A.x - q3 * cos(q3) * N.z + assert v3.diff(q3d, A) == q3 * N.x + N.y + assert v3.diff(q1d, B) == 0 + assert v3.diff(q2d, B) == A.x - q3 * cos(q3) * N.z + assert v3.diff(q3d, B) == q3 * N.x + N.y + assert v4.diff(q1d, N) == 0 + assert v4.diff(q2d, N) == A.x - q3 * cos(q3) * N.z + assert v4.diff(q3d, N) == B.x + q3 * N.x + N.y + assert v4.diff(q1d, A) == 0 + assert v4.diff(q2d, A) == A.x - q3 * cos(q3) * N.z + assert v4.diff(q3d, A) == B.x + q3 * N.x + N.y + assert v4.diff(q1d, B) == 0 + assert v4.diff(q2d, B) == A.x - q3 * cos(q3) * N.z + assert v4.diff(q3d, B) == B.x + q3 * N.x + N.y + + # diff() should only express vector components in the derivative frame if + # the orientation of the component's frame depends on the variable + v6 = q2**2*N.y + q2**2*A.y + q2**2*B.y + # already expressed in N + n_measy = 2*q2 + # A_C_N does not depend on q2, so don't express in N + a_measy = 2*q2 + # B_C_N depends on q2, so express in N + b_measx = (q2**2*B.y).dot(N.x).diff(q2) + b_measy = (q2**2*B.y).dot(N.y).diff(q2) + b_measz = (q2**2*B.y).dot(N.z).diff(q2) + n_comp, a_comp = v6.diff(q2, N).args + assert len(v6.diff(q2, N).args) == 2 # only N and A parts + assert n_comp[1] == N + assert a_comp[1] == A + assert n_comp[0] == Matrix([b_measx, b_measy + n_measy, b_measz]) + assert a_comp[0] == Matrix([0, a_measy, 0]) + + +def test_vector_var_in_dcm(): + + N = ReferenceFrame('N') + A = ReferenceFrame('A') + B = ReferenceFrame('B') + u1, u2, u3, u4 = dynamicsymbols('u1 u2 u3 u4') + + v = u1 * u2 * A.x + u3 * N.y + u4**2 * N.z + + assert v.diff(u1, N, var_in_dcm=False) == u2 * A.x + assert v.diff(u1, A, var_in_dcm=False) == u2 * A.x + assert v.diff(u3, N, var_in_dcm=False) == N.y + assert v.diff(u3, A, var_in_dcm=False) == N.y + assert v.diff(u3, B, var_in_dcm=False) == N.y + assert v.diff(u4, N, var_in_dcm=False) == 2 * u4 * N.z + + raises(ValueError, lambda: v.diff(u1, N)) + + +def test_vector_simplify(): + x, y, z, k, n, m, w, f, s, A = symbols('x, y, z, k, n, m, w, f, s, A') + N = ReferenceFrame('N') + + test1 = (1 / x + 1 / y) * N.x + assert (test1 & N.x) != (x + y) / (x * y) + test1 = test1.simplify() + assert (test1 & N.x) == (x + y) / (x * y) + + test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * N.x + test2 = test2.simplify() + assert (test2 & N.x) == (A**2 * s**4 / (4 * pi * k * m**3)) + + test3 = ((4 + 4 * x - 2 * (2 + 2 * x)) / (2 + 2 * x)) * N.x + test3 = test3.simplify() + assert (test3 & N.x) == 0 + + test4 = ((-4 * x * y**2 - 2 * y**3 - 2 * x**2 * y) / (x + y)**2) * N.x + test4 = test4.simplify() + assert (test4 & N.x) == -2 * y + + +def test_vector_evalf(): + a, b = symbols('a b') + v = pi * A.x + assert v.evalf(2) == Float('3.1416', 2) * A.x + v = pi * A.x + 5 * a * A.y - b * A.z + assert v.evalf(3) == Float('3.1416', 3) * A.x + Float('5', 3) * a * A.y - b * A.z + assert v.evalf(5, subs={a: 1.234, b:5.8973}) == Float('3.1415926536', 5) * A.x + Float('6.17', 5) * A.y - Float('5.8973', 5) * A.z + + +def test_vector_angle(): + A = ReferenceFrame('A') + v1 = A.x + A.y + v2 = A.z + 
assert v1.angle_between(v2) == pi/2
+    B = ReferenceFrame('B')
+    B.orient_axis(A, A.x, pi)
+    v3 = A.x
+    v4 = B.x
+    assert v3.angle_between(v4) == 0
+
+
+def test_vector_xreplace():
+    x, y, z = symbols('x y z')
+    v = x**2 * A.x + x*y * A.y + x*y*z * A.z
+    assert v.xreplace({x : cos(x)}) == cos(x)**2 * A.x + y*cos(x) * A.y + y*z*cos(x) * A.z
+    assert v.xreplace({x*y : pi}) == x**2 * A.x + pi * A.y + x*y*z * A.z
+    assert v.xreplace({x*y*z : 1}) == x**2*A.x + x*y*A.y + A.z
+    assert v.xreplace({x:1, z:0}) == A.x + y * A.y
+    raises(TypeError, lambda: v.xreplace())
+    raises(TypeError, lambda: v.xreplace([x, y]))
+
+def test_issue_23366():
+    u1 = dynamicsymbols('u1')
+    N = ReferenceFrame('N')
+    N_v_A = u1*N.x
+    raises(VectorTypeError, lambda: N_v_A.diff(N, u1))
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/vector.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/vector.py
new file mode 100644
index 0000000000000000000000000000000000000000..2130587a569dbb258556aca93bc3a293cef98ea2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/vector.py
@@ -0,0 +1,858 @@
+from sympy.core.backend import (S, sympify, expand, sqrt, Add, zeros, acos,
+                                ImmutableMatrix as Matrix, _simplify_matrix)
+from sympy.simplify.trigsimp import trigsimp
+from sympy.printing.defaults import Printable
+from sympy.utilities.misc import filldedent
+from sympy.core.evalf import EvalfMixin
+
+from mpmath.libmp.libmpf import prec_to_dps
+
+
+__all__ = ['Vector']
+
+
+class Vector(Printable, EvalfMixin):
+    """The class used to define vectors.
+
+    It, along with ReferenceFrame, is one of the building blocks for
+    describing a classical mechanics system in PyDy and
+    sympy.physics.vector.
+
+    Attributes
+    ==========
+
+    simp : Boolean
+        Lets certain methods use trigsimp on their outputs
+
+    """
+
+    simp = False
+    is_number = False
+
+    def __init__(self, inlist):
+        """This is the constructor for the Vector class. You should not be
+        calling this; it should only be used by other functions. You should
+        treat Vectors as you would if you were doing the math by hand,
+        building them up from the standard basis vectors of a
+        ReferenceFrame.
+
+        The only exception is to create a zero vector:
+        zv = Vector(0)
+
+        """
+
+        self.args = []
+        if inlist == 0:
+            inlist = []
+        if isinstance(inlist, dict):
+            d = inlist
+        else:
+            d = {}
+            for inp in inlist:
+                if inp[1] in d:
+                    d[inp[1]] += inp[0]
+                else:
+                    d[inp[1]] = inp[0]
+
+        for k, v in d.items():
+            if v != Matrix([0, 0, 0]):
+                self.args.append((v, k))
+
+    @property
+    def func(self):
+        """Returns the class Vector. """
+        return Vector
+
+    def __hash__(self):
+        return hash(tuple(self.args))
+
+    def __add__(self, other):
+        """The add operator for Vector. """
+        if other == 0:
+            return self
+        other = _check_vector(other)
+        return Vector(self.args + other.args)
+
+    def __and__(self, other):
+        """Dot product of two vectors.
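+
+        Both operands may have components in several frames; measure
+        numbers expressed in different frames are related through the
+        direction cosine matrix. Reading the loop in the implementation
+        below, each pairwise contribution is ``(w.T * G.dcm(F) * v)[0]``
+        for measure-number columns ``v`` and ``w`` expressed in frames
+        ``F`` and ``G``, so the result is frame independent.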
+
+        Returns a scalar, the dot product of the two Vectors
+
+        Parameters
+        ==========
+
+        other : Vector
+            The Vector which we are dotting with
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame, dot
+        >>> from sympy import symbols
+        >>> q1 = symbols('q1')
+        >>> N = ReferenceFrame('N')
+        >>> dot(N.x, N.x)
+        1
+        >>> dot(N.x, N.y)
+        0
+        >>> A = N.orientnew('A', 'Axis', [q1, N.x])
+        >>> dot(N.y, A.y)
+        cos(q1)
+
+        """
+
+        from sympy.physics.vector.dyadic import Dyadic
+        if isinstance(other, Dyadic):
+            return NotImplemented
+        other = _check_vector(other)
+        out = S.Zero
+        for i, v1 in enumerate(self.args):
+            for j, v2 in enumerate(other.args):
+                out += ((v2[0].T)
+                        * (v2[1].dcm(v1[1]))
+                        * (v1[0]))[0]
+        if Vector.simp:
+            return trigsimp(out, recursive=True)
+        else:
+            return out
+
+    def __truediv__(self, other):
+        """Divides the Vector by a sympifyable expression; implemented as
+        multiplication by ``1/other``. """
+        return self.__mul__(S.One / other)
+
+    def __eq__(self, other):
+        """Tests for equality.
+
+        It is very important to note that this is only as good as the SymPy
+        equality test; False does not always mean they are not equivalent
+        Vectors.
+        If other is 0, and self is empty, returns True.
+        If other is 0 and self is not empty, returns False.
+        If none of the above, only accepts other as a Vector.
+
+        """
+
+        if other == 0:
+            other = Vector(0)
+        try:
+            other = _check_vector(other)
+        except TypeError:
+            return False
+        if (self.args == []) and (other.args == []):
+            return True
+        elif (self.args == []) or (other.args == []):
+            return False
+
+        frame = self.args[0][1]
+        for v in frame:
+            if expand((self - other) & v) != 0:
+                return False
+        return True
+
+    def __mul__(self, other):
+        """Multiplies the Vector by a sympifyable expression.
+
+        Parameters
+        ==========
+
+        other : Sympifyable
+            The scalar to multiply this Vector with
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> from sympy import Symbol
+        >>> N = ReferenceFrame('N')
+        >>> b = Symbol('b')
+        >>> V = 10 * b * N.x
+        >>> print(V)
+        10*b*N.x
+
+        """
+
+        newlist = list(self.args)
+        other = sympify(other)
+        for i, v in enumerate(newlist):
+            newlist[i] = (other * newlist[i][0], newlist[i][1])
+        return Vector(newlist)
+
+    def __neg__(self):
+        return self * -1
+
+    def __or__(self, other):
+        """Outer product between two Vectors.
+ + A rank increasing operation, which returns a Dyadic from two Vectors + + Parameters + ========== + + other : Vector + The Vector to take the outer product with + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer + >>> N = ReferenceFrame('N') + >>> outer(N.x, N.x) + (N.x|N.x) + + """ + + from sympy.physics.vector.dyadic import Dyadic + other = _check_vector(other) + ol = Dyadic(0) + for i, v in enumerate(self.args): + for i2, v2 in enumerate(other.args): + # it looks this way because if we are in the same frame and + # use the enumerate function on the same frame in a nested + # fashion, then bad things happen + ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)]) + ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)]) + ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)]) + ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)]) + ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)]) + ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)]) + ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)]) + ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)]) + ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)]) + return ol + + def _latex(self, printer): + """Latex Printing method. """ + + ar = self.args # just to shorten things + if len(ar) == 0: + return str(0) + ol = [] # output list, to be concatenated to a string + for i, v in enumerate(ar): + for j in 0, 1, 2: + # if the coef of the basis vector is 1, we skip the 1 + if ar[i][0][j] == 1: + ol.append(' + ' + ar[i][1].latex_vecs[j]) + # if the coef of the basis vector is -1, we skip the 1 + elif ar[i][0][j] == -1: + ol.append(' - ' + ar[i][1].latex_vecs[j]) + elif ar[i][0][j] != 0: + # If the coefficient of the basis vector is not 1 or -1; + # also, we might wrap it in parentheses, for readability. + arg_str = printer._print(ar[i][0][j]) + if isinstance(ar[i][0][j], Add): + arg_str = "(%s)" % arg_str + if arg_str[0] == '-': + arg_str = arg_str[1:] + str_start = ' - ' + else: + str_start = ' + ' + ol.append(str_start + arg_str + ar[i][1].latex_vecs[j]) + outstr = ''.join(ol) + if outstr.startswith(' + '): + outstr = outstr[3:] + elif outstr.startswith(' '): + outstr = outstr[1:] + return outstr + + def _pretty(self, printer): + """Pretty Printing method. """ + from sympy.printing.pretty.stringpict import prettyForm + e = self + + class Fake: + + def render(self, *args, **kwargs): + ar = e.args # just to shorten things + if len(ar) == 0: + return str(0) + pforms = [] # output list, to be concatenated to a string + for i, v in enumerate(ar): + for j in 0, 1, 2: + # if the coef of the basis vector is 1, we skip the 1 + if ar[i][0][j] == 1: + pform = printer._print(ar[i][1].pretty_vecs[j]) + # if the coef of the basis vector is -1, we skip the 1 + elif ar[i][0][j] == -1: + pform = printer._print(ar[i][1].pretty_vecs[j]) + pform = prettyForm(*pform.left(" - ")) + bin = prettyForm.NEG + pform = prettyForm(binding=bin, *pform) + elif ar[i][0][j] != 0: + # If the basis vector coeff is not 1 or -1, + # we might wrap it in parentheses, for readability. 
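+                        # (When wrapping is needed, parens() below returns
+                        # the (string, baseline) pair of the parenthesized
+                        # picture, which is rebuilt into a prettyForm.)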
+ pform = printer._print(ar[i][0][j]) + + if isinstance(ar[i][0][j], Add): + tmp = pform.parens() + pform = prettyForm(tmp[0], tmp[1]) + + pform = prettyForm(*pform.right( + " ", ar[i][1].pretty_vecs[j])) + else: + continue + pforms.append(pform) + + pform = prettyForm.__add__(*pforms) + kwargs["wrap_line"] = kwargs.get("wrap_line") + kwargs["num_columns"] = kwargs.get("num_columns") + out_str = pform.render(*args, **kwargs) + mlines = [line.rstrip() for line in out_str.split("\n")] + return "\n".join(mlines) + + return Fake() + + def __ror__(self, other): + """Outer product between two Vectors. + + A rank increasing operation, which returns a Dyadic from two Vectors + + Parameters + ========== + + other : Vector + The Vector to take the outer product with + + Examples + ======== + + >>> from sympy.physics.vector import ReferenceFrame, outer + >>> N = ReferenceFrame('N') + >>> outer(N.x, N.x) + (N.x|N.x) + + """ + + from sympy.physics.vector.dyadic import Dyadic + other = _check_vector(other) + ol = Dyadic(0) + for i, v in enumerate(other.args): + for i2, v2 in enumerate(self.args): + # it looks this way because if we are in the same frame and + # use the enumerate function on the same frame in a nested + # fashion, then bad things happen + ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)]) + ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)]) + ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)]) + ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)]) + ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)]) + ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)]) + ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)]) + ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)]) + ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)]) + return ol + + def __rsub__(self, other): + return (-1 * self) + other + + def _sympystr(self, printer, order=True): + """Printing method. """ + if not order or len(self.args) == 1: + ar = list(self.args) + elif len(self.args) == 0: + return printer._print(0) + else: + d = {v[1]: v[0] for v in self.args} + keys = sorted(d.keys(), key=lambda x: x.index) + ar = [] + for key in keys: + ar.append((d[key], key)) + ol = [] # output list, to be concatenated to a string + for i, v in enumerate(ar): + for j in 0, 1, 2: + # if the coef of the basis vector is 1, we skip the 1 + if ar[i][0][j] == 1: + ol.append(' + ' + ar[i][1].str_vecs[j]) + # if the coef of the basis vector is -1, we skip the 1 + elif ar[i][0][j] == -1: + ol.append(' - ' + ar[i][1].str_vecs[j]) + elif ar[i][0][j] != 0: + # If the coefficient of the basis vector is not 1 or -1; + # also, we might wrap it in parentheses, for readability. + arg_str = printer._print(ar[i][0][j]) + if isinstance(ar[i][0][j], Add): + arg_str = "(%s)" % arg_str + if arg_str[0] == '-': + arg_str = arg_str[1:] + str_start = ' - ' + else: + str_start = ' + ' + ol.append(str_start + arg_str + '*' + ar[i][1].str_vecs[j]) + outstr = ''.join(ol) + if outstr.startswith(' + '): + outstr = outstr[3:] + elif outstr.startswith(' '): + outstr = outstr[1:] + return outstr + + def __sub__(self, other): + """The subtraction operator. """ + return self.__add__(other * -1) + + def __xor__(self, other): + """The cross product operator for two Vectors. + + Returns a Vector, expressed in the same ReferenceFrames as self. 
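+
+        Frame by frame, the product is evaluated as the symbolic 3x3
+        determinant whose rows are the basis vectors of the component's
+        frame, the projections of ``self`` onto those basis vectors, and
+        the component's measure numbers; this is the classical determinant
+        rule for cross products (see the ``_det`` helper below).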
+
+        Parameters
+        ==========
+
+        other : Vector
+            The Vector which we are crossing with
+
+        Examples
+        ========
+
+        >>> from sympy import symbols
+        >>> from sympy.physics.vector import ReferenceFrame, cross
+        >>> q1 = symbols('q1')
+        >>> N = ReferenceFrame('N')
+        >>> cross(N.x, N.y)
+        N.z
+        >>> A = ReferenceFrame('A')
+        >>> A.orient_axis(N, q1, N.x)
+        >>> cross(A.x, N.y)
+        N.z
+        >>> cross(N.y, A.x)
+        - sin(q1)*A.y - cos(q1)*A.z
+
+        """
+
+        from sympy.physics.vector.dyadic import Dyadic
+        if isinstance(other, Dyadic):
+            return NotImplemented
+        other = _check_vector(other)
+        if other.args == []:
+            return Vector(0)
+
+        def _det(mat):
+            """This is needed as a little method to find the determinant of a
+            3x3 list in Python. SymPy's Matrix will not take in Vector, so a
+            custom function is needed.
+            You should not be calling this.
+
+            """
+
+            return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])
+                    + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *
+                    mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -
+                    mat[1][1] * mat[2][0]))
+
+        outlist = []
+        ar = other.args  # For brevity
+        for i, v in enumerate(ar):
+            tempx = v[1].x
+            tempy = v[1].y
+            tempz = v[1].z
+            tempm = ([[tempx, tempy, tempz],
+                      [self & tempx, self & tempy, self & tempz],
+                      [Vector([ar[i]]) & tempx, Vector([ar[i]]) & tempy,
+                       Vector([ar[i]]) & tempz]])
+            outlist += _det(tempm).args
+        return Vector(outlist)
+
+    __radd__ = __add__
+    __rand__ = __and__
+    __rmul__ = __mul__
+
+    def separate(self):
+        """
+        The constituents of this vector in different reference frames,
+        as per its definition.
+
+        Returns a dict mapping each ReferenceFrame to the corresponding
+        constituent Vector.
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> R1 = ReferenceFrame('R1')
+        >>> R2 = ReferenceFrame('R2')
+        >>> v = R1.x + R2.x
+        >>> v.separate() == {R1: R1.x, R2: R2.x}
+        True
+
+        """
+
+        components = {}
+        for x in self.args:
+            components[x[1]] = Vector([x])
+        return components
+
+    def dot(self, other):
+        return self & other
+    dot.__doc__ = __and__.__doc__
+
+    def cross(self, other):
+        return self ^ other
+    cross.__doc__ = __xor__.__doc__
+
+    def outer(self, other):
+        return self | other
+    outer.__doc__ = __or__.__doc__
+
+    def diff(self, var, frame, var_in_dcm=True):
+        """Returns the partial derivative of the vector with respect to a
+        variable in the provided reference frame.
+
+        Parameters
+        ==========
+        var : Symbol
+            What the partial derivative is taken with respect to.
+        frame : ReferenceFrame
+            The reference frame that the partial derivative is taken in.
+        var_in_dcm : boolean
+            If True, the differentiation algorithm assumes that the variable
+            may be present in any of the direction cosine matrices that relate
+            the frame to the frames of any component of the vector. But if it
+            is known that the variable is not present in the direction cosine
+            matrices, False can be set to skip full reexpression in the
+            desired frame.
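+
+        In other words: a component whose frame's orientation relative to
+        ``frame`` does not depend on ``var`` is differentiated in its own
+        frame; otherwise the component is first re-expressed in ``frame``
+        and then differentiated, as the check on ``frame.dcm(...)`` in the
+        body below shows.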
+
+        Examples
+        ========
+
+        >>> from sympy import Symbol
+        >>> from sympy.physics.vector import dynamicsymbols, ReferenceFrame
+        >>> from sympy.physics.vector import Vector
+        >>> from sympy.physics.vector import init_vprinting
+        >>> init_vprinting(pretty_print=False)
+        >>> Vector.simp = True
+        >>> t = Symbol('t')
+        >>> q1 = dynamicsymbols('q1')
+        >>> N = ReferenceFrame('N')
+        >>> A = N.orientnew('A', 'Axis', [q1, N.y])
+        >>> A.x.diff(t, N)
+        - sin(q1)*q1'*N.x - cos(q1)*q1'*N.z
+        >>> A.x.diff(t, N).express(A)
+        - q1'*A.z
+        >>> B = ReferenceFrame('B')
+        >>> u1, u2 = dynamicsymbols('u1, u2')
+        >>> v = u1 * A.x + u2 * B.y
+        >>> v.diff(u2, N, var_in_dcm=False)
+        B.y
+
+        """
+
+        from sympy.physics.vector.frame import _check_frame
+
+        _check_frame(frame)
+        var = sympify(var)
+
+        inlist = []
+
+        for vector_component in self.args:
+            measure_number = vector_component[0]
+            component_frame = vector_component[1]
+            if component_frame == frame:
+                inlist += [(measure_number.diff(var), frame)]
+            else:
+                # If the direction cosine matrix relating the component frame
+                # with the derivative frame does not contain the variable.
+                if not var_in_dcm or (frame.dcm(component_frame).diff(var) ==
+                                      zeros(3, 3)):
+                    inlist += [(measure_number.diff(var), component_frame)]
+                else:  # else express in the frame
+                    reexp_vec_comp = Vector([vector_component]).express(frame)
+                    deriv = reexp_vec_comp.args[0][0].diff(var)
+                    inlist += Vector([(deriv, frame)]).args
+
+        return Vector(inlist)
+
+    def express(self, otherframe, variables=False):
+        """
+        Returns a Vector equivalent to this one, expressed in otherframe.
+        Uses the global express method.
+
+        Parameters
+        ==========
+
+        otherframe : ReferenceFrame
+            The frame for this Vector to be described in
+
+        variables : boolean
+            If True, the coordinate symbols (if present) in this Vector
+            are re-expressed in terms of otherframe
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
+        >>> from sympy.physics.vector import init_vprinting
+        >>> init_vprinting(pretty_print=False)
+        >>> q1 = dynamicsymbols('q1')
+        >>> N = ReferenceFrame('N')
+        >>> A = N.orientnew('A', 'Axis', [q1, N.y])
+        >>> A.x.express(N)
+        cos(q1)*N.x - sin(q1)*N.z
+
+        """
+        from sympy.physics.vector import express
+        return express(self, otherframe, variables=variables)
+
+    def to_matrix(self, reference_frame):
+        """Returns the matrix form of the vector with respect to the given
+        frame.
+
+        Parameters
+        ==========
+
+        reference_frame : ReferenceFrame
+            The reference frame that the rows of the matrix correspond to.
+
+        Returns
+        =======
+
+        matrix : ImmutableMatrix, shape(3,1)
+            The 3x1 column matrix of the vector's measure numbers.
+
+        Examples
+        ========
+
+        >>> from sympy import symbols
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> a, b, c = symbols('a, b, c')
+        >>> N = ReferenceFrame('N')
+        >>> vector = a * N.x + b * N.y + c * N.z
+        >>> vector.to_matrix(N)
+        Matrix([
+        [a],
+        [b],
+        [c]])
+        >>> beta = symbols('beta')
+        >>> A = N.orientnew('A', 'Axis', (beta, N.x))
+        >>> vector.to_matrix(A)
+        Matrix([
+        [                         a],
+        [ b*cos(beta) + c*sin(beta)],
+        [-b*sin(beta) + c*cos(beta)]])
+
+        """
+
+        return Matrix([self.dot(unit_vec) for unit_vec in
+                       reference_frame]).reshape(3, 1)
+
+    def doit(self, **hints):
+        """Calls .doit() on each term in the Vector"""
+        d = {}
+        for v in self.args:
+            d[v[1]] = v[0].applyfunc(lambda x: x.doit(**hints))
+        return Vector(d)
+
+    def dt(self, otherframe):
+        """
+        Returns a Vector which is the time derivative of
+        the self Vector, taken in frame otherframe.
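+
+        For a component whose measure numbers are constant in some frame B,
+        this satisfies the standard kinematic transport theorem: the
+        derivative taken in ``otherframe`` equals the derivative taken in B
+        plus the angular velocity of B in ``otherframe`` crossed with the
+        vector.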
+
+        Calls the global time_derivative method
+
+        Parameters
+        ==========
+
+        otherframe : ReferenceFrame
+            The frame to calculate the time derivative in
+
+        """
+        from sympy.physics.vector import time_derivative
+        return time_derivative(self, otherframe)
+
+    def simplify(self):
+        """Returns a simplified Vector."""
+        d = {}
+        for v in self.args:
+            d[v[1]] = _simplify_matrix(v[0])
+        return Vector(d)
+
+    def subs(self, *args, **kwargs):
+        """Substitution on the Vector.
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> from sympy import Symbol
+        >>> N = ReferenceFrame('N')
+        >>> s = Symbol('s')
+        >>> a = N.x * s
+        >>> a.subs({s: 2})
+        2*N.x
+
+        """
+
+        d = {}
+        for v in self.args:
+            d[v[1]] = v[0].subs(*args, **kwargs)
+        return Vector(d)
+
+    def magnitude(self):
+        """Returns the magnitude (Euclidean norm) of self.
+
+        Warnings
+        ========
+
+        Due to Python's operator precedence, a leading negative sign is
+        applied after the method call, which can give unexpected results:
+        ``-A.x.magnitude()`` is evaluated as ``-(A.x.magnitude())``,
+        not ``(-A.x).magnitude()``.
+
+        """
+        return sqrt(self & self)
+
+    def normalize(self):
+        """Returns a Vector of magnitude 1, codirectional with self."""
+        return Vector(self.args + []) / self.magnitude()
+
+    def applyfunc(self, f):
+        """Apply a function to each component of a vector."""
+        if not callable(f):
+            raise TypeError("`f` must be callable.")
+
+        d = {}
+        for v in self.args:
+            d[v[1]] = v[0].applyfunc(f)
+        return Vector(d)
+
+    def angle_between(self, vec):
+        """
+        Returns the smallest angle between Vector 'vec' and self.
+
+        Parameters
+        ==========
+
+        vec : Vector
+            The Vector with which the angle is computed.
+
+        Examples
+        ========
+
+        >>> from sympy.physics.vector import ReferenceFrame
+        >>> A = ReferenceFrame("A")
+        >>> v1 = A.x
+        >>> v2 = A.y
+        >>> v1.angle_between(v2)
+        pi/2
+
+        >>> v3 = A.x + A.y + A.z
+        >>> v1.angle_between(v3)
+        acos(sqrt(3)/3)
+
+        Warnings
+        ========
+
+        Due to Python's operator precedence, a leading negative sign is
+        applied after the method call: ``-A.x.angle_between()`` is evaluated
+        as ``-(A.x.angle_between())``, not ``(-A.x).angle_between()``.
+
+        """
+
+        vec1 = self.normalize()
+        vec2 = vec.normalize()
+        angle = acos(vec1.dot(vec2))
+        return angle
+
+    def free_symbols(self, reference_frame):
+        """Returns the free symbols in the measure numbers of the vector
+        expressed in the given reference frame.
+
+        Parameters
+        ==========
+        reference_frame : ReferenceFrame
+            The frame with respect to which the free symbols of the given
+            vector are to be determined.
+
+        Returns
+        =======
+        set of Symbol
+            Set of symbols present in the measure numbers of the vector
+            expressed in ``reference_frame``.
+
+        """
+
+        return self.to_matrix(reference_frame).free_symbols
+
+    def free_dynamicsymbols(self, reference_frame):
+        """Returns the free dynamic symbols (functions of time ``t``) in the
+        measure numbers of the vector expressed in the given reference frame.
+
+        Parameters
+        ==========
+        reference_frame : ReferenceFrame
+            The frame with respect to which the free dynamic symbols of the
+            given vector are to be determined.
+
+        Returns
+        =======
+        set
+            Set of functions of time ``t``, e.g.
+            ``Function('f')(me.dynamicsymbols._t)``.
+
+        """
+        # TODO : Circular dependency if imported at top. Should move
+        # find_dynamicsymbols into physics.vector.functions.
+ from sympy.physics.mechanics.functions import find_dynamicsymbols + + return find_dynamicsymbols(self, reference_frame=reference_frame) + + def _eval_evalf(self, prec): + if not self.args: + return self + new_args = [] + dps = prec_to_dps(prec) + for mat, frame in self.args: + new_args.append([mat.evalf(n=dps), frame]) + return Vector(new_args) + + def xreplace(self, rule): + """Replace occurrences of objects within the measure numbers of the + vector. + + Parameters + ========== + + rule : dict-like + Expresses a replacement rule. + + Returns + ======= + + Vector + Result of the replacement. + + Examples + ======== + + >>> from sympy import symbols, pi + >>> from sympy.physics.vector import ReferenceFrame + >>> A = ReferenceFrame('A') + >>> x, y, z = symbols('x y z') + >>> ((1 + x*y) * A.x).xreplace({x: pi}) + (pi*y + 1)*A.x + >>> ((1 + x*y) * A.x).xreplace({x: pi, y: 2}) + (1 + 2*pi)*A.x + + Replacements occur only if an entire node in the expression tree is + matched: + + >>> ((x*y + z) * A.x).xreplace({x*y: pi}) + (z + pi)*A.x + >>> ((x*y*z) * A.x).xreplace({x*y: pi}) + x*y*z*A.x + + """ + + new_args = [] + for mat, frame in self.args: + mat = mat.xreplace(rule) + new_args.append([mat, frame]) + return Vector(new_args) + + +class VectorTypeError(TypeError): + + def __init__(self, other, want): + msg = filldedent("Expected an instance of %s, but received object " + "'%s' of %s." % (type(want), other, type(other))) + super().__init__(msg) + + +def _check_vector(other): + if not isinstance(other, Vector): + raise TypeError('A Vector must be supplied') + return other diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/physics/wigner.py b/llmeval-env/lib/python3.10/site-packages/sympy/physics/wigner.py new file mode 100644 index 0000000000000000000000000000000000000000..ef404b98acee9893cb582c1790622603b805d866 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/physics/wigner.py @@ -0,0 +1,1159 @@ +# -*- coding: utf-8 -*- +r""" +Wigner, Clebsch-Gordan, Racah, and Gaunt coefficients + +Collection of functions for calculating Wigner 3j, 6j, 9j, +Clebsch-Gordan, Racah as well as Gaunt coefficients exactly, all +evaluating to a rational number times the square root of a rational +number [Rasch03]_. + +Please see the description of the individual functions for further +details and examples. + +References +========== + +.. [Regge58] 'Symmetry Properties of Clebsch-Gordan Coefficients', + T. Regge, Nuovo Cimento, Volume 10, pp. 544 (1958) +.. [Regge59] 'Symmetry Properties of Racah Coefficients', + T. Regge, Nuovo Cimento, Volume 11, pp. 116 (1959) +.. [Edmonds74] A. R. Edmonds. Angular momentum in quantum mechanics. + Investigations in physics, 4.; Investigations in physics, no. 4. + Princeton, N.J., Princeton University Press, 1957. +.. [Rasch03] J. Rasch and A. C. H. Yu, 'Efficient Storage Scheme for + Pre-calculated Wigner 3j, 6j and Gaunt Coefficients', SIAM + J. Sci. Comput. Volume 25, Issue 4, pp. 1416-1428 (2003) +.. [Liberatodebrito82] 'FORTRAN program for the integral of three + spherical harmonics', A. Liberato de Brito, + Comput. Phys. Commun., Volume 25, pp. 81-85 (1982) +.. [Homeier96] 'Some Properties of the Coupling Coefficients of Real + Spherical Harmonics and Their Relation to Gaunt Coefficients', + H. H. H. Homeier and E. O. Steinborn J. Mol. Struct., Volume 368, + pp. 
31-37 (1996) + +Credits and Copyright +===================== + +This code was taken from Sage with the permission of all authors: + +https://groups.google.com/forum/#!topic/sage-devel/M4NZdu-7O38 + +Authors +======= + +- Jens Rasch (2009-03-24): initial version for Sage + +- Jens Rasch (2009-05-31): updated to sage-4.0 + +- Oscar Gerardo Lazo Arjona (2017-06-18): added Wigner D matrices + +- Phil Adam LeMaitre (2022-09-19): added real Gaunt coefficient + +Copyright (C) 2008 Jens Rasch + +""" +from sympy.concrete.summations import Sum +from sympy.core.add import Add +from sympy.core.function import Function +from sympy.core.numbers import (I, Integer, pi) +from sympy.core.singleton import S +from sympy.core.symbol import Dummy +from sympy.core.sympify import sympify +from sympy.functions.combinatorial.factorials import (binomial, factorial) +from sympy.functions.elementary.complexes import re +from sympy.functions.elementary.exponential import exp +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.functions.special.spherical_harmonics import Ynm +from sympy.matrices.dense import zeros +from sympy.matrices.immutable import ImmutableMatrix +from sympy.utilities.misc import as_int + +# This list of precomputed factorials is needed to massively +# accelerate future calculations of the various coefficients +_Factlist = [1] + + +def _calc_factlist(nn): + r""" + Function calculates a list of precomputed factorials in order to + massively accelerate future calculations of the various + coefficients. + + Parameters + ========== + + nn : integer + Highest factorial to be computed. + + Returns + ======= + + list of integers : + The list of precomputed factorials. + + Examples + ======== + + Calculate list of factorials:: + + sage: from sage.functions.wigner import _calc_factlist + sage: _calc_factlist(10) + [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800] + """ + if nn >= len(_Factlist): + for ii in range(len(_Factlist), int(nn + 1)): + _Factlist.append(_Factlist[ii - 1] * ii) + return _Factlist[:int(nn) + 1] + + +def wigner_3j(j_1, j_2, j_3, m_1, m_2, m_3): + r""" + Calculate the Wigner 3j symbol `\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)`. + + Parameters + ========== + + j_1, j_2, j_3, m_1, m_2, m_3 : + Integer or half integer. + + Returns + ======= + + Rational number times the square root of a rational number. + + Examples + ======== + + >>> from sympy.physics.wigner import wigner_3j + >>> wigner_3j(2, 6, 4, 0, 0, 0) + sqrt(715)/143 + >>> wigner_3j(2, 6, 4, 0, 0, 1) + 0 + + It is an error to have arguments that are not integer or half + integer values:: + + sage: wigner_3j(2.1, 6, 4, 0, 0, 0) + Traceback (most recent call last): + ... + ValueError: j values must be integer or half integer + sage: wigner_3j(2, 6, 4, 1, 0, -1.1) + Traceback (most recent call last): + ... + ValueError: m values must be integer or half integer + + Notes + ===== + + The Wigner 3j symbol obeys the following symmetry rules: + + - invariant under any permutation of the columns (with the + exception of a sign change where `J:=j_1+j_2+j_3`): + + .. 
math:: + + \begin{aligned} + \operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3) + &=\operatorname{Wigner3j}(j_3,j_1,j_2,m_3,m_1,m_2) \\ + &=\operatorname{Wigner3j}(j_2,j_3,j_1,m_2,m_3,m_1) \\ + &=(-1)^J \operatorname{Wigner3j}(j_3,j_2,j_1,m_3,m_2,m_1) \\ + &=(-1)^J \operatorname{Wigner3j}(j_1,j_3,j_2,m_1,m_3,m_2) \\ + &=(-1)^J \operatorname{Wigner3j}(j_2,j_1,j_3,m_2,m_1,m_3) + \end{aligned} + + - invariant under space inflection, i.e. + + .. math:: + + \operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3) + =(-1)^J \operatorname{Wigner3j}(j_1,j_2,j_3,-m_1,-m_2,-m_3) + + - symmetric with respect to the 72 additional symmetries based on + the work by [Regge58]_ + + - zero for `j_1`, `j_2`, `j_3` not fulfilling triangle relation + + - zero for `m_1 + m_2 + m_3 \neq 0` + + - zero for violating any one of the conditions + `j_1 \ge |m_1|`, `j_2 \ge |m_2|`, `j_3 \ge |m_3|` + + Algorithm + ========= + + This function uses the algorithm of [Edmonds74]_ to calculate the + value of the 3j symbol exactly. Note that the formula contains + alternating sums over large factorials and is therefore unsuitable + for finite precision arithmetic and only useful for a computer + algebra system [Rasch03]_. + + Authors + ======= + + - Jens Rasch (2009-03-24): initial version + """ + if int(j_1 * 2) != j_1 * 2 or int(j_2 * 2) != j_2 * 2 or \ + int(j_3 * 2) != j_3 * 2: + raise ValueError("j values must be integer or half integer") + if int(m_1 * 2) != m_1 * 2 or int(m_2 * 2) != m_2 * 2 or \ + int(m_3 * 2) != m_3 * 2: + raise ValueError("m values must be integer or half integer") + if m_1 + m_2 + m_3 != 0: + return S.Zero + prefid = Integer((-1) ** int(j_1 - j_2 - m_3)) + m_3 = -m_3 + a1 = j_1 + j_2 - j_3 + if a1 < 0: + return S.Zero + a2 = j_1 - j_2 + j_3 + if a2 < 0: + return S.Zero + a3 = -j_1 + j_2 + j_3 + if a3 < 0: + return S.Zero + if (abs(m_1) > j_1) or (abs(m_2) > j_2) or (abs(m_3) > j_3): + return S.Zero + + maxfact = max(j_1 + j_2 + j_3 + 1, j_1 + abs(m_1), j_2 + abs(m_2), + j_3 + abs(m_3)) + _calc_factlist(int(maxfact)) + + argsqrt = Integer(_Factlist[int(j_1 + j_2 - j_3)] * + _Factlist[int(j_1 - j_2 + j_3)] * + _Factlist[int(-j_1 + j_2 + j_3)] * + _Factlist[int(j_1 - m_1)] * + _Factlist[int(j_1 + m_1)] * + _Factlist[int(j_2 - m_2)] * + _Factlist[int(j_2 + m_2)] * + _Factlist[int(j_3 - m_3)] * + _Factlist[int(j_3 + m_3)]) / \ + _Factlist[int(j_1 + j_2 + j_3 + 1)] + + ressqrt = sqrt(argsqrt) + if ressqrt.is_complex or ressqrt.is_infinite: + ressqrt = ressqrt.as_real_imag()[0] + + imin = max(-j_3 + j_1 + m_2, -j_3 + j_2 - m_1, 0) + imax = min(j_2 + m_2, j_1 - m_1, j_1 + j_2 - j_3) + sumres = 0 + for ii in range(int(imin), int(imax) + 1): + den = _Factlist[ii] * \ + _Factlist[int(ii + j_3 - j_1 - m_2)] * \ + _Factlist[int(j_2 + m_2 - ii)] * \ + _Factlist[int(j_1 - ii - m_1)] * \ + _Factlist[int(ii + j_3 - j_2 + m_1)] * \ + _Factlist[int(j_1 + j_2 - j_3 - ii)] + sumres = sumres + Integer((-1) ** ii) / den + + res = ressqrt * sumres * prefid + return res + + +def clebsch_gordan(j_1, j_2, j_3, m_1, m_2, m_3): + r""" + Calculates the Clebsch-Gordan coefficient. + `\left\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \right\rangle`. + + The reference for this function is [Edmonds74]_. + + Parameters + ========== + + j_1, j_2, j_3, m_1, m_2, m_3 : + Integer or half integer. + + Returns + ======= + + Rational number times the square root of a rational number. 
+ + Examples + ======== + + >>> from sympy import S + >>> from sympy.physics.wigner import clebsch_gordan + >>> clebsch_gordan(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, 2) + 1 + >>> clebsch_gordan(S(3)/2, S(1)/2, 1, S(3)/2, -S(1)/2, 1) + sqrt(3)/2 + >>> clebsch_gordan(S(3)/2, S(1)/2, 1, -S(1)/2, S(1)/2, 0) + -sqrt(2)/2 + + Notes + ===== + + The Clebsch-Gordan coefficient will be evaluated via its relation + to Wigner 3j symbols: + + .. math:: + + \left\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \right\rangle + =(-1)^{j_1-j_2+m_3} \sqrt{2j_3+1} + \operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,-m_3) + + See also the documentation on Wigner 3j symbols which exhibit much + higher symmetry relations than the Clebsch-Gordan coefficient. + + Authors + ======= + + - Jens Rasch (2009-03-24): initial version + """ + res = (-1) ** sympify(j_1 - j_2 + m_3) * sqrt(2 * j_3 + 1) * \ + wigner_3j(j_1, j_2, j_3, m_1, m_2, -m_3) + return res + + +def _big_delta_coeff(aa, bb, cc, prec=None): + r""" + Calculates the Delta coefficient of the 3 angular momenta for + Racah symbols. Also checks that the differences are of integer + value. + + Parameters + ========== + + aa : + First angular momentum, integer or half integer. + bb : + Second angular momentum, integer or half integer. + cc : + Third angular momentum, integer or half integer. + prec : + Precision of the ``sqrt()`` calculation. + + Returns + ======= + + double : Value of the Delta coefficient. + + Examples + ======== + + sage: from sage.functions.wigner import _big_delta_coeff + sage: _big_delta_coeff(1,1,1) + 1/2*sqrt(1/6) + """ + + if int(aa + bb - cc) != (aa + bb - cc): + raise ValueError("j values must be integer or half integer and fulfill the triangle relation") + if int(aa + cc - bb) != (aa + cc - bb): + raise ValueError("j values must be integer or half integer and fulfill the triangle relation") + if int(bb + cc - aa) != (bb + cc - aa): + raise ValueError("j values must be integer or half integer and fulfill the triangle relation") + if (aa + bb - cc) < 0: + return S.Zero + if (aa + cc - bb) < 0: + return S.Zero + if (bb + cc - aa) < 0: + return S.Zero + + maxfact = max(aa + bb - cc, aa + cc - bb, bb + cc - aa, aa + bb + cc + 1) + _calc_factlist(maxfact) + + argsqrt = Integer(_Factlist[int(aa + bb - cc)] * + _Factlist[int(aa + cc - bb)] * + _Factlist[int(bb + cc - aa)]) / \ + Integer(_Factlist[int(aa + bb + cc + 1)]) + + ressqrt = sqrt(argsqrt) + if prec: + ressqrt = ressqrt.evalf(prec).as_real_imag()[0] + return ressqrt + + +def racah(aa, bb, cc, dd, ee, ff, prec=None): + r""" + Calculate the Racah symbol `W(a,b,c,d;e,f)`. + + Parameters + ========== + + a, ..., f : + Integer or half integer. + prec : + Precision, default: ``None``. Providing a precision can + drastically speed up the calculation. + + Returns + ======= + + Rational number times the square root of a rational number + (if ``prec=None``), or real number if a precision is given. + + Examples + ======== + + >>> from sympy.physics.wigner import racah + >>> racah(3,3,3,3,3,3) + -1/14 + + Notes + ===== + + The Racah symbol is related to the Wigner 6j symbol: + + .. math:: + + \operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6) + =(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6) + + Please see the 6j symbol for its much richer symmetries and for + additional properties. + + Algorithm + ========= + + This function uses the algorithm of [Edmonds74]_ to calculate the + value of the 6j symbol exactly. 
Note that the formula contains + alternating sums over large factorials and is therefore unsuitable + for finite precision arithmetic and only useful for a computer + algebra system [Rasch03]_. + + Authors + ======= + + - Jens Rasch (2009-03-24): initial version + """ + prefac = _big_delta_coeff(aa, bb, ee, prec) * \ + _big_delta_coeff(cc, dd, ee, prec) * \ + _big_delta_coeff(aa, cc, ff, prec) * \ + _big_delta_coeff(bb, dd, ff, prec) + if prefac == 0: + return S.Zero + imin = max(aa + bb + ee, cc + dd + ee, aa + cc + ff, bb + dd + ff) + imax = min(aa + bb + cc + dd, aa + dd + ee + ff, bb + cc + ee + ff) + + maxfact = max(imax + 1, aa + bb + cc + dd, aa + dd + ee + ff, + bb + cc + ee + ff) + _calc_factlist(maxfact) + + sumres = 0 + for kk in range(int(imin), int(imax) + 1): + den = _Factlist[int(kk - aa - bb - ee)] * \ + _Factlist[int(kk - cc - dd - ee)] * \ + _Factlist[int(kk - aa - cc - ff)] * \ + _Factlist[int(kk - bb - dd - ff)] * \ + _Factlist[int(aa + bb + cc + dd - kk)] * \ + _Factlist[int(aa + dd + ee + ff - kk)] * \ + _Factlist[int(bb + cc + ee + ff - kk)] + sumres = sumres + Integer((-1) ** kk * _Factlist[kk + 1]) / den + + res = prefac * sumres * (-1) ** int(aa + bb + cc + dd) + return res + + +def wigner_6j(j_1, j_2, j_3, j_4, j_5, j_6, prec=None): + r""" + Calculate the Wigner 6j symbol `\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)`. + + Parameters + ========== + + j_1, ..., j_6 : + Integer or half integer. + prec : + Precision, default: ``None``. Providing a precision can + drastically speed up the calculation. + + Returns + ======= + + Rational number times the square root of a rational number + (if ``prec=None``), or real number if a precision is given. + + Examples + ======== + + >>> from sympy.physics.wigner import wigner_6j + >>> wigner_6j(3,3,3,3,3,3) + -1/14 + >>> wigner_6j(5,5,5,5,5,5) + 1/52 + + It is an error to have arguments that are not integer or half + integer values or do not fulfill the triangle relation:: + + sage: wigner_6j(2.5,2.5,2.5,2.5,2.5,2.5) + Traceback (most recent call last): + ... + ValueError: j values must be integer or half integer and fulfill the triangle relation + sage: wigner_6j(0.5,0.5,1.1,0.5,0.5,1.1) + Traceback (most recent call last): + ... + ValueError: j values must be integer or half integer and fulfill the triangle relation + + Notes + ===== + + The Wigner 6j symbol is related to the Racah symbol but exhibits + more symmetries as detailed below. + + .. math:: + + \operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6) + =(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6) + + The Wigner 6j symbol obeys the following symmetry rules: + + - Wigner 6j symbols are left invariant under any permutation of + the columns: + + .. math:: + + \begin{aligned} + \operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6) + &=\operatorname{Wigner6j}(j_3,j_1,j_2,j_6,j_4,j_5) \\ + &=\operatorname{Wigner6j}(j_2,j_3,j_1,j_5,j_6,j_4) \\ + &=\operatorname{Wigner6j}(j_3,j_2,j_1,j_6,j_5,j_4) \\ + &=\operatorname{Wigner6j}(j_1,j_3,j_2,j_4,j_6,j_5) \\ + &=\operatorname{Wigner6j}(j_2,j_1,j_3,j_5,j_4,j_6) + \end{aligned} + + - They are invariant under the exchange of the upper and lower + arguments in each of any two columns, i.e. + + .. 
math:: + + \operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6) + =\operatorname{Wigner6j}(j_1,j_5,j_6,j_4,j_2,j_3) + =\operatorname{Wigner6j}(j_4,j_2,j_6,j_1,j_5,j_3) + =\operatorname{Wigner6j}(j_4,j_5,j_3,j_1,j_2,j_6) + + - additional 6 symmetries [Regge59]_ giving rise to 144 symmetries + in total + + - only non-zero if any triple of `j`'s fulfill a triangle relation + + Algorithm + ========= + + This function uses the algorithm of [Edmonds74]_ to calculate the + value of the 6j symbol exactly. Note that the formula contains + alternating sums over large factorials and is therefore unsuitable + for finite precision arithmetic and only useful for a computer + algebra system [Rasch03]_. + + """ + res = (-1) ** int(j_1 + j_2 + j_4 + j_5) * \ + racah(j_1, j_2, j_5, j_4, j_3, j_6, prec) + return res + + +def wigner_9j(j_1, j_2, j_3, j_4, j_5, j_6, j_7, j_8, j_9, prec=None): + r""" + Calculate the Wigner 9j symbol + `\operatorname{Wigner9j}(j_1,j_2,j_3,j_4,j_5,j_6,j_7,j_8,j_9)`. + + Parameters + ========== + + j_1, ..., j_9 : + Integer or half integer. + prec : precision, default + ``None``. Providing a precision can + drastically speed up the calculation. + + Returns + ======= + + Rational number times the square root of a rational number + (if ``prec=None``), or real number if a precision is given. + + Examples + ======== + + >>> from sympy.physics.wigner import wigner_9j + >>> wigner_9j(1,1,1, 1,1,1, 1,1,0, prec=64) # ==1/18 + 0.05555555... + + >>> wigner_9j(1/2,1/2,0, 1/2,3/2,1, 0,1,1, prec=64) # ==1/6 + 0.1666666... + + It is an error to have arguments that are not integer or half + integer values or do not fulfill the triangle relation:: + + sage: wigner_9j(0.5,0.5,0.5, 0.5,0.5,0.5, 0.5,0.5,0.5,prec=64) + Traceback (most recent call last): + ... + ValueError: j values must be integer or half integer and fulfill the triangle relation + sage: wigner_9j(1,1,1, 0.5,1,1.5, 0.5,1,2.5,prec=64) + Traceback (most recent call last): + ... + ValueError: j values must be integer or half integer and fulfill the triangle relation + + Algorithm + ========= + + This function uses the algorithm of [Edmonds74]_ to calculate the + value of the 3j symbol exactly. Note that the formula contains + alternating sums over large factorials and is therefore unsuitable + for finite precision arithmetic and only useful for a computer + algebra system [Rasch03]_. + """ + imax = int(min(j_1 + j_9, j_2 + j_6, j_4 + j_8) * 2) + imin = imax % 2 + sumres = 0 + for kk in range(imin, int(imax) + 1, 2): + sumres = sumres + (kk + 1) * \ + racah(j_1, j_2, j_9, j_6, j_3, kk / 2, prec) * \ + racah(j_4, j_6, j_8, j_2, j_5, kk / 2, prec) * \ + racah(j_1, j_4, j_9, j_8, j_7, kk / 2, prec) + return sumres + + +def gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None): + r""" + Calculate the Gaunt coefficient. + + Explanation + =========== + + The Gaunt coefficient is defined as the integral over three + spherical harmonics: + + .. math:: + + \begin{aligned} + \operatorname{Gaunt}(l_1,l_2,l_3,m_1,m_2,m_3) + &=\int Y_{l_1,m_1}(\Omega) + Y_{l_2,m_2}(\Omega) Y_{l_3,m_3}(\Omega) \,d\Omega \\ + &=\sqrt{\frac{(2l_1+1)(2l_2+1)(2l_3+1)}{4\pi}} + \operatorname{Wigner3j}(l_1,l_2,l_3,0,0,0) + \operatorname{Wigner3j}(l_1,l_2,l_3,m_1,m_2,m_3) + \end{aligned} + + Parameters + ========== + + l_1, l_2, l_3, m_1, m_2, m_3 : + Integer. + prec - precision, default: ``None``. + Providing a precision can + drastically speed up the calculation. 
+ + Returns + ======= + + Rational number times the square root of a rational number + (if ``prec=None``), or real number if a precision is given. + + Examples + ======== + + >>> from sympy.physics.wigner import gaunt + >>> gaunt(1,0,1,1,0,-1) + -1/(2*sqrt(pi)) + >>> gaunt(1000,1000,1200,9,3,-12).n(64) + 0.00689500421922113448... + + It is an error to use non-integer values for `l` and `m`:: + + sage: gaunt(1.2,0,1.2,0,0,0) + Traceback (most recent call last): + ... + ValueError: l values must be integer + sage: gaunt(1,0,1,1.1,0,-1.1) + Traceback (most recent call last): + ... + ValueError: m values must be integer + + Notes + ===== + + The Gaunt coefficient obeys the following symmetry rules: + + - invariant under any permutation of the columns + + .. math:: + \begin{aligned} + Y(l_1,l_2,l_3,m_1,m_2,m_3) + &=Y(l_3,l_1,l_2,m_3,m_1,m_2) \\ + &=Y(l_2,l_3,l_1,m_2,m_3,m_1) \\ + &=Y(l_3,l_2,l_1,m_3,m_2,m_1) \\ + &=Y(l_1,l_3,l_2,m_1,m_3,m_2) \\ + &=Y(l_2,l_1,l_3,m_2,m_1,m_3) + \end{aligned} + + - invariant under space inflection, i.e. + + .. math:: + Y(l_1,l_2,l_3,m_1,m_2,m_3) + =Y(l_1,l_2,l_3,-m_1,-m_2,-m_3) + + - symmetric with respect to the 72 Regge symmetries as inherited + for the `3j` symbols [Regge58]_ + + - zero for `l_1`, `l_2`, `l_3` not fulfilling triangle relation + + - zero for violating any one of the conditions: `l_1 \ge |m_1|`, + `l_2 \ge |m_2|`, `l_3 \ge |m_3|` + + - non-zero only for an even sum of the `l_i`, i.e. + `L = l_1 + l_2 + l_3 = 2n` for `n` in `\mathbb{N}` + + Algorithms + ========== + + This function uses the algorithm of [Liberatodebrito82]_ to + calculate the value of the Gaunt coefficient exactly. Note that + the formula contains alternating sums over large factorials and is + therefore unsuitable for finite precision arithmetic and only + useful for a computer algebra system [Rasch03]_. + + Authors + ======= + + Jens Rasch (2009-03-24): initial version for Sage. + """ + l_1, l_2, l_3, m_1, m_2, m_3 = [ + as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)] + + if l_1 + l_2 - l_3 < 0: + return S.Zero + if l_1 - l_2 + l_3 < 0: + return S.Zero + if -l_1 + l_2 + l_3 < 0: + return S.Zero + if (m_1 + m_2 + m_3) != 0: + return S.Zero + if (abs(m_1) > l_1) or (abs(m_2) > l_2) or (abs(m_3) > l_3): + return S.Zero + bigL, remL = divmod(l_1 + l_2 + l_3, 2) + if remL % 2: + return S.Zero + + imin = max(-l_3 + l_1 + m_2, -l_3 + l_2 - m_1, 0) + imax = min(l_2 + m_2, l_1 - m_1, l_1 + l_2 - l_3) + + _calc_factlist(max(l_1 + l_2 + l_3 + 1, imax + 1)) + + ressqrt = sqrt((2 * l_1 + 1) * (2 * l_2 + 1) * (2 * l_3 + 1) * \ + _Factlist[l_1 - m_1] * _Factlist[l_1 + m_1] * _Factlist[l_2 - m_2] * \ + _Factlist[l_2 + m_2] * _Factlist[l_3 - m_3] * _Factlist[l_3 + m_3] / \ + (4*pi)) + + prefac = Integer(_Factlist[bigL] * _Factlist[l_2 - l_1 + l_3] * + _Factlist[l_1 - l_2 + l_3] * _Factlist[l_1 + l_2 - l_3])/ \ + _Factlist[2 * bigL + 1]/ \ + (_Factlist[bigL - l_1] * + _Factlist[bigL - l_2] * _Factlist[bigL - l_3]) + + sumres = 0 + for ii in range(int(imin), int(imax) + 1): + den = _Factlist[ii] * _Factlist[ii + l_3 - l_1 - m_2] * \ + _Factlist[l_2 + m_2 - ii] * _Factlist[l_1 - ii - m_1] * \ + _Factlist[ii + l_3 - l_2 + m_1] * _Factlist[l_1 + l_2 - l_3 - ii] + sumres = sumres + Integer((-1) ** ii) / den + + res = ressqrt * prefac * sumres * Integer((-1) ** (bigL + l_3 + m_1 - m_2)) + if prec is not None: + res = res.n(prec) + return res + + +def real_gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None): + r""" + Calculate the real Gaunt coefficient. 
+
+    Explanation
+    ===========
+
+    The real Gaunt coefficient is defined as the integral over three
+    real spherical harmonics:
+
+    .. math::
+        \begin{aligned}
+        \operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3)
+        &=\int Z^{m_1}_{l_1}(\Omega)
+        Z^{m_2}_{l_2}(\Omega) Z^{m_3}_{l_3}(\Omega) \,d\Omega \\
+        \end{aligned}
+
+    Alternatively, it can be defined in terms of the standard Gaunt
+    coefficient by relating the real spherical harmonics to the standard
+    spherical harmonics via a unitary transformation `U`, i.e.
+    `Z^{m}_{l}(\Omega)=\sum_{m'}U^{m}_{m'}Y^{m'}_{l}(\Omega)` [Homeier96]_.
+    The real Gaunt coefficient is then defined as
+
+    .. math::
+        \begin{aligned}
+        \operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3)
+        &=\int Z^{m_1}_{l_1}(\Omega)
+        Z^{m_2}_{l_2}(\Omega) Z^{m_3}_{l_3}(\Omega) \,d\Omega \\
+        &=\sum_{m'_1 m'_2 m'_3} U^{m_1}_{m'_1}U^{m_2}_{m'_2}U^{m_3}_{m'_3}
+        \operatorname{Gaunt}(l_1,l_2,l_3,m'_1,m'_2,m'_3)
+        \end{aligned}
+
+    The unitary matrix `U` has components
+
+    .. math::
+        \begin{aligned}
+        U^m_{m'} = \delta_{|m||m'|}*(\delta_{m'0}\delta_{m0} +
+        \frac{1}{\sqrt{2}}\big[\Theta(m)
+        \big(\delta_{m'm}+(-1)^{m'}\delta_{m'-m}\big)+i\Theta(-m)\big((-1)^{-m}
+        \delta_{m'-m}-\delta_{m'm}*(-1)^{m'-m}\big)\big])
+        \end{aligned}
+
+    where `\delta_{ij}` is the Kronecker delta symbol and `\Theta` is a step
+    function defined as
+
+    .. math::
+        \begin{aligned}
+        \Theta(x) = \begin{cases} 1 \,\text{for}\, x > 0 \\ 0 \,\text{for}\,
+        x \leq 0 \end{cases}
+        \end{aligned}
+
+    Parameters
+    ==========
+
+    l_1, l_2, l_3, m_1, m_2, m_3 :
+        Integer.
+
+    prec : precision, default: ``None``.
+        Providing a precision can drastically speed up the calculation.
+
+    Returns
+    =======
+
+    Rational number times the square root of a rational number.
+
+    Examples
+    ========
+
+    >>> from sympy.physics.wigner import real_gaunt
+    >>> real_gaunt(2,2,4,-1,-1,0)
+    -2/(7*sqrt(pi))
+    >>> real_gaunt(10,10,20,-9,-9,0).n(64)
+    -0.00002480019791932209313156167...
+
+    It is an error to use non-integer values for `l` and `m`::
+
+        real_gaunt(2.8,0.5,1.3,0,0,0)
+        Traceback (most recent call last):
+        ...
+        ValueError: l values must be integer
+        real_gaunt(2,2,4,0.7,1,-3.4)
+        Traceback (most recent call last):
+        ...
+        ValueError: m values must be integer
+
+    Notes
+    =====
+
+    The real Gaunt coefficient inherits from the standard Gaunt coefficient,
+    the invariance under any permutation of the pairs `(l_i, m_i)` and the
+    requirement that the sum of the `l_i` be even to yield a non-zero value.
+    It also obeys the following symmetry rules:
+
+    - zero for `l_1`, `l_2`, `l_3` not fulfilling the condition
+      `l_1 \in \{l_{\text{max}}, l_{\text{max}}-2, \ldots, l_{\text{min}}\}`,
+      where `l_{\text{max}} = l_2+l_3`,
+
+      .. math::
+          \begin{aligned}
+          l_{\text{min}} = \begin{cases} \kappa(l_2, l_3, m_2, m_3) & \text{if}\,
+          \kappa(l_2, l_3, m_2, m_3) + l_{\text{max}}\, \text{is even} \\
+          \kappa(l_2, l_3, m_2, m_3)+1 & \text{if}\, \kappa(l_2, l_3, m_2, m_3) +
+          l_{\text{max}}\, \text{is odd}\end{cases}
+          \end{aligned}
+
+      and `\kappa(l_2, l_3, m_2, m_3) = \max{\big(|l_2-l_3|, \min{\big(|m_2+m_3|,
+      |m_2-m_3|\big)}\big)}`
+
+    - zero for an odd number of negative `m_i`
+
+    Algorithms
+    ==========
+
+    This function uses the algorithms of [Homeier96]_ and [Rasch03]_ to
+    calculate the value of the real Gaunt coefficient exactly. Note that
+    the formula used in [Rasch03]_ contains alternating sums over large
+    factorials and is therefore unsuitable for finite precision arithmetic
+    and only useful for a computer algebra system [Rasch03]_.
However, this
+    function can in principle use any algorithm that computes the Gaunt
+    coefficient, so it is suitable for finite precision arithmetic in so far
+    as the algorithm which computes the Gaunt coefficient is.
+    """
+    l_1, l_2, l_3, m_1, m_2, m_3 = [
+        as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)]
+
+    # check for quick exits
+    if sum(1 for i in (m_1, m_2, m_3) if i < 0) % 2:
+        return S.Zero  # odd number of negative m
+    if (l_1 + l_2 + l_3) % 2:
+        return S.Zero  # sum of l is odd
+    lmax = l_2 + l_3
+    lmin = max(abs(l_2 - l_3), min(abs(m_2 + m_3), abs(m_2 - m_3)))
+    if (lmin + lmax) % 2:
+        lmin += 1
+    if lmin not in range(lmax, lmin - 2, -2):
+        return S.Zero
+
+    kron_del = lambda i, j: 1 if i == j else 0
+    s = lambda e: -1 if e % 2 else 1  # (-1)**e to give +/-1, avoiding float when e<0
+    A = lambda a, b: (-kron_del(a, b)*s(a-b) + kron_del(a, -b)*
+                      s(b)) if b < 0 else 0
+    B = lambda a, b: (kron_del(a, b) + kron_del(a, -b)*s(a)) if b > 0 else 0
+    C = lambda a, b: kron_del(abs(a), abs(b))*(kron_del(a, 0)*kron_del(b, 0) +
+                                               (B(a, b) + I*A(a, b))/sqrt(2))
+    ugnt = 0
+    for i in range(-l_1, l_1+1):
+        U1 = C(i, m_1)
+        for j in range(-l_2, l_2+1):
+            U2 = C(j, m_2)
+            U3 = C(-i-j, m_3)
+            ugnt = ugnt + re(U1*U2*U3)*gaunt(l_1, l_2, l_3, i, j, -i-j)
+
+    if prec is not None:
+        ugnt = ugnt.n(prec)
+    return ugnt
+
+
+class Wigner3j(Function):
+
+    def doit(self, **hints):
+        if all(obj.is_number for obj in self.args):
+            return wigner_3j(*self.args)
+        else:
+            return self
+
+def dot_rot_grad_Ynm(j, p, l, m, theta, phi):
+    r"""
+    Returns dot product of rotational gradients of spherical harmonics.
+
+    Explanation
+    ===========
+
+    This function returns the right hand side of the following expression:
+
+    .. math ::
+        \vec{R}Y{_j^{p}} \cdot \vec{R}Y{_l^{m}} = (-1)^{m+p}
+        \sum\limits_{k=|l-j|}^{l+j}Y{_k^{m+p}} * \alpha_{l,m,j,p,k} *
+        \frac{1}{2} (k^2-j^2-l^2+k-j-l)
+
+    Arguments
+    =========
+
+    j, p, l, m .... indices in spherical harmonics (expressions or integers)
+    theta, phi .... angle arguments in spherical harmonics
+
+    Examples
+    ========
+
+    >>> from sympy import symbols
+    >>> from sympy.physics.wigner import dot_rot_grad_Ynm
+    >>> theta, phi = symbols("theta phi")
+    >>> dot_rot_grad_Ynm(3, 2, 2, 0, theta, phi).doit()
+    3*sqrt(55)*Ynm(5, 2, theta, phi)/(11*sqrt(pi))
+
+    """
+    j = sympify(j)
+    p = sympify(p)
+    l = sympify(l)
+    m = sympify(m)
+    theta = sympify(theta)
+    phi = sympify(phi)
+    k = Dummy("k")
+
+    def alpha(l,m,j,p,k):
+        return sqrt((2*l+1)*(2*j+1)*(2*k+1)/(4*pi)) * \
+            Wigner3j(j, l, k, S.Zero, S.Zero, S.Zero) * \
+            Wigner3j(j, l, k, p, m, -m-p)
+
+    return (S.NegativeOne)**(m+p) * Sum(Ynm(k, m+p, theta, phi) * alpha(l,m,j,p,k) / 2 \
+        *(k**2-j**2-l**2+k-j-l), (k, abs(l-j), l+j))
+
+
+def wigner_d_small(J, beta):
+    """Return the small Wigner d matrix for angular momentum J.
+
+    Explanation
+    ===========
+
+    J : An integer, half-integer, or SymPy symbol for the total angular
+        momentum of the angular momentum space being rotated.
+    beta : A real number representing the Euler angle of rotation about
+        the so-called line of nodes. See [Edmonds74]_.
+
+    Returns
+    =======
+
+    A matrix representing the corresponding Euler angle rotation (in the
+    basis of eigenvectors of `J_z`).
+
+    .. math ::
+        \\mathcal{d}_{\\beta} = \\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big)
+
+    The components are calculated using the general form [Edmonds74]_,
+    equation 4.1.15.
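+
+    Concretely, the implementation below evaluates, for row index `M_i`
+    and column index `M_j`,
+
+    .. math ::
+        d_{M_i M_j}(\\beta) = \\sqrt{\\frac{(J+M_i)!\\,(J-M_i)!}
+        {(J+M_j)!\\,(J-M_j)!}} \\sum_{s} (-1)^{J-M_i-s}
+        \\binom{J+M_j}{J-M_i-s} \\binom{J-M_j}{s}
+        \\cos^{2s+M_i+M_j}\\Big(\\frac{\\beta}{2}\\Big)
+        \\sin^{2J-2s-M_i-M_j}\\Big(\\frac{\\beta}{2}\\Big)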
+
+    Examples
+    ========
+
+    >>> from sympy import Integer, symbols, pi, pprint
+    >>> from sympy.physics.wigner import wigner_d_small
+    >>> half = 1/Integer(2)
+    >>> beta = symbols("beta", real=True)
+    >>> pprint(wigner_d_small(half, beta), use_unicode=True)
+    ⎡ ⎛β⎞      ⎛β⎞⎤
+    ⎢cos⎜─⎟ sin⎜─⎟⎥
+    ⎢   ⎝2⎠    ⎝2⎠⎥
+    ⎢             ⎥
+    ⎢    ⎛β⎞   ⎛β⎞⎥
+    ⎢-sin⎜─⎟ cos⎜─⎟⎥
+    ⎣    ⎝2⎠    ⎝2⎠⎦
+
+    >>> pprint(wigner_d_small(2*half, beta), use_unicode=True)
+    ⎡      2⎛β⎞           ⎛β⎞    ⎛β⎞          2⎛β⎞    ⎤
+    ⎢   cos ⎜─⎟     √2⋅sin⎜─⎟⋅cos⎜─⎟       sin ⎜─⎟    ⎥
+    ⎢       ⎝2⎠           ⎝2⎠    ⎝2⎠           ⎝2⎠    ⎥
+    ⎢                                                 ⎥
+    ⎢        ⎛β⎞    ⎛β⎞     2⎛β⎞      2⎛β⎞      ⎛β⎞    ⎛β⎞⎥
+    ⎢-√2⋅sin⎜─⎟⋅cos⎜─⎟  - sin ⎜─⎟ + cos ⎜─⎟  √2⋅sin⎜─⎟⋅cos⎜─⎟⎥
+    ⎢        ⎝2⎠    ⎝2⎠       ⎝2⎠      ⎝2⎠      ⎝2⎠    ⎝2⎠⎥
+    ⎢                                                 ⎥
+    ⎢      2⎛β⎞            ⎛β⎞    ⎛β⎞         2⎛β⎞    ⎥
+    ⎢   sin ⎜─⎟     -√2⋅sin⎜─⎟⋅cos⎜─⎟      cos ⎜─⎟    ⎥
+    ⎣       ⎝2⎠            ⎝2⎠    ⎝2⎠          ⎝2⎠    ⎦
+
+    From table 4 in [Edmonds74]_
+
+    >>> pprint(wigner_d_small(half, beta).subs({beta:pi/2}), use_unicode=True)
+    ⎡ √2  √2⎤
+    ⎢ ──  ──⎥
+    ⎢ 2   2 ⎥
+    ⎢       ⎥
+    ⎢-√2  √2⎥
+    ⎢────  ──⎥
+    ⎣ 2   2 ⎦
+
+    >>> pprint(wigner_d_small(2*half, beta).subs({beta:pi/2}),
+    ... use_unicode=True)
+    ⎡      √2     ⎤
+    ⎢1/2   ──  1/2⎥
+    ⎢      2      ⎥
+    ⎢             ⎥
+    ⎢-√2       √2 ⎥
+    ⎢────  0   ── ⎥
+    ⎢ 2        2  ⎥
+    ⎢             ⎥
+    ⎢     -√2     ⎥
+    ⎢1/2  ────  1/2⎥
+    ⎣      2      ⎦
+
+    >>> pprint(wigner_d_small(3*half, beta).subs({beta:pi/2}),
+    ... use_unicode=True)
+    ⎡ √2   √6   √6   √2⎤
+    ⎢ ──   ──   ──   ──⎥
+    ⎢ 4    4    4    4 ⎥
+    ⎢                  ⎥
+    ⎢-√6  -√2   √2   √6⎥
+    ⎢────  ────  ──   ──⎥
+    ⎢ 4    4    4    4 ⎥
+    ⎢                  ⎥
+    ⎢ √6  -√2  -√2   √6⎥
+    ⎢ ──  ────  ────  ──⎥
+    ⎢ 4    4    4    4 ⎥
+    ⎢                  ⎥
+    ⎢-√2   √6  -√6   √2⎥
+    ⎢────   ──  ────  ──⎥
+    ⎣ 4    4    4    4 ⎦
+
+    >>> pprint(wigner_d_small(4*half, beta).subs({beta:pi/2}),
+    ... use_unicode=True)
+    ⎡            √6           ⎤
+    ⎢1/4   1/2   ──   1/2  1/4⎥
+    ⎢            4            ⎥
+    ⎢                         ⎥
+    ⎢-1/2  -1/2  0   1/2   1/2⎥
+    ⎢                         ⎥
+    ⎢ √6             √6       ⎥
+    ⎢ ──    0   -1/2  0    ── ⎥
+    ⎢ 4              4        ⎥
+    ⎢                         ⎥
+    ⎢-1/2  1/2   0   -1/2  1/2⎥
+    ⎢                         ⎥
+    ⎢            √6           ⎥
+    ⎢1/4  -1/2   ──  -1/2  1/4⎥
+    ⎣            4            ⎦
+
+    """
+    M = [J-i for i in range(2*J+1)]
+    d = zeros(2*J+1)
+    for i, Mi in enumerate(M):
+        for j, Mj in enumerate(M):
+
+            # We get the maximum and minimum value of sigma.
+            sigmamax = max([-Mi-Mj, J-Mj])
+            sigmamin = min([0, J-Mi])
+
+            dij = sqrt(factorial(J+Mi)*factorial(J-Mi) /
+                       factorial(J+Mj)/factorial(J-Mj))
+            terms = [(-1)**(J-Mi-s) *
+                     binomial(J+Mj, J-Mi-s) *
+                     binomial(J-Mj, s) *
+                     cos(beta/2)**(2*s+Mi+Mj) *
+                     sin(beta/2)**(2*J-2*s-Mj-Mi)
+                     for s in range(sigmamin, sigmamax+1)]
+
+            d[i, j] = dij*Add(*terms)
+
+    return ImmutableMatrix(d)
+
+
+def wigner_d(J, alpha, beta, gamma):
+    """Return the Wigner D matrix for angular momentum J.
+
+    Explanation
+    ===========
+
+    J :
+        An integer, half-integer, or SymPy symbol for the total angular
+        momentum of the angular momentum space being rotated.
+    alpha, beta, gamma :
+        Real numbers representing the Euler angles of rotation about the
+        so-called vertical, line of nodes, and figure axes. See [Edmonds74]_.
+
+    Returns
+    =======
+
+    A matrix representing the corresponding Euler angle rotation (in the
+    basis of eigenvectors of `J_z`).
+
+    .. math ::
+        \\mathcal{D}_{\\alpha \\beta \\gamma} =
+        \\exp\\big( \\frac{i\\alpha}{\\hbar} J_z\\big)
+        \\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big)
+        \\exp\\big( \\frac{i\\gamma}{\\hbar} J_z\\big)
+
+    The components are calculated using the general form [Edmonds74]_,
+    equation 4.1.12.
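+
+    In terms of the small d matrix above, each element is assembled as
+
+    .. math ::
+        \\mathcal{D}_{M_i M_j}(\\alpha,\\beta,\\gamma) =
+        e^{i M_i \\alpha}\\, d_{M_i M_j}(\\beta)\\, e^{i M_j \\gamma}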
+ + Examples + ======== + + The simplest possible example: + + >>> from sympy.physics.wigner import wigner_d + >>> from sympy import Integer, symbols, pprint + >>> half = 1/Integer(2) + >>> alpha, beta, gamma = symbols("alpha, beta, gamma", real=True) + >>> pprint(wigner_d(half, alpha, beta, gamma), use_unicode=True) + ⎡ ⅈ⋅α ⅈ⋅γ ⅈ⋅α -ⅈ⋅γ ⎤ + ⎢ ─── ─── ─── ───── ⎥ + ⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞ ⎥ + ⎢ ℯ ⋅ℯ ⋅cos⎜─⎟ ℯ ⋅ℯ ⋅sin⎜─⎟ ⎥ + ⎢ ⎝2⎠ ⎝2⎠ ⎥ + ⎢ ⎥ + ⎢ -ⅈ⋅α ⅈ⋅γ -ⅈ⋅α -ⅈ⋅γ ⎥ + ⎢ ───── ─── ───── ───── ⎥ + ⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞⎥ + ⎢-ℯ ⋅ℯ ⋅sin⎜─⎟ ℯ ⋅ℯ ⋅cos⎜─⎟⎥ + ⎣ ⎝2⎠ ⎝2⎠⎦ + + """ + d = wigner_d_small(J, beta) + M = [J-i for i in range(2*J+1)] + D = [[exp(I*Mi*alpha)*d[i, j]*exp(I*Mj*gamma) + for j, Mj in enumerate(M)] for i, Mi in enumerate(M)] + return ImmutableMatrix(D)
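+
+
+# A minimal consistency sketch (illustrative only, not part of the module's
+# API): with alpha = gamma = 0 the exponential phase factors above are all 1,
+# so wigner_d should reduce to wigner_d_small. As a commented doctest:
+#
+#     >>> from sympy import symbols
+#     >>> beta = symbols("beta", real=True)
+#     >>> wigner_d(1, 0, beta, 0) == wigner_d_small(1, beta)
+#     True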