diff --git a/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..c37cae49ec77ad6ebb25568c1605f1fee5313cfb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2007 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..56e942902a96e7f012479a582c5cf89511219f9a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA @@ -0,0 +1,105 @@ +Metadata-Version: 2.1 +Name: Jinja2 +Version: 3.1.3 +Summary: A very fast and expressive template engine. 
+Home-page: https://palletsprojects.com/p/jinja/ +Maintainer: Pallets +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Documentation, https://jinja.palletsprojects.com/ +Project-URL: Changes, https://jinja.palletsprojects.com/changes/ +Project-URL: Source Code, https://github.com/pallets/jinja/ +Project-URL: Issue Tracker, https://github.com/pallets/jinja/issues/ +Project-URL: Chat, https://discord.gg/pallets +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE.rst +Requires-Dist: MarkupSafe >=2.0 +Provides-Extra: i18n +Requires-Dist: Babel >=2.7 ; extra == 'i18n' + +Jinja +===== + +Jinja is a fast, expressive, extensible templating engine. Special +placeholders in the template allow writing code similar to Python +syntax. Then the template is passed data to render the final document. + +It includes: + +- Template inheritance and inclusion. +- Define and import macros within templates. +- HTML templates can use autoescaping to prevent XSS from untrusted + user input. +- A sandboxed environment can safely render untrusted templates. +- AsyncIO support for generating templates and calling async + functions. +- I18N support with Babel. +- Templates are compiled to optimized Python code just-in-time and + cached, or can be compiled ahead-of-time. +- Exceptions point to the correct line in templates to make debugging + easier. +- Extensible filters, tests, functions, and even syntax. + +Jinja's philosophy is that while application logic belongs in Python if +possible, it shouldn't make the template designer's job difficult by +restricting functionality too much. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + $ pip install -U Jinja2 + +.. _pip: https://pip.pypa.io/en/stable/getting-started/ + + +In A Nutshell +------------- + +.. code-block:: jinja + + {% extends "base.html" %} + {% block title %}Members{% endblock %} + {% block content %} + <ul> + {% for user in users %} + <li><a href="{{ user.url }}">{{ user.username }}</a></li> + {% endfor %} + </ul> + {% endblock %} + + +Donate +------ + +The Pallets organization develops and supports Jinja and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, `please +donate today`_. + +..
_please donate today: https://palletsprojects.com/donate + + +Links +----- + +- Documentation: https://jinja.palletsprojects.com/ +- Changes: https://jinja.palletsprojects.com/changes/ +- PyPI Releases: https://pypi.org/project/Jinja2/ +- Source Code: https://github.com/pallets/jinja/ +- Issue Tracker: https://github.com/pallets/jinja/issues/ +- Chat: https://discord.gg/pallets diff --git a/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..8a511ac007aa6ef6cabad0bdb17d166d1398c11d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD @@ -0,0 +1,58 @@ +Jinja2-3.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Jinja2-3.1.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 +Jinja2-3.1.3.dist-info/METADATA,sha256=0cLNbRCI91jytc7Bzv3XAQfZzFDF2gxkJuH46eF5vew,3301 +Jinja2-3.1.3.dist-info/RECORD,, +Jinja2-3.1.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +Jinja2-3.1.3.dist-info/entry_points.txt,sha256=zRd62fbqIyfUpsRtU7EVIFyiu1tPwfgO7EvPErnxgTE,59 +Jinja2-3.1.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 +jinja2/__init__.py,sha256=NTBwMwsECrdHmxeXF7seusHLzrh6Ldn1A9qhS5cDuf0,1927 +jinja2/__pycache__/__init__.cpython-310.pyc,, +jinja2/__pycache__/_identifier.cpython-310.pyc,, +jinja2/__pycache__/async_utils.cpython-310.pyc,, +jinja2/__pycache__/bccache.cpython-310.pyc,, +jinja2/__pycache__/compiler.cpython-310.pyc,, +jinja2/__pycache__/constants.cpython-310.pyc,, +jinja2/__pycache__/debug.cpython-310.pyc,, +jinja2/__pycache__/defaults.cpython-310.pyc,, +jinja2/__pycache__/environment.cpython-310.pyc,, +jinja2/__pycache__/exceptions.cpython-310.pyc,, +jinja2/__pycache__/ext.cpython-310.pyc,, +jinja2/__pycache__/filters.cpython-310.pyc,, +jinja2/__pycache__/idtracking.cpython-310.pyc,, +jinja2/__pycache__/lexer.cpython-310.pyc,, +jinja2/__pycache__/loaders.cpython-310.pyc,, +jinja2/__pycache__/meta.cpython-310.pyc,, +jinja2/__pycache__/nativetypes.cpython-310.pyc,, +jinja2/__pycache__/nodes.cpython-310.pyc,, +jinja2/__pycache__/optimizer.cpython-310.pyc,, +jinja2/__pycache__/parser.cpython-310.pyc,, +jinja2/__pycache__/runtime.cpython-310.pyc,, +jinja2/__pycache__/sandbox.cpython-310.pyc,, +jinja2/__pycache__/tests.cpython-310.pyc,, +jinja2/__pycache__/utils.cpython-310.pyc,, +jinja2/__pycache__/visitor.cpython-310.pyc,, +jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958 +jinja2/async_utils.py,sha256=dFcmh6lMNfbh7eLKrBio8JqAKLHdZbpCuurFN4OERtY,2447 +jinja2/bccache.py,sha256=mhz5xtLxCcHRAa56azOhphIAe19u1we0ojifNMClDio,14061 +jinja2/compiler.py,sha256=PJzYdRLStlEOqmnQs1YxlizPrJoj3jTZuUleREn6AIQ,72199 +jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433 +jinja2/debug.py,sha256=iWJ432RadxJNnaMOPrjIDInz50UEgni3_HKuFXi2vuQ,6299 +jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267 +jinja2/environment.py,sha256=0qldX3VQKZcm6lgn7zHz94oRFow7YPYERiqkquomNjU,61253 +jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071 +jinja2/ext.py,sha256=5fnMpllaXkfm2P_93RIvi-OnK7Tk8mCW8Du-GcD12Hc,31844 +jinja2/filters.py,sha256=vYjKb2zaPShvYtn_LpSmqfS8SScbrA_KOanNibsMDIE,53862 +jinja2/idtracking.py,sha256=GfNmadir4oDALVxzn3DL9YInhJDr69ebXeA2ygfuCGA,10704 
+jinja2/lexer.py,sha256=DW2nX9zk-6MWp65YR2bqqj0xqCvLtD-u9NWT8AnFRxQ,29726 +jinja2/loaders.py,sha256=ayAwxfrA1SAffQta0nwSDm3TDT4KYiIGN_D9Z45B310,23085 +jinja2/meta.py,sha256=GNPEvifmSaU3CMxlbheBOZjeZ277HThOPUTf1RkppKQ,4396 +jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210 +jinja2/nodes.py,sha256=i34GPRAZexXMT6bwuf5SEyvdmS-bRCy9KMjwN5O6pjk,34550 +jinja2/optimizer.py,sha256=tHkMwXxfZkbfA1KmLcqmBMSaz7RLIvvItrJcPoXTyD8,1650 +jinja2/parser.py,sha256=Y199wPL-G67gJoi5G_5sHuu9uEP1PJkjjLEW_xTH8-k,39736 +jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jinja2/runtime.py,sha256=_6LkKIWFJjQdqlrgA3K39zBFQ-7Orm3wGDm96RwxQoE,33406 +jinja2/sandbox.py,sha256=Y0xZeXQnH6EX5VjaV2YixESxoepnRbW_3UeQosaBU3M,14584 +jinja2/tests.py,sha256=Am5Z6Lmfr2XaH_npIfJJ8MdXtWsbLjMULZJulTAj30E,5905 +jinja2/utils.py,sha256=IMwRIcN1SsTw2-jdQtlH2KzNABsXZBW_-tnFXafQBvY,23933 +jinja2/visitor.py,sha256=MH14C6yq24G_KVtWzjwaI7Wg14PCJIYlWW1kpkxYak0,3568 diff --git a/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..7b9666c8ea311ea0f0cfe7bed861aaa5469f92bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[babel.extractors] +jinja2 = jinja2.ext:babel_extract[i18n] diff --git a/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..7f7afbf3bf54b346092be6a72070fcbd305ead1e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2 diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/__init__.py b/env-llmeval/lib/python3.10/site-packages/absl/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3bd1cd51810385ca0e5e9fed3fb9a804febf27e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/absl/testing/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
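The Jinja METADATA earlier in this diff describes the render flow only in prose ("the template is passed data to render the final document"). As a minimal sketch of that flow: `Environment`, `from_string`, and `render` are Jinja's real public API, while the greeting template itself is just an invented illustration.

.. code-block:: python

    from jinja2 import Environment

    # autoescape=True enables the XSS protection the feature list mentions.
    env = Environment(autoescape=True)
    template = env.from_string("Hello, {{ name }}!")
    # render() substitutes the passed data into the template's placeholders.
    print(template.render(name="World"))  # -> Hello, World!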
diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a002ae6a0d18eccc6a39c761be73e4ac1a1fbd12 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f651b63867d03c27b38c88335af837a849b4f54d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/parameterized.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/parameterized.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ac17c23b1fa3f7c1c567dc8d85f70f819cd244c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/parameterized.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/xml_reporter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/xml_reporter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5fb7632a41326996cc3fc9cf23687e9fc9f3130 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/absl/testing/__pycache__/xml_reporter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/_bazelize_command.py b/env-llmeval/lib/python3.10/site-packages/absl/testing/_bazelize_command.py new file mode 100644 index 0000000000000000000000000000000000000000..9380d27427859371e323e12e0759ff83157223bc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/absl/testing/_bazelize_command.py @@ -0,0 +1,68 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal helper for running tests on Windows Bazel.""" + +import os + +from absl import flags + +FLAGS = flags.FLAGS + + +def get_executable_path(py_binary_name): + """Returns the executable path of a py_binary. + + This returns the executable path of a py_binary that is in another Bazel + target's data dependencies. + + On Linux/macOS, the path and __file__ have the same root directory. + On Windows, bazel builds an .exe file and we need to use the MANIFEST file + to locate the actual binary. + + Args: + py_binary_name: string, the name of a py_binary that is in another Bazel + target's data dependencies. + + Raises: + RuntimeError: Raised when it cannot locate the executable path.
+ """ + + if os.name == 'nt': + py_binary_name += '.exe' + manifest_file = os.path.join(FLAGS.test_srcdir, 'MANIFEST') + workspace_name = os.environ['TEST_WORKSPACE'] + manifest_entry = '{}/{}'.format(workspace_name, py_binary_name) + with open(manifest_file, 'r') as manifest_fd: + for line in manifest_fd: + tokens = line.strip().split(' ') + if len(tokens) != 2: + continue + if manifest_entry == tokens[0]: + return tokens[1] + raise RuntimeError( + 'Cannot locate executable path for {}, MANIFEST file: {}.'.format( + py_binary_name, manifest_file)) + else: + # NOTE: __file__ may be .py or .pyc, depending on how the module was + # loaded and executed. + path = __file__ + + # Use the package name to find the root directory: every dot is + # a directory, plus one for ourselves. + for _ in range(__name__.count('.') + 1): + path = os.path.dirname(path) + + root_directory = path + return os.path.join(root_directory, py_binary_name) diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/absltest.py b/env-llmeval/lib/python3.10/site-packages/absl/testing/absltest.py new file mode 100644 index 0000000000000000000000000000000000000000..e43cb820654f80ca93eb1d71ee2c5f4b1748795d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/absl/testing/absltest.py @@ -0,0 +1,2713 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base functionality for Abseil Python tests. + +This module contains base classes and high-level functions for Abseil-style +tests. +""" + +from collections import abc +import contextlib +import dataclasses +import difflib +import enum +import errno +import faulthandler +import getpass +import inspect +import io +import itertools +import json +import os +import random +import re +import shlex +import shutil +import signal +import stat +import subprocess +import sys +import tempfile +import textwrap +import typing +from typing import Any, AnyStr, BinaryIO, Callable, ContextManager, IO, Iterator, List, Mapping, MutableMapping, MutableSequence, NoReturn, Optional, Sequence, Text, TextIO, Tuple, Type, Union +import unittest +from unittest import mock # pylint: disable=unused-import Allow absltest.mock. +from urllib import parse + +from absl import app # pylint: disable=g-import-not-at-top +from absl import flags +from absl import logging +from absl.testing import _pretty_print_reporter +from absl.testing import xml_reporter + +# Use an if-type-checking block to prevent leakage of type-checking only +# symbols. We don't want people relying on these at runtime. 
+if typing.TYPE_CHECKING: + # Unbounded TypeVar for general usage + _T = typing.TypeVar('_T') + + import unittest.case # pylint: disable=g-import-not-at-top,g-bad-import-order + + _OutcomeType = unittest.case._Outcome # pytype: disable=module-attr + + +# pylint: enable=g-import-not-at-top + +# Re-export a bunch of unittest functions we support so that people don't +# have to import unittest to get them +# pylint: disable=invalid-name +skip = unittest.skip +skipIf = unittest.skipIf +skipUnless = unittest.skipUnless +SkipTest = unittest.SkipTest +expectedFailure = unittest.expectedFailure +# pylint: enable=invalid-name + +# End unittest re-exports + +FLAGS = flags.FLAGS + +_TEXT_OR_BINARY_TYPES = (str, bytes) + +# Suppress surplus entries in AssertionError stack traces. +__unittest = True # pylint: disable=invalid-name + + +def expectedFailureIf(condition, reason): # pylint: disable=invalid-name + """Expects the test to fail if the run condition is True. + + Example usage:: + + @expectedFailureIf(sys.version_info.major == 2, "Not yet working in py2") + def test_foo(self): + ... + + Args: + condition: bool, whether to expect failure or not. + reason: Text, the reason to expect failure. + Returns: + Decorator function + """ + del reason # Unused + if condition: + return unittest.expectedFailure + else: + return lambda f: f + + +class TempFileCleanup(enum.Enum): + # Always cleanup temp files when the test completes. + ALWAYS = 'always' + # Only cleanup temp file if the test passes. This allows easier inspection + # of tempfile contents on test failure. absltest.TEST_TMPDIR.value determines + # where tempfiles are created. + SUCCESS = 'success' + # Never cleanup temp files. + OFF = 'never' + + +# Many of the methods in this module have names like assertSameElements. +# This kind of name does not comply with PEP8 style, +# but it is consistent with the naming of methods in unittest.py. +# pylint: disable=invalid-name + + +def _get_default_test_random_seed(): + # type: () -> int + random_seed = 301 + value = os.environ.get('TEST_RANDOM_SEED', '') + try: + random_seed = int(value) + except ValueError: + pass + return random_seed + + +def get_default_test_srcdir(): + # type: () -> Text + """Returns default test source dir.""" + return os.environ.get('TEST_SRCDIR', '') + + +def get_default_test_tmpdir(): + # type: () -> Text + """Returns default test temp dir.""" + tmpdir = os.environ.get('TEST_TMPDIR', '') + if not tmpdir: + tmpdir = os.path.join(tempfile.gettempdir(), 'absl_testing') + + return tmpdir + + +def _get_default_randomize_ordering_seed(): + # type: () -> int + """Returns default seed to use for randomizing test order. + + This function first checks the --test_randomize_ordering_seed flag, and then + the TEST_RANDOMIZE_ORDERING_SEED environment variable. If the first value + we find is: + * (not set): disable test randomization + * 0: disable test randomization + * 'random': choose a random seed in [1, 4294967295] for test order + randomization + * positive integer: use this seed for test order randomization + + (The values used are patterned after + https://docs.python.org/3/using/cmdline.html#envvar-PYTHONHASHSEED). + + In principle, it would be simpler to return None if no override is provided; + however, the python random module has no `get_seed()`, only `getstate()`, + which returns far more data than we want to pass via an environment variable + or flag. + + Returns: + A default value for test case randomization (int). 0 means do not randomize.
+ + Raises: + ValueError: Raised when the flag or env value is not one of the options + above. + """ + if FLAGS['test_randomize_ordering_seed'].present: + randomize = FLAGS.test_randomize_ordering_seed + elif 'TEST_RANDOMIZE_ORDERING_SEED' in os.environ: + randomize = os.environ['TEST_RANDOMIZE_ORDERING_SEED'] + else: + randomize = '' + if not randomize: + return 0 + if randomize == 'random': + return random.Random().randint(1, 4294967295) + if randomize == '0': + return 0 + try: + seed = int(randomize) + if seed > 0: + return seed + except ValueError: + pass + raise ValueError( + 'Unknown test randomization seed value: {}'.format(randomize)) + + +TEST_SRCDIR = flags.DEFINE_string( + 'test_srcdir', + get_default_test_srcdir(), + 'Root of directory tree where source files live', + allow_override_cpp=True) +TEST_TMPDIR = flags.DEFINE_string( + 'test_tmpdir', + get_default_test_tmpdir(), + 'Directory for temporary testing files', + allow_override_cpp=True) + +flags.DEFINE_integer( + 'test_random_seed', + _get_default_test_random_seed(), + 'Random seed for testing. Some test frameworks may ' + 'change the default value of this flag between runs, so ' + 'it is not appropriate for seeding probabilistic tests.', + allow_override_cpp=True) +flags.DEFINE_string( + 'test_randomize_ordering_seed', + '', + 'If positive, use this as a seed to randomize the ' + 'execution order for test cases. If "random", pick a ' + 'random seed to use. If 0 or not set, do not randomize ' + 'test case execution order. This flag also overrides ' + 'the TEST_RANDOMIZE_ORDERING_SEED environment variable.', + allow_override_cpp=True) +flags.DEFINE_string('xml_output_file', '', 'File to store XML test results') + + +# We might need to monkey-patch TestResult so that it stops considering an +# unexpected pass as a "successful result". For details, see +# http://bugs.python.org/issue20165 +def _monkey_patch_test_result_for_unexpected_passes(): + # type: () -> None + """Workaround for http://bugs.python.org/issue20165.""" + + def wasSuccessful(self): + # type: () -> bool + """Tells whether or not this result was a success. + + Any unexpected pass is to be counted as a non-success. + + Args: + self: The TestResult instance. + + Returns: + Whether or not this result was a success. + """ + return (len(self.failures) == len(self.errors) == + len(self.unexpectedSuccesses) == 0) + + test_result = unittest.TestResult() + test_result.addUnexpectedSuccess(unittest.FunctionTestCase(lambda: None)) + if test_result.wasSuccessful(): # The bug is present. + unittest.TestResult.wasSuccessful = wasSuccessful + if test_result.wasSuccessful(): # Warn the user if our hot-fix failed. + sys.stderr.write('unittest.result.TestResult monkey patch to report' + ' unexpected passes as failures did not work.\n') + + +_monkey_patch_test_result_for_unexpected_passes() + + +def _open(filepath, mode, _open_func=open): + # type: (Text, Text, Callable[..., IO]) -> IO + """Opens a file. + + Like open(), but ensure that we can open real files even if tests stub out + open(). + + Args: + filepath: A filepath. + mode: A mode. + _open_func: A built-in open() function. + + Returns: + The opened file object. + """ + return _open_func(filepath, mode, encoding='utf-8') + + +class _TempDir(object): + """Represents a temporary directory for tests. + + Creation of this class is internal. Using its public methods is OK. + + This class implements the `os.PathLike` interface (specifically, + `os.PathLike[str]`). This means, in Python 3, it can be directly passed + to e.g. `os.path.join()`.
+ """ + + def __init__(self, path): + # type: (Text) -> None + """Module-private: do not instantiate outside module.""" + self._path = path + + @property + def full_path(self): + # type: () -> Text + """Returns the path, as a string, for the directory. + + TIP: Instead of e.g. `os.path.join(temp_dir.full_path)`, you can simply + do `os.path.join(temp_dir)` because `__fspath__()` is implemented. + """ + return self._path + + def __fspath__(self): + # type: () -> Text + """See os.PathLike.""" + return self.full_path + + def create_file(self, file_path=None, content=None, mode='w', encoding='utf8', + errors='strict'): + # type: (Optional[Text], Optional[AnyStr], Text, Text, Text) -> _TempFile + """Create a file in the directory. + + NOTE: If the file already exists, it will be made writable and overwritten. + + Args: + file_path: Optional file path for the temp file. If not given, a unique + file name will be generated and used. Slashes are allowed in the name; + any missing intermediate directories will be created. NOTE: This path + is the path that will be cleaned up, including any directories in the + path, e.g., 'foo/bar/baz.txt' will `rm -r foo` + content: Optional string or bytes to initially write to the file. If not + specified, then an empty file is created. + mode: Mode string to use when writing content. Only used if `content` is + non-empty. + encoding: Encoding to use when writing string content. Only used if + `content` is text. + errors: How to handle text to bytes encoding errors. Only used if + `content` is text. + + Returns: + A _TempFile representing the created file. + """ + tf, _ = _TempFile._create(self._path, file_path, content, mode, encoding, + errors) + return tf + + def mkdir(self, dir_path=None): + # type: (Optional[Text]) -> _TempDir + """Create a directory in the directory. + + Args: + dir_path: Optional path to the directory to create. If not given, + a unique name will be generated and used. + + Returns: + A _TempDir representing the created directory. + """ + if dir_path: + path = os.path.join(self._path, dir_path) + else: + path = tempfile.mkdtemp(dir=self._path) + + # Note: there's no need to clear the directory since the containing + # dir was cleared by the tempdir() function. + os.makedirs(path, exist_ok=True) + return _TempDir(path) + + +class _TempFile(object): + """Represents a tempfile for tests. + + Creation of this class is internal. Using its public methods is OK. + + This class implements the `os.PathLike` interface (specifically, + `os.PathLike[str]`). This means, in Python 3, it can be directly passed + to e.g. `os.path.join()`. + """ + + def __init__(self, path): + # type: (Text) -> None + """Private: use _create instead.""" + self._path = path + + # pylint: disable=line-too-long + @classmethod + def _create(cls, base_path, file_path, content, mode, encoding, errors): + # type: (Text, Optional[Text], AnyStr, Text, Text, Text) -> Tuple[_TempFile, Text] + # pylint: enable=line-too-long + """Module-private: create a tempfile instance.""" + if file_path: + cleanup_path = os.path.join(base_path, _get_first_part(file_path)) + path = os.path.join(base_path, file_path) + os.makedirs(os.path.dirname(path), exist_ok=True) + # The file may already exist, in which case, ensure it's writable so that + # it can be truncated. 
+ if os.path.exists(path) and not os.access(path, os.W_OK): + stat_info = os.stat(path) + os.chmod(path, stat_info.st_mode | stat.S_IWUSR) + else: + os.makedirs(base_path, exist_ok=True) + fd, path = tempfile.mkstemp(dir=str(base_path)) + os.close(fd) + cleanup_path = path + + tf = cls(path) + + if content: + if isinstance(content, str): + tf.write_text(content, mode=mode, encoding=encoding, errors=errors) + else: + tf.write_bytes(content, mode) + + else: + tf.write_bytes(b'') + + return tf, cleanup_path + + @property + def full_path(self): + # type: () -> Text + """Returns the path, as a string, for the file. + + TIP: Instead of e.g. `os.path.join(temp_file.full_path)`, you can simply + do `os.path.join(temp_file)` because `__fspath__()` is implemented. + """ + return self._path + + def __fspath__(self): + # type: () -> Text + """See os.PathLike.""" + return self.full_path + + def read_text(self, encoding='utf8', errors='strict'): + # type: (Text, Text) -> Text + """Return the contents of the file as text.""" + with self.open_text(encoding=encoding, errors=errors) as fp: + return fp.read() + + def read_bytes(self): + # type: () -> bytes + """Return the content of the file as bytes.""" + with self.open_bytes() as fp: + return fp.read() + + def write_text(self, text, mode='w', encoding='utf8', errors='strict'): + # type: (Text, Text, Text, Text) -> None + """Write text to the file. + + Args: + text: Text to write. In Python 2, it can be bytes, which will be + decoded using the `encoding` arg (this is as an aid for code that + is 2 and 3 compatible). + mode: The mode to open the file for writing. + encoding: The encoding to use when writing the text to the file. + errors: The error handling strategy to use when converting text to bytes. + """ + with self.open_text(mode, encoding=encoding, errors=errors) as fp: + fp.write(text) + + def write_bytes(self, data, mode='wb'): + # type: (bytes, Text) -> None + """Write bytes to the file. + + Args: + data: bytes to write. + mode: Mode to open the file for writing. The "b" flag is implicit if + not already present. It must not have the "t" flag. + """ + with self.open_bytes(mode) as fp: + fp.write(data) + + def open_text(self, mode='rt', encoding='utf8', errors='strict'): + # type: (Text, Text, Text) -> ContextManager[TextIO] + """Return a context manager for opening the file in text mode. + + Args: + mode: The mode to open the file in. The "t" flag is implicit if not + already present. It must not have the "b" flag. + encoding: The encoding to use when opening the file. + errors: How to handle decoding errors. + + Returns: + Context manager that yields an open file. + + Raises: + ValueError: if invalid inputs are provided. + """ + if 'b' in mode: + raise ValueError('Invalid mode {!r}: "b" flag not allowed when opening ' + 'file in text mode'.format(mode)) + if 't' not in mode: + mode += 't' + cm = self._open(mode, encoding, errors) + return cm + + def open_bytes(self, mode='rb'): + # type: (Text) -> ContextManager[BinaryIO] + """Return a context manager for opening the file in binary mode. + + Args: + mode: The mode to open the file in. The "b" mode is implicit if not + already present. It must not have the "t" flag. + + Returns: + Context manager that yields an open file. + + Raises: + ValueError: if invalid inputs are provided. 
+ """ + if 't' in mode: + raise ValueError('Invalid mode {!r}: "t" flag not allowed when opening ' + 'file in binary mode'.format(mode)) + if 'b' not in mode: + mode += 'b' + cm = self._open(mode, encoding=None, errors=None) + return cm + + # TODO(b/123775699): Once pytype supports typing.Literal, use overload and + # Literal to express more precise return types. The contained type is + # currently `Any` to avoid [bad-return-type] errors in the open_* methods. + @contextlib.contextmanager + def _open( + self, + mode: str, + encoding: Optional[str] = 'utf8', + errors: Optional[str] = 'strict', + ) -> Iterator[Any]: + with io.open( + self.full_path, mode=mode, encoding=encoding, errors=errors) as fp: + yield fp + + +class _method(object): + """A decorator that supports both instance and classmethod invocations. + + Using similar semantics to the @property builtin, this decorator can augment + an instance method to support conditional logic when invoked on a class + object. This breaks support for invoking an instance method via the class + (e.g. Cls.method(self, ...)) but is still situationally useful. + """ + + def __init__(self, finstancemethod): + # type: (Callable[..., Any]) -> None + self._finstancemethod = finstancemethod + self._fclassmethod = None + + def classmethod(self, fclassmethod): + # type: (Callable[..., Any]) -> _method + self._fclassmethod = classmethod(fclassmethod) + return self + + def __doc__(self): + # type: () -> str + if getattr(self._finstancemethod, '__doc__'): + return self._finstancemethod.__doc__ + elif getattr(self._fclassmethod, '__doc__'): + return self._fclassmethod.__doc__ + return '' + + def __get__(self, obj, type_): + # type: (Optional[Any], Optional[Type[Any]]) -> Callable[..., Any] + func = self._fclassmethod if obj is None else self._finstancemethod + return func.__get__(obj, type_) # pytype: disable=attribute-error + + +class TestCase(unittest.TestCase): + """Extension of unittest.TestCase providing more power.""" + + # When to cleanup files/directories created by our `create_tempfile()` and + # `create_tempdir()` methods after each test case completes. This does *not* + # affect e.g., files created outside of those methods, e.g., using the stdlib + # tempfile module. This can be overridden at the class level, instance level, + # or with the `cleanup` arg of `create_tempfile()` and `create_tempdir()`. See + # `TempFileCleanup` for details on the different values. + # TODO(b/70517332): Remove the type comment and the disable once pytype has + # better support for enums. + tempfile_cleanup = TempFileCleanup.ALWAYS # type: TempFileCleanup # pytype: disable=annotation-type-mismatch + + maxDiff = 80 * 20 + longMessage = True + + # Exit stacks for per-test and per-class scopes. + if sys.version_info < (3, 11): + _exit_stack = None + _cls_exit_stack = None + + def __init__(self, *args, **kwargs): + super(TestCase, self).__init__(*args, **kwargs) + # This is to work around missing type stubs in unittest.pyi + self._outcome = getattr(self, '_outcome') # type: Optional[_OutcomeType] + + def setUp(self): + super(TestCase, self).setUp() + # NOTE: Only Python 3 contextlib has ExitStack and + # Python 3.11+ already has enterContext. 
+ if hasattr(contextlib, 'ExitStack') and sys.version_info < (3, 11): + self._exit_stack = contextlib.ExitStack() + self.addCleanup(self._exit_stack.close) + + @classmethod + def setUpClass(cls): + super(TestCase, cls).setUpClass() + # NOTE: Only Python 3 contextlib has ExitStack, only Python 3.8+ has + # addClassCleanup and Python 3.11+ already has enterClassContext. + if ( + hasattr(contextlib, 'ExitStack') + and hasattr(cls, 'addClassCleanup') + and sys.version_info < (3, 11) + ): + cls._cls_exit_stack = contextlib.ExitStack() + cls.addClassCleanup(cls._cls_exit_stack.close) + + def create_tempdir(self, name=None, cleanup=None): + # type: (Optional[Text], Optional[TempFileCleanup]) -> _TempDir + """Create a temporary directory specific to the test. + + NOTE: The directory and its contents will be recursively cleared before + creation. This ensures that there is no pre-existing state. + + This creates a named directory on disk that is isolated to this test, and + will be properly cleaned up by the test. This avoids several pitfalls of + creating temporary directories for test purposes, as well as makes it easier + to set up directories and verify their contents. For example:: + + def test_foo(self): + out_dir = self.create_tempdir() + out_log = out_dir.create_file('output.log') + expected_paths = [ + os.path.join(out_dir, 'data-0.txt'), + os.path.join(out_dir, 'data-1.txt'), + ] + code_under_test(out_dir) + self.assertTrue(os.path.exists(expected_paths[0])) + self.assertTrue(os.path.exists(expected_paths[1])) + self.assertEqual('foo', out_log.read_text()) + + See also: :meth:`create_tempfile` for creating temporary files. + + Args: + name: Optional name of the directory. If not given, a unique + name will be generated and used. + cleanup: Optional cleanup policy on when/if to remove the directory (and + all its contents) at the end of the test. If None, then uses + :attr:`tempfile_cleanup`. + + Returns: + A _TempDir representing the created directory; see _TempDir class docs + for usage. + """ + test_path = self._get_tempdir_path_test() + + if name: + path = os.path.join(test_path, name) + cleanup_path = os.path.join(test_path, _get_first_part(name)) + else: + os.makedirs(test_path, exist_ok=True) + path = tempfile.mkdtemp(dir=test_path) + cleanup_path = path + + _rmtree_ignore_errors(cleanup_path) + os.makedirs(path, exist_ok=True) + + self._maybe_add_temp_path_cleanup(cleanup_path, cleanup) + + return _TempDir(path) + + # pylint: disable=line-too-long + def create_tempfile(self, file_path=None, content=None, mode='w', + encoding='utf8', errors='strict', cleanup=None): + # type: (Optional[Text], Optional[AnyStr], Text, Text, Text, Optional[TempFileCleanup]) -> _TempFile + # pylint: enable=line-too-long + """Create a temporary file specific to the test. + + This creates a named file on disk that is isolated to this test, and will + be properly cleaned up by the test. This avoids several pitfalls of + creating temporary files for test purposes, as well as makes it easier + to set up files, their data, read them back, and inspect them when + a test fails. For example:: + + def test_foo(self): + output = self.create_tempfile() + code_under_test(output) + self.assertGreater(os.path.getsize(output), 0) + self.assertEqual('foo', output.read_text()) + + NOTE: This will zero-out the file. This ensures there is no pre-existing + state. + NOTE: If the file already exists, it will be made writable and overwritten.
+ + See also: :meth:`create_tempdir` for creating temporary directories, and + ``_TempDir.create_file`` for creating files within a temporary directory. + + Args: + file_path: Optional file path for the temp file. If not given, a unique + file name will be generated and used. Slashes are allowed in the name; + any missing intermediate directories will be created. NOTE: This path is + the path that will be cleaned up, including any directories in the path, + e.g., ``'foo/bar/baz.txt'`` will ``rm -r foo``. + content: Optional string or + bytes to initially write to the file. If not + specified, then an empty file is created. + mode: Mode string to use when writing content. Only used if `content` is + non-empty. + encoding: Encoding to use when writing string content. Only used if + `content` is text. + errors: How to handle text to bytes encoding errors. Only used if + `content` is text. + cleanup: Optional cleanup policy on when/if to remove the directory (and + all its contents) at the end of the test. If None, then uses + :attr:`tempfile_cleanup`. + + Returns: + A _TempFile representing the created file; see _TempFile class docs for + usage. + """ + test_path = self._get_tempdir_path_test() + tf, cleanup_path = _TempFile._create(test_path, file_path, content=content, + mode=mode, encoding=encoding, + errors=errors) + self._maybe_add_temp_path_cleanup(cleanup_path, cleanup) + return tf + + @_method + def enter_context(self, manager): + # type: (ContextManager[_T]) -> _T + """Returns the CM's value after registering it with the exit stack. + + Entering a context pushes it onto a stack of contexts. When `enter_context` + is called on the test instance (e.g. `self.enter_context`), the context is + exited after the test case's tearDown call. When called on the test class + (e.g. `TestCase.enter_context`), the context is exited after the test + class's tearDownClass call. + + Contexts are exited in the reverse order of entering. They will always + be exited, regardless of test failure/success. + + This is useful to eliminate per-test boilerplate when context managers + are used. For example, instead of decorating every test with `@mock.patch`, + simply do `self.foo = self.enter_context(mock.patch(...))` in `setUp()`. + + NOTE: The context managers will always be exited without any error + information. This is an unfortunate implementation detail due to some + internals of how unittest runs tests. + + Args: + manager: The context manager to enter.
+ """ + if sys.version_info >= (3, 11): + return self.enterContext(manager) + + if not self._exit_stack: + raise AssertionError( + 'self._exit_stack is not set: enter_context is Py3-only; also make ' + 'sure that AbslTest.setUp() is called.') + return self._exit_stack.enter_context(manager) + + @enter_context.classmethod + def enter_context(cls, manager): # pylint: disable=no-self-argument + # type: (ContextManager[_T]) -> _T + if sys.version_info >= (3, 11): + return cls.enterClassContext(manager) + + if not cls._cls_exit_stack: + raise AssertionError( + 'cls._cls_exit_stack is not set: cls.enter_context requires ' + 'Python 3.8+; also make sure that AbslTest.setUpClass() is called.') + return cls._cls_exit_stack.enter_context(manager) + + @classmethod + def _get_tempdir_path_cls(cls): + # type: () -> Text + return os.path.join(TEST_TMPDIR.value, + cls.__qualname__.replace('__main__.', '')) + + def _get_tempdir_path_test(self): + # type: () -> Text + return os.path.join(self._get_tempdir_path_cls(), self._testMethodName) + + def _get_tempfile_cleanup(self, override): + # type: (Optional[TempFileCleanup]) -> TempFileCleanup + if override is not None: + return override + return self.tempfile_cleanup + + def _maybe_add_temp_path_cleanup(self, path, cleanup): + # type: (Text, Optional[TempFileCleanup]) -> None + cleanup = self._get_tempfile_cleanup(cleanup) + if cleanup == TempFileCleanup.OFF: + return + elif cleanup == TempFileCleanup.ALWAYS: + self.addCleanup(_rmtree_ignore_errors, path) + elif cleanup == TempFileCleanup.SUCCESS: + self._internal_add_cleanup_on_success(_rmtree_ignore_errors, path) + else: + raise AssertionError('Unexpected cleanup value: {}'.format(cleanup)) + + def _internal_add_cleanup_on_success( + self, + function: Callable[..., Any], + *args: Any, + **kwargs: Any, + ) -> None: + """Adds `function` as cleanup when the test case succeeds.""" + outcome = self._outcome + assert outcome is not None + previous_failure_count = ( + len(outcome.result.failures) + + len(outcome.result.errors) + + len(outcome.result.unexpectedSuccesses) + ) + def _call_cleaner_on_success(*args, **kwargs): + if not self._internal_ran_and_passed_when_called_during_cleanup( + previous_failure_count): + return + function(*args, **kwargs) + self.addCleanup(_call_cleaner_on_success, *args, **kwargs) + + def _internal_ran_and_passed_when_called_during_cleanup( + self, + previous_failure_count: int, + ) -> bool: + """Returns whether test is passed. Expected to be called during cleanup.""" + outcome = self._outcome + if sys.version_info[:2] >= (3, 11): + assert outcome is not None + current_failure_count = ( + len(outcome.result.failures) + + len(outcome.result.errors) + + len(outcome.result.unexpectedSuccesses) + ) + return current_failure_count == previous_failure_count + else: + # Before Python 3.11 https://github.com/python/cpython/pull/28180, errors + # were bufferred in _Outcome before calling cleanup. + result = self.defaultTestResult() + self._feedErrorsToResult(result, outcome.errors) # pytype: disable=attribute-error + return result.wasSuccessful() + + def shortDescription(self): + # type: () -> Text + """Formats both the test method name and the first line of its docstring. + + If no docstring is given, only returns the method name. + + This method overrides unittest.TestCase.shortDescription(), which + only returns the first line of the docstring, obscuring the name + of the test upon failure. + + Returns: + desc: A short description of a test method. 
+ """ + desc = self.id() + + # Omit the main name so that test name can be directly copy/pasted to + # the command line. + if desc.startswith('__main__.'): + desc = desc[len('__main__.'):] + + # NOTE: super() is used here instead of directly invoking + # unittest.TestCase.shortDescription(self), because of the + # following line that occurs later on: + # unittest.TestCase = TestCase + # Because of this, direct invocation of what we think is the + # superclass will actually cause infinite recursion. + doc_first_line = super(TestCase, self).shortDescription() + if doc_first_line is not None: + desc = '\n'.join((desc, doc_first_line)) + return desc + + def assertStartsWith(self, actual, expected_start, msg=None): + """Asserts that actual.startswith(expected_start) is True. + + Args: + actual: str + expected_start: str + msg: Optional message to report on failure. + """ + if not actual.startswith(expected_start): + self.fail('%r does not start with %r' % (actual, expected_start), msg) + + def assertNotStartsWith(self, actual, unexpected_start, msg=None): + """Asserts that actual.startswith(unexpected_start) is False. + + Args: + actual: str + unexpected_start: str + msg: Optional message to report on failure. + """ + if actual.startswith(unexpected_start): + self.fail('%r does start with %r' % (actual, unexpected_start), msg) + + def assertEndsWith(self, actual, expected_end, msg=None): + """Asserts that actual.endswith(expected_end) is True. + + Args: + actual: str + expected_end: str + msg: Optional message to report on failure. + """ + if not actual.endswith(expected_end): + self.fail('%r does not end with %r' % (actual, expected_end), msg) + + def assertNotEndsWith(self, actual, unexpected_end, msg=None): + """Asserts that actual.endswith(unexpected_end) is False. + + Args: + actual: str + unexpected_end: str + msg: Optional message to report on failure. + """ + if actual.endswith(unexpected_end): + self.fail('%r does end with %r' % (actual, unexpected_end), msg) + + def assertSequenceStartsWith(self, prefix, whole, msg=None): + """An equality assertion for the beginning of ordered sequences. + + If prefix is an empty sequence, it will raise an error unless whole is also + an empty sequence. + + If prefix is not a sequence, it will raise an error if the first element of + whole does not match. + + Args: + prefix: A sequence expected at the beginning of the whole parameter. + whole: The sequence in which to look for prefix. + msg: Optional message to report on failure. + """ + try: + prefix_len = len(prefix) + except (TypeError, NotImplementedError): + prefix = [prefix] + prefix_len = 1 + + if isinstance(whole, abc.Mapping) or isinstance(whole, abc.Set): + self.fail( + 'For whole: Mapping or Set objects are not supported, found type: %s' + % type(whole), + msg, + ) + try: + whole_len = len(whole) + except (TypeError, NotImplementedError): + self.fail('For whole: len(%s) is not supported, it appears to be type: ' + '%s' % (whole, type(whole)), msg) + + assert prefix_len <= whole_len, self._formatMessage( + msg, + 'Prefix length (%d) is longer than whole length (%d).' % + (prefix_len, whole_len) + ) + + if not prefix_len and whole_len: + self.fail('Prefix length is 0 but whole length is %d: %s' % + (len(whole), whole), msg) + + try: + self.assertSequenceEqual(prefix, whole[:prefix_len], msg) + except AssertionError: + self.fail('prefix: %s not found at start of whole: %s.' % + (prefix, whole), msg) + + def assertEmpty(self, container, msg=None): + """Asserts that an object has zero length. 
+ + Args: + container: Anything that implements the collections.abc.Sized interface. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + + # explicitly check the length since some Sized objects (e.g. numpy.ndarray) + # have strange __nonzero__/__bool__ behavior. + if len(container): # pylint: disable=g-explicit-length-test + self.fail('{!r} has length of {}.'.format(container, len(container)), msg) + + def assertNotEmpty(self, container, msg=None): + """Asserts that an object has non-zero length. + + Args: + container: Anything that implements the collections.abc.Sized interface. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + + # explicitly check the length since some Sized objects (e.g. numpy.ndarray) + # have strange __nonzero__/__bool__ behavior. + if not len(container): # pylint: disable=g-explicit-length-test + self.fail('{!r} has length of 0.'.format(container), msg) + + def assertLen(self, container, expected_len, msg=None): + """Asserts that an object has the expected length. + + Args: + container: Anything that implements the collections.abc.Sized interface. + expected_len: The expected length of the container. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + if len(container) != expected_len: + container_repr = unittest.util.safe_repr(container) # pytype: disable=module-attr + self.fail('{} has length of {}, expected {}.'.format( + container_repr, len(container), expected_len), msg) + + def assertSequenceAlmostEqual(self, expected_seq, actual_seq, places=None, + msg=None, delta=None): + """An approximate equality assertion for ordered sequences. + + Fail if the two sequences are unequal as determined by their value + differences rounded to the given number of decimal places (default 7) and + comparing to zero, or by comparing that the difference between each value + in the two sequences is more than the given delta. + + Note that decimal places (from zero) are usually not the same as significant + digits (measured from the most significant digit). + + If the two sequences compare equal then they will automatically compare + almost equal. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + places: The number of decimal places to compare. + msg: The message to be printed if the test fails. + delta: The OK difference between compared values. + """ + if len(expected_seq) != len(actual_seq): + self.fail('Sequence size mismatch: {} vs {}'.format( + len(expected_seq), len(actual_seq)), msg) + + err_list = [] + for idx, (exp_elem, act_elem) in enumerate(zip(expected_seq, actual_seq)): + try: + # assertAlmostEqual should be called with at most one of `places` and + # `delta`. However, it's okay for assertSequenceAlmostEqual to pass + # both because we want the latter to fail if the former does. 
+ # pytype: disable=wrong-keyword-args + self.assertAlmostEqual(exp_elem, act_elem, places=places, msg=msg, + delta=delta) + # pytype: enable=wrong-keyword-args + except self.failureException as err: + err_list.append('At index {}: {}'.format(idx, err)) + + if err_list: + if len(err_list) > 30: + err_list = err_list[:30] + ['...'] + msg = self._formatMessage(msg, '\n'.join(err_list)) + self.fail(msg) + + def assertContainsSubset(self, expected_subset, actual_set, msg=None): + """Checks whether actual iterable is a superset of expected iterable.""" + missing = set(expected_subset) - set(actual_set) + if not missing: + return + + self.fail('Missing elements %s\nExpected: %s\nActual: %s' % ( + missing, expected_subset, actual_set), msg) + + def assertNoCommonElements(self, expected_seq, actual_seq, msg=None): + """Checks whether actual iterable and expected iterable are disjoint.""" + common = set(expected_seq) & set(actual_seq) + if not common: + return + + self.fail('Common elements %s\nExpected: %s\nActual: %s' % ( + common, expected_seq, actual_seq), msg) + + def assertItemsEqual(self, expected_seq, actual_seq, msg=None): + """Deprecated, please use assertCountEqual instead. + + This is equivalent to assertCountEqual. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + msg: The message to be printed if the test fails. + """ + super().assertCountEqual(expected_seq, actual_seq, msg) + + def assertSameElements(self, expected_seq, actual_seq, msg=None): + """Asserts that two sequences have the same elements (in any order). + + This method, unlike assertCountEqual, doesn't care about any + duplicates in the expected and actual sequences:: + + # Doesn't raise an AssertionError + assertSameElements([1, 1, 1, 0, 0, 0], [0, 1]) + + If possible, you should use assertCountEqual instead of + assertSameElements. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + msg: The message to be printed if the test fails. + """ + # `unittest2.TestCase` used to have assertSameElements, but it was + # removed in favor of assertItemsEqual. As there's a unit test + # that explicitly checks this behavior, I am leaving this method + # alone. + # Fail on strings: empirically, passing strings to this test method + # is almost always a bug. If comparing the character sets of two strings + # is desired, cast the inputs to sets or lists explicitly. + if (isinstance(expected_seq, _TEXT_OR_BINARY_TYPES) or + isinstance(actual_seq, _TEXT_OR_BINARY_TYPES)): + self.fail('Passing string/bytes to assertSameElements is usually a bug. ' + 'Did you mean to use assertEqual?\n' + 'Expected: %s\nActual: %s' % (expected_seq, actual_seq)) + try: + expected = dict([(element, None) for element in expected_seq]) + actual = dict([(element, None) for element in actual_seq]) + missing = [element for element in expected if element not in actual] + unexpected = [element for element in actual if element not in expected] + missing.sort() + unexpected.sort() + except TypeError: + # Fall back to slower list-compare if any of the objects are + # not hashable. 
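+ # Sorting both copies lets _sorted_list_difference walk the two lists in a + # single merge-style pass to collect the missing and unexpected elements.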
+ expected = list(expected_seq) + actual = list(actual_seq) + expected.sort() + actual.sort() + missing, unexpected = _sorted_list_difference(expected, actual) + errors = [] + if msg: + errors.extend((msg, ':\n')) + if missing: + errors.append('Expected, but missing:\n %r\n' % missing) + if unexpected: + errors.append('Unexpected, but present:\n %r\n' % unexpected) + if missing or unexpected: + self.fail(''.join(errors)) + + # unittest.TestCase.assertMultiLineEqual works very similarly, but it + # has a different error format. However, I find this slightly more readable. + def assertMultiLineEqual(self, first, second, msg=None, **kwargs): + """Asserts that two multi-line strings are equal.""" + assert isinstance(first, + str), ('First argument is not a string: %r' % (first,)) + assert isinstance(second, + str), ('Second argument is not a string: %r' % (second,)) + line_limit = kwargs.pop('line_limit', 0) + if kwargs: + raise TypeError('Unexpected keyword args {}'.format(tuple(kwargs))) + + if first == second: + return + if msg: + failure_message = [msg + ':\n'] + else: + failure_message = ['\n'] + if line_limit: + line_limit += len(failure_message) + for line in difflib.ndiff(first.splitlines(True), second.splitlines(True)): + failure_message.append(line) + if not line.endswith('\n'): + failure_message.append('\n') + if line_limit and len(failure_message) > line_limit: + n_omitted = len(failure_message) - line_limit + failure_message = failure_message[:line_limit] + failure_message.append( + '(... and {} more delta lines omitted for brevity.)\n'.format( + n_omitted)) + + raise self.failureException(''.join(failure_message)) + + def assertBetween(self, value, minv, maxv, msg=None): + """Asserts that value is between minv and maxv (inclusive).""" + msg = self._formatMessage(msg, + '"%r" unexpectedly not between "%r" and "%r"' % + (value, minv, maxv)) + self.assertTrue(minv <= value, msg) + self.assertTrue(maxv >= value, msg) + + def assertRegexMatch(self, actual_str, regexes, message=None): + r"""Asserts that at least one regex in regexes matches str. + + If possible you should use `assertRegex`, which is a simpler + version of this method. `assertRegex` takes a single regular + expression (a string or re compiled object) instead of a list. + + Notes: + + 1. This function uses substring matching, i.e. the matching + succeeds if *any* substring of the error message matches *any* + regex in the list. This is more convenient for the user than + full-string matching. + + 2. If regexes is the empty list, the matching will always fail. + + 3. Use regexes=[''] for a regex that will always pass. + + 4. '.' matches any single character *except* the newline. To + match any character, use '(.|\n)'. + + 5. '^' matches the beginning of each line, not just the beginning + of the string. Similarly, '$' matches the end of each line. + + 6. An exception will be thrown if regexes contains an invalid + regex. + + Args: + actual_str: The string we try to match with the items in regexes. + regexes: The regular expressions we want to match against str. + See "Notes" above for detailed notes on how this is interpreted. + message: The message to be printed if the test fails. 
+ """ + if isinstance(regexes, _TEXT_OR_BINARY_TYPES): + self.fail('regexes is string or bytes; use assertRegex instead.', + message) + if not regexes: + self.fail('No regexes specified.', message) + + regex_type = type(regexes[0]) + for regex in regexes[1:]: + if type(regex) is not regex_type: # pylint: disable=unidiomatic-typecheck + self.fail('regexes list must all be the same type.', message) + + if regex_type is bytes and isinstance(actual_str, str): + regexes = [regex.decode('utf-8') for regex in regexes] + regex_type = str + elif regex_type is str and isinstance(actual_str, bytes): + regexes = [regex.encode('utf-8') for regex in regexes] + regex_type = bytes + + if regex_type is str: + regex = u'(?:%s)' % u')|(?:'.join(regexes) + elif regex_type is bytes: + regex = b'(?:' + (b')|(?:'.join(regexes)) + b')' + else: + self.fail('Only know how to deal with unicode str or bytes regexes.', + message) + + if not re.search(regex, actual_str, re.MULTILINE): + self.fail('"%s" does not contain any of these regexes: %s.' % + (actual_str, regexes), message) + + def assertCommandSucceeds(self, command, regexes=(b'',), env=None, + close_fds=True, msg=None): + """Asserts that a shell command succeeds (i.e. exits with code 0). + + Args: + command: List or string representing the command to run. + regexes: List of regular expression byte strings that match success. + env: Dictionary of environment variable settings. If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after + forking. + msg: Optional message to report on failure. + """ + (ret_code, err) = get_command_stderr(command, env, close_fds) + + # We need bytes regexes here because `err` is bytes. + # Accommodate code which listed their output regexes w/o the b'' prefix by + # converting them to bytes for the user. + if isinstance(regexes[0], str): + regexes = [regex.encode('utf-8') for regex in regexes] + + command_string = get_command_string(command) + self.assertEqual( + ret_code, 0, + self._formatMessage(msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s' % (_quote_long_string(command_string), + ret_code, + _quote_long_string(err))) + ) + self.assertRegexMatch( + err, + regexes, + message=self._formatMessage( + msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s which matches no regex in %s' % ( + _quote_long_string(command_string), + ret_code, + _quote_long_string(err), + regexes))) + + def assertCommandFails(self, command, regexes, env=None, close_fds=True, + msg=None): + """Asserts a shell command fails and the error matches a regex in a list. + + Args: + command: List or string representing the command to run. + regexes: the list of regular expression strings. + env: Dictionary of environment variable settings. If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after + forking. + msg: Optional message to report on failure. + """ + (ret_code, err) = get_command_stderr(command, env, close_fds) + + # We need bytes regexes here because `err` is bytes. + # Accommodate code which listed their output regexes w/o the b'' prefix by + # converting them to bytes for the user. 
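+    # For illustration: regexes=['fatal.*'] becomes [b'fatal.*'] here so the
+    # patterns can match `err`, which is bytes (see get_command_stderr);
+    # re.search cannot mix str patterns with bytes subjects.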
+ if isinstance(regexes[0], str): + regexes = [regex.encode('utf-8') for regex in regexes] + + command_string = get_command_string(command) + self.assertNotEqual( + ret_code, 0, + self._formatMessage(msg, 'The following command succeeded ' + 'while expected to fail:\n%s' % + _quote_long_string(command_string))) + self.assertRegexMatch( + err, + regexes, + message=self._formatMessage( + msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s which matches no regex in %s' % ( + _quote_long_string(command_string), + ret_code, + _quote_long_string(err), + regexes))) + + class _AssertRaisesContext(object): + + def __init__(self, expected_exception, test_case, test_func, msg=None): + self.expected_exception = expected_exception + self.test_case = test_case + self.test_func = test_func + self.msg = msg + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_type is None: + self.test_case.fail(self.expected_exception.__name__ + ' not raised', + self.msg) + if not issubclass(exc_type, self.expected_exception): + return False + self.test_func(exc_value) + if exc_value: + self.exception = exc_value.with_traceback(None) + return True + + @typing.overload + def assertRaisesWithPredicateMatch( + self, expected_exception, predicate) -> _AssertRaisesContext: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + @typing.overload + def assertRaisesWithPredicateMatch( + self, expected_exception, predicate, callable_obj: Callable[..., Any], + *args, **kwargs) -> None: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + def assertRaisesWithPredicateMatch(self, expected_exception, predicate, + callable_obj=None, *args, **kwargs): + """Asserts that exception is thrown and predicate(exception) is true. + + Args: + expected_exception: Exception class expected to be raised. + predicate: Function of one argument that inspects the passed-in exception + and returns True (success) or False (please fail the test). + callable_obj: Function to be called. + *args: Extra args. + **kwargs: Extra keyword args. + + Returns: + A context manager if callable_obj is None. Otherwise, None. + + Raises: + self.failureException if callable_obj does not raise a matching exception. + """ + def Check(err): + self.assertTrue(predicate(err), + '%r does not match predicate %r' % (err, predicate)) + + context = self._AssertRaisesContext(expected_exception, self, Check) + if callable_obj is None: + return context + with context: + callable_obj(*args, **kwargs) + + @typing.overload + def assertRaisesWithLiteralMatch( + self, expected_exception, expected_exception_message + ) -> _AssertRaisesContext: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + @typing.overload + def assertRaisesWithLiteralMatch( + self, expected_exception, expected_exception_message, + callable_obj: Callable[..., Any], *args, **kwargs) -> None: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. 
+    return self._AssertRaisesContext(None, None, None)
+
+  def assertRaisesWithLiteralMatch(self, expected_exception,
+                                   expected_exception_message,
+                                   callable_obj=None, *args, **kwargs):
+    """Asserts that the message in a raised exception equals the given string.
+
+    Unlike assertRaisesRegex, this method takes a literal string, not
+    a regular expression::
+
+      with self.assertRaisesWithLiteralMatch(ExType, 'message'):
+        DoSomething()
+
+    Args:
+      expected_exception: Exception class expected to be raised.
+      expected_exception_message: String message expected in the raised
+        exception. For a raised exception e, expected_exception_message must
+        equal str(e).
+      callable_obj: Function to be called, or None to return a context.
+      *args: Extra args.
+      **kwargs: Extra kwargs.
+
+    Returns:
+      A context manager if callable_obj is None. Otherwise, None.
+
+    Raises:
+      self.failureException if callable_obj does not raise a matching
+        exception.
+    """
+    def Check(err):
+      actual_exception_message = str(err)
+      self.assertTrue(expected_exception_message == actual_exception_message,
+                      'Exception message does not match.\n'
+                      'Expected: %r\n'
+                      'Actual: %r' % (expected_exception_message,
+                                      actual_exception_message))
+
+    context = self._AssertRaisesContext(expected_exception, self, Check)
+    if callable_obj is None:
+      return context
+    with context:
+      callable_obj(*args, **kwargs)
+
+  def assertContainsInOrder(self, strings, target, msg=None):
+    """Asserts that the strings provided are found in the target in order.
+
+    This may be useful for checking HTML output.
+
+    Args:
+      strings: A list of strings, such as ['fox', 'dog'].
+      target: A target string in which to look for the strings, such as
+        'The quick brown fox jumped over the lazy dog'.
+      msg: Optional message to report on failure.
+    """
+    if isinstance(strings, (bytes, str)):
+      strings = (strings,)
+
+    current_index = 0
+    last_string = None
+    for string in strings:
+      index = target.find(str(string), current_index)
+      if index == -1 and current_index == 0:
+        self.fail("Did not find '%s' in '%s'" %
+                  (string, target), msg)
+      elif index == -1:
+        self.fail("Did not find '%s' after '%s' in '%s'" %
+                  (string, last_string, target), msg)
+      last_string = string
+      current_index = index
+
+  def assertContainsSubsequence(self, container, subsequence, msg=None):
+    """Asserts that "container" contains "subsequence" as a subsequence.
+
+    Asserts that "container" contains all the elements of "subsequence", in
+    order, but possibly with other elements interspersed. For example,
+    [1, 2, 3] is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of
+    [0, 0, 1, 3, 0, 2, 0].
+
+    Args:
+      container: the list we're testing for subsequence inclusion.
+      subsequence: the list we hope will be a subsequence of container.
+      msg: Optional message to report on failure.
+    """
+    first_nonmatching = None
+    reversed_container = list(reversed(container))
+    subsequence = list(subsequence)
+
+    for e in subsequence:
+      if e not in reversed_container:
+        first_nonmatching = e
+        break
+      while e != reversed_container.pop():
+        pass
+
+    if first_nonmatching is not None:
+      self.fail('%s not a subsequence of %s. First non-matching element: %s' %
+                (subsequence, container, first_nonmatching), msg)
+
+  def assertContainsExactSubsequence(self, container, subsequence, msg=None):
+    """Asserts that "container" contains "subsequence" as an exact subsequence.
+
+    Asserts that "container" contains all the elements of "subsequence", in
+    order, and without other elements interspersed.
+    For example, [1, 2, 3] is an exact subsequence of [0, 0, 1, 2, 3, 0] but
+    not of [0, 0, 1, 2, 0, 3, 0].
+
+    Args:
+      container: the list we're testing for subsequence inclusion.
+      subsequence: the list we hope will be an exact subsequence of container.
+      msg: Optional message to report on failure.
+    """
+    container = list(container)
+    subsequence = list(subsequence)
+    longest_match = 0
+
+    for start in range(1 + len(container) - len(subsequence)):
+      if longest_match == len(subsequence):
+        break
+      index = 0
+      while (index < len(subsequence) and
+             subsequence[index] == container[start + index]):
+        index += 1
+      longest_match = max(longest_match, index)
+
+    if longest_match < len(subsequence):
+      self.fail('%s not an exact subsequence of %s. '
+                'Longest matching prefix: %s' %
+                (subsequence, container, subsequence[:longest_match]), msg)
+
+  def assertTotallyOrdered(self, *groups, **kwargs):
+    """Asserts that total ordering has been implemented correctly.
+
+    For example, say you have a class A that compares only on its attribute x.
+    Comparators other than ``__lt__`` are omitted for brevity::
+
+      class A(object):
+        def __init__(self, x, y):
+          self.x = x
+          self.y = y
+
+        def __hash__(self):
+          return hash(self.x)
+
+        def __lt__(self, other):
+          try:
+            return self.x < other.x
+          except AttributeError:
+            return NotImplemented
+
+    assertTotallyOrdered will check that instances can be ordered correctly.
+    For example::
+
+      self.assertTotallyOrdered(
+          [None],  # None should come before everything else.
+          [1],  # Integers sort earlier.
+          [A(1, 'a')],
+          [A(2, 'b')],  # 2 is after 1.
+          [A(3, 'c'), A(3, 'd')],  # The second argument is irrelevant.
+          [A(4, 'z')],
+          ['foo'])  # Strings sort last.
+
+    Args:
+      *groups: A list of groups of elements. Each group of elements is a list
+        of objects that are equal. The elements in each group must be less
+        than the elements in the group after it. For example, these groups are
+        totally ordered: ``[None]``, ``[1]``, ``[2, 2]``, ``[3]``.
+      **kwargs: optional msg keyword argument can be passed.
+    """
+
+    def CheckOrder(small, big):
+      """Ensures small is ordered before big."""
+      self.assertFalse(small == big,
+                       self._formatMessage(msg, '%r unexpectedly equals %r' %
+                                           (small, big)))
+      self.assertTrue(small != big,
+                      self._formatMessage(msg, '%r unexpectedly equals %r' %
+                                          (small, big)))
+      self.assertLess(small, big, msg)
+      self.assertFalse(big < small,
+                       self._formatMessage(msg,
+                                           '%r unexpectedly less than %r' %
+                                           (big, small)))
+      self.assertLessEqual(small, big, msg)
+      self.assertFalse(big <= small, self._formatMessage(
+          msg,
+          '%r unexpectedly less than or equal to %r' % (big, small)))
+      self.assertGreater(big, small, msg)
+      self.assertFalse(small > big,
+                       self._formatMessage(msg,
+                                           '%r unexpectedly greater than %r' %
+                                           (small, big)))
+      self.assertGreaterEqual(big, small, msg)
+      self.assertFalse(small >= big, self._formatMessage(
+          msg,
+          '%r unexpectedly greater than or equal to %r' % (small, big)))
+
+    def CheckEqual(a, b):
+      """Ensures that a and b are equal."""
+      self.assertEqual(a, b, msg)
+      self.assertFalse(a != b,
+                       self._formatMessage(msg, '%r unexpectedly unequals %r' %
+                                           (a, b)))
+
+      # Objects that compare equal must hash to the same value, but this only
+      # applies if both objects are hashable.
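+      # For illustration: equal but unhashable values, such as [1] == [1],
+      # skip the hash check below, since hash([1]) would raise TypeError.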
+ if (isinstance(a, abc.Hashable) and + isinstance(b, abc.Hashable)): + self.assertEqual( + hash(a), hash(b), + self._formatMessage( + msg, 'hash %d of %r unexpectedly not equal to hash %d of %r' % + (hash(a), a, hash(b), b))) + + self.assertFalse(a < b, + self._formatMessage(msg, + '%r unexpectedly less than %r' % + (a, b))) + self.assertFalse(b < a, + self._formatMessage(msg, + '%r unexpectedly less than %r' % + (b, a))) + self.assertLessEqual(a, b, msg) + self.assertLessEqual(b, a, msg) # pylint: disable=arguments-out-of-order + self.assertFalse(a > b, + self._formatMessage(msg, + '%r unexpectedly greater than %r' % + (a, b))) + self.assertFalse(b > a, + self._formatMessage(msg, + '%r unexpectedly greater than %r' % + (b, a))) + self.assertGreaterEqual(a, b, msg) + self.assertGreaterEqual(b, a, msg) # pylint: disable=arguments-out-of-order + + msg = kwargs.get('msg') + + # For every combination of elements, check the order of every pair of + # elements. + for elements in itertools.product(*groups): + elements = list(elements) + for index, small in enumerate(elements[:-1]): + for big in elements[index + 1:]: + CheckOrder(small, big) + + # Check that every element in each group is equal. + for group in groups: + for a in group: + CheckEqual(a, a) + for a, b in itertools.product(group, group): + CheckEqual(a, b) + + def assertDictEqual(self, a, b, msg=None): + """Raises AssertionError if a and b are not equal dictionaries. + + Args: + a: A dict, the expected value. + b: A dict, the actual value. + msg: An optional str, the associated message. + + Raises: + AssertionError: if the dictionaries are not equal. + """ + self.assertIsInstance(a, dict, self._formatMessage( + msg, + 'First argument is not a dictionary' + )) + self.assertIsInstance(b, dict, self._formatMessage( + msg, + 'Second argument is not a dictionary' + )) + + def Sorted(list_of_items): + try: + return sorted(list_of_items) # In 3.3, unordered are possible. + except TypeError: + return list_of_items + + if a == b: + return + a_items = Sorted(list(a.items())) + b_items = Sorted(list(b.items())) + + unexpected = [] + missing = [] + different = [] + + safe_repr = unittest.util.safe_repr # pytype: disable=module-attr + + def Repr(dikt): + """Deterministic repr for dict.""" + # Sort the entries based on their repr, not based on their sort order, + # which will be non-deterministic across executions, for many types. + entries = sorted((safe_repr(k), safe_repr(v)) for k, v in dikt.items()) + return '{%s}' % (', '.join('%s: %s' % pair for pair in entries)) + + message = ['%s != %s%s' % (Repr(a), Repr(b), ' (%s)' % msg if msg else '')] + + # The standard library default output confounds lexical difference with + # value difference; treat them separately. 
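+    # For illustration: with a={'x': 1, 'y': 2} and b={'y': 3, 'z': 4}, the
+    # loops below classify 'x' as missing, 'y' as different, and 'z' as
+    # unexpected, and each category gets its own section in the message.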
+ for a_key, a_value in a_items: + if a_key not in b: + missing.append((a_key, a_value)) + elif a_value != b[a_key]: + different.append((a_key, a_value, b[a_key])) + + for b_key, b_value in b_items: + if b_key not in a: + unexpected.append((b_key, b_value)) + + if unexpected: + message.append( + 'Unexpected, but present entries:\n%s' % ''.join( + '%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in unexpected)) + + if different: + message.append( + 'repr() of differing entries:\n%s' % ''.join( + '%s: %s != %s\n' % (safe_repr(k), safe_repr(a_value), + safe_repr(b_value)) + for k, a_value, b_value in different)) + + if missing: + message.append( + 'Missing entries:\n%s' % ''.join( + ('%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in missing))) + + raise self.failureException('\n'.join(message)) + + def assertDataclassEqual(self, first, second, msg=None): + """Asserts two dataclasses are equal with more informative errors. + + Arguments must both be dataclasses. This compares equality of individual + fields and takes care to not compare fields that are marked as + non-comparable. It gives per field differences, which are easier to parse + than the comparison of the string representations from assertEqual. + + In cases where the dataclass has a custom __eq__, and it is defined in a + way that is inconsistent with equality of comparable fields, we raise an + exception without further trying to figure out how they are different. + + Args: + first: A dataclass, the first value. + second: A dataclass, the second value. + msg: An optional str, the associated message. + + Raises: + AssertionError: if the dataclasses are not equal. + """ + + if not dataclasses.is_dataclass(first) or isinstance(first, type): + raise self.failureException('First argument is not a dataclass instance.') + if not dataclasses.is_dataclass(second) or isinstance(second, type): + raise self.failureException( + 'Second argument is not a dataclass instance.' + ) + + if first == second: + return + + if type(first) is not type(second): + self.fail( + 'Found different dataclass types: %s != %s' + % (type(first), type(second)), + msg, + ) + + # Make sure to skip fields that are marked compare=False. + different = [ + (f.name, getattr(first, f.name), getattr(second, f.name)) + for f in dataclasses.fields(first) + if f.compare and getattr(first, f.name) != getattr(second, f.name) + ] + + safe_repr = unittest.util.safe_repr # pytype: disable=module-attr + message = ['%s != %s' % (safe_repr(first), safe_repr(second))] + if different: + message.append('Fields that differ:') + message.extend( + '%s: %s != %s' % (k, safe_repr(first_v), safe_repr(second_v)) + for k, first_v, second_v in different + ) + else: + message.append( + 'Cannot detect difference by examining the fields of the dataclass.' 
+      )
+
+    self.fail('\n'.join(message), msg)
+
+  def assertUrlEqual(self, a, b, msg=None):
+    """Asserts that urls are equal, ignoring ordering of query params."""
+    parsed_a = parse.urlparse(a)
+    parsed_b = parse.urlparse(b)
+    self.assertEqual(parsed_a.scheme, parsed_b.scheme, msg)
+    self.assertEqual(parsed_a.netloc, parsed_b.netloc, msg)
+    self.assertEqual(parsed_a.path, parsed_b.path, msg)
+    self.assertEqual(parsed_a.fragment, parsed_b.fragment, msg)
+    self.assertEqual(sorted(parsed_a.params.split(';')),
+                     sorted(parsed_b.params.split(';')), msg)
+    self.assertDictEqual(
+        parse.parse_qs(parsed_a.query, keep_blank_values=True),
+        parse.parse_qs(parsed_b.query, keep_blank_values=True), msg)
+
+  def assertSameStructure(self, a, b, aname='a', bname='b', msg=None):
+    """Asserts that two values contain the same structural content.
+
+    The two arguments should be data trees consisting of trees of dicts and
+    lists. They will be deeply compared by walking into the contents of dicts
+    and lists; other items will be compared using the == operator.
+    If the two structures differ in content, the failure message will indicate
+    the location within the structures where the first difference is found.
+    This may be helpful when comparing large structures.
+
+    Mixed Sequence and Set types are supported. Mixed Mapping types are
+    supported, but the order of the keys will not be considered in the
+    comparison.
+
+    Args:
+      a: The first structure to compare.
+      b: The second structure to compare.
+      aname: Variable name to use for the first structure in assertion
+        messages.
+      bname: Variable name to use for the second structure.
+      msg: Additional text to include in the failure message.
+    """
+
+    # Accumulate all the problems found so we can report all of them at once
+    # rather than just stopping at the first.
+    problems = []
+
+    _walk_structure_for_problems(a, b, aname, bname, problems,
+                                 self.assertEqual, self.failureException)
+
+    # Avoid spamming the user too much.
+    if self.maxDiff is not None:
+      max_problems_to_show = self.maxDiff // 80
+      if len(problems) > max_problems_to_show:
+        problems = problems[0:max_problems_to_show-1] + ['...']
+
+    if problems:
+      self.fail('; '.join(problems), msg)
+
+  def assertJsonEqual(self, first, second, msg=None):
+    """Asserts that the JSON objects defined in two strings are equal.
+
+    A summary of the differences will be included in the failure message
+    using assertSameStructure.
+
+    Args:
+      first: A string containing JSON to decode and compare to second.
+      second: A string containing JSON to decode and compare to first.
+      msg: Additional text to include in the failure message.
+    """
+    try:
+      first_structured = json.loads(first)
+    except ValueError as e:
+      raise ValueError(self._formatMessage(
+          msg,
+          'could not decode first JSON value %s: %s' % (first, e)))
+
+    try:
+      second_structured = json.loads(second)
+    except ValueError as e:
+      raise ValueError(self._formatMessage(
+          msg,
+          'could not decode second JSON value %s: %s' % (second, e)))
+
+    self.assertSameStructure(first_structured, second_structured,
+                             aname='first', bname='second', msg=msg)
+
+  def _getAssertEqualityFunc(self, first, second):
+    # type: (Any, Any) -> Callable[..., None]
+    try:
+      return super(TestCase, self)._getAssertEqualityFunc(first, second)
+    except AttributeError:
+      # This is a workaround if unittest.TestCase.__init__ was never run.
+      # It usually means that somebody created a subclass just for the
+      # assertions and has overridden __init__.
"assertTrue" is a safe + # value that will not make __init__ raise a ValueError. + test_method = getattr(self, '_testMethodName', 'assertTrue') + super(TestCase, self).__init__(test_method) + + return super(TestCase, self)._getAssertEqualityFunc(first, second) + + def fail(self, msg=None, user_msg=None) -> NoReturn: + """Fail immediately with the given standard message and user message.""" + return super(TestCase, self).fail(self._formatMessage(user_msg, msg)) + + +def _sorted_list_difference(expected, actual): + # type: (List[_T], List[_T]) -> Tuple[List[_T], List[_T]] + """Finds elements in only one or the other of two, sorted input lists. + + Returns a two-element tuple of lists. The first list contains those + elements in the "expected" list but not in the "actual" list, and the + second contains those elements in the "actual" list but not in the + "expected" list. Duplicate elements in either input list are ignored. + + Args: + expected: The list we expected. + actual: The list we actually got. + Returns: + (missing, unexpected) + missing: items in expected that are not in actual. + unexpected: items in actual that are not in expected. + """ + i = j = 0 + missing = [] + unexpected = [] + while True: + try: + e = expected[i] + a = actual[j] + if e < a: + missing.append(e) + i += 1 + while expected[i] == e: + i += 1 + elif e > a: + unexpected.append(a) + j += 1 + while actual[j] == a: + j += 1 + else: + i += 1 + try: + while expected[i] == e: + i += 1 + finally: + j += 1 + while actual[j] == a: + j += 1 + except IndexError: + missing.extend(expected[i:]) + unexpected.extend(actual[j:]) + break + return missing, unexpected + + +def _are_both_of_integer_type(a, b): + # type: (object, object) -> bool + return isinstance(a, int) and isinstance(b, int) + + +def _are_both_of_sequence_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Sequence) and isinstance( + b, abc.Sequence) and not isinstance( + a, _TEXT_OR_BINARY_TYPES) and not isinstance(b, _TEXT_OR_BINARY_TYPES) + + +def _are_both_of_set_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Set) and isinstance(b, abc.Set) + + +def _are_both_of_mapping_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Mapping) and isinstance( + b, abc.Mapping) + + +def _walk_structure_for_problems( + a, b, aname, bname, problem_list, leaf_assert_equal_func, failure_exception +): + """The recursive comparison behind assertSameStructure.""" + if type(a) != type(b) and not ( # pylint: disable=unidiomatic-typecheck + _are_both_of_integer_type(a, b) or _are_both_of_sequence_type(a, b) or + _are_both_of_set_type(a, b) or _are_both_of_mapping_type(a, b)): + # We do not distinguish between int and long types as 99.99% of Python 2 + # code should never care. They collapse into a single type in Python 3. + problem_list.append('%s is a %r but %s is a %r' % + (aname, type(a), bname, type(b))) + # If they have different types there's no point continuing + return + + if isinstance(a, abc.Set): + for k in a: + if k not in b: + problem_list.append( + '%s has %r but %s does not' % (aname, k, bname)) + for k in b: + if k not in a: + problem_list.append('%s lacks %r but %s has it' % (aname, k, bname)) + + # NOTE: a or b could be a defaultdict, so we must take care that the traversal + # doesn't modify the data. 
+ elif isinstance(a, abc.Mapping): + for k in a: + if k in b: + _walk_structure_for_problems( + a[k], b[k], '%s[%r]' % (aname, k), '%s[%r]' % (bname, k), + problem_list, leaf_assert_equal_func, failure_exception) + else: + problem_list.append( + "%s has [%r] with value %r but it's missing in %s" % + (aname, k, a[k], bname)) + for k in b: + if k not in a: + problem_list.append( + '%s lacks [%r] but %s has it with value %r' % + (aname, k, bname, b[k])) + + # Strings/bytes are Sequences but we'll just do those with regular != + elif (isinstance(a, abc.Sequence) and + not isinstance(a, _TEXT_OR_BINARY_TYPES)): + minlen = min(len(a), len(b)) + for i in range(minlen): + _walk_structure_for_problems( + a[i], b[i], '%s[%d]' % (aname, i), '%s[%d]' % (bname, i), + problem_list, leaf_assert_equal_func, failure_exception) + for i in range(minlen, len(a)): + problem_list.append('%s has [%i] with value %r but %s does not' % + (aname, i, a[i], bname)) + for i in range(minlen, len(b)): + problem_list.append('%s lacks [%i] but %s has it with value %r' % + (aname, i, bname, b[i])) + + else: + try: + leaf_assert_equal_func(a, b) + except failure_exception: + problem_list.append('%s is %r but %s is %r' % (aname, a, bname, b)) + + +def get_command_string(command): + """Returns an escaped string that can be used as a shell command. + + Args: + command: List or string representing the command to run. + Returns: + A string suitable for use as a shell command. + """ + if isinstance(command, str): + return command + else: + if os.name == 'nt': + return ' '.join(command) + else: + # The following is identical to Python 3's shlex.quote function. + command_string = '' + for word in command: + # Single quote word, and replace each ' in word with '"'"' + command_string += "'" + word.replace("'", "'\"'\"'") + "' " + return command_string[:-1] + + +def get_command_stderr(command, env=None, close_fds=True): + """Runs the given shell command and returns a tuple. + + Args: + command: List or string representing the command to run. + env: Dictionary of environment variable settings. If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after forking. + On Windows, this is ignored and close_fds is always False. + + Returns: + Tuple of (exit status, text printed to stdout and stderr by the command). + """ + if env is None: env = {} + if os.name == 'nt': + # Windows does not support setting close_fds to True while also redirecting + # standard handles. + close_fds = False + + use_shell = isinstance(command, str) + process = subprocess.Popen( + command, + close_fds=close_fds, + env=env, + shell=use_shell, + stderr=subprocess.STDOUT, + stdout=subprocess.PIPE) + output = process.communicate()[0] + exit_status = process.wait() + return (exit_status, output) + + +def _quote_long_string(s): + # type: (Union[Text, bytes, bytearray]) -> Text + """Quotes a potentially multi-line string to make the start and end obvious. + + Args: + s: A string. + + Returns: + The quoted string. 
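+
+  For example (illustrative), quoting the string 'hi' yields::
+
+    8<-----------
+    hi
+    ----------->8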
+ """ + if isinstance(s, (bytes, bytearray)): + try: + s = s.decode('utf-8') + except UnicodeDecodeError: + s = str(s) + return ('8<-----------\n' + + s + '\n' + + '----------->8\n') + + +def print_python_version(): + # type: () -> None + # Having this in the test output logs by default helps debugging when all + # you've got is the log and no other idea of which Python was used. + sys.stderr.write('Running tests under Python {0[0]}.{0[1]}.{0[2]}: ' + '{1}\n'.format( + sys.version_info, + sys.executable if sys.executable else 'embedded.')) + + +def main(*args, **kwargs): + # type: (Text, Any) -> None + """Executes a set of Python unit tests. + + Usually this function is called without arguments, so the + unittest.TestProgram instance will get created with the default settings, + so it will run all test methods of all TestCase classes in the ``__main__`` + module. + + Args: + *args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + **kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + """ + print_python_version() + _run_in_app(run_tests, args, kwargs) + + +def _is_in_app_main(): + # type: () -> bool + """Returns True iff app.run is active.""" + f = sys._getframe().f_back # pylint: disable=protected-access + while f: + if f.f_code == app.run.__code__: + return True + f = f.f_back + return False + + +def _register_sigterm_with_faulthandler(): + # type: () -> None + """Have faulthandler dump stacks on SIGTERM. Useful to diagnose timeouts.""" + if getattr(faulthandler, 'register', None): + # faulthandler.register is not available on Windows. + # faulthandler.enable() is already called by app.run. + try: + faulthandler.register(signal.SIGTERM, chain=True) # pytype: disable=module-attr + except Exception as e: # pylint: disable=broad-except + sys.stderr.write('faulthandler.register(SIGTERM) failed ' + '%r; ignoring.\n' % e) + + +def _run_in_app(function, args, kwargs): + # type: (Callable[..., None], Sequence[Text], Mapping[Text, Any]) -> None + """Executes a set of Python unit tests, ensuring app.run. + + This is a private function, users should call absltest.main(). + + _run_in_app calculates argv to be the command-line arguments of this program + (without the flags), sets the default of FLAGS.alsologtostderr to True, + then it calls function(argv, args, kwargs), making sure that `function' + will get called within app.run(). _run_in_app does this by checking whether + it is called by app.run(), or by calling app.run() explicitly. + + The reason why app.run has to be ensured is to make sure that + flags are parsed and stripped properly, and other initializations done by + the app module are also carried out, no matter if absltest.run() is called + from within or outside app.run(). + + If _run_in_app is called from within app.run(), then it will reparse + sys.argv and pass the result without command-line flags into the argv + argument of `function'. The reason why this parsing is needed is that + __main__.main() calls absltest.main() without passing its argv. So the + only way _run_in_app could get to know the argv without the flags is that + it reparses sys.argv. + + _run_in_app changes the default of FLAGS.alsologtostderr to True so that the + test program's stderr will contain all the log messages unless otherwise + specified on the command-line. This overrides any explicit assignment to + FLAGS.alsologtostderr by the test program prior to the call to _run_in_app() + (e.g. in __main__.main). 
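+
+  For illustration (a typical, assumed setup), a test module ends with::
+
+    if __name__ == '__main__':
+      absltest.main()
+
+  absltest.main() then reaches this function as _run_in_app(run_tests, ...),
+  so the tests always execute within app.run().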
+
+  Please note that _run_in_app (and the function it calls) is allowed to make
+  changes to kwargs.
+
+  Args:
+    function: absltest.run_tests or a similar function. It will be called as
+      function(argv, args, kwargs) where argv is a list containing the
+      elements of sys.argv without the command-line flags.
+    args: Positional arguments passed through to unittest.TestProgram.__init__.
+    kwargs: Keyword arguments passed through to unittest.TestProgram.__init__.
+  """
+  if _is_in_app_main():
+    _register_sigterm_with_faulthandler()
+
+    # Change the default of alsologtostderr from False to True, so the test
+    # program's stderr will contain all the log messages.
+    # If --alsologtostderr=false is specified in the command-line, or user
+    # has called FLAGS.alsologtostderr = False before, then the value is kept
+    # False.
+    FLAGS.set_default('alsologtostderr', True)
+
+    # Here we only want to get the `argv` without the flags. To avoid any
+    # side effects of parsing flags, we temporarily stub out the `parse`
+    # method.
+    stored_parse_methods = {}
+    noop_parse = lambda _: None
+    for name in FLAGS:
+      # Avoid any side effects of parsing flags.
+      stored_parse_methods[name] = FLAGS[name].parse
+    # This must be a separate loop since multiple flag names (short_name=) can
+    # point to the same flag object.
+    for name in FLAGS:
+      FLAGS[name].parse = noop_parse
+    try:
+      argv = FLAGS(sys.argv)
+    finally:
+      for name in FLAGS:
+        FLAGS[name].parse = stored_parse_methods[name]
+    sys.stdout.flush()
+
+    function(argv, args, kwargs)
+  else:
+    # Send logging to stderr. Use --alsologtostderr instead of --logtostderr
+    # in case tests are reading their own logs.
+    FLAGS.set_default('alsologtostderr', True)
+
+    def main_function(argv):
+      _register_sigterm_with_faulthandler()
+      function(argv, args, kwargs)
+
+    app.run(main=main_function)
+
+
+def _is_suspicious_attribute(testCaseClass, name):
+  # type: (Type, Text) -> bool
+  """Returns True if an attribute is a method named like a test method."""
+  if name.startswith('Test') and len(name) > 4 and name[4].isupper():
+    attr = getattr(testCaseClass, name)
+    if inspect.isfunction(attr) or inspect.ismethod(attr):
+      args = inspect.getfullargspec(attr)
+      return (len(args.args) == 1 and args.args[0] == 'self' and
+              args.varargs is None and args.varkw is None and
+              not args.kwonlyargs)
+  return False
+
+
+def skipThisClass(reason):
+  # type: (Text) -> Callable[[_T], _T]
+  """Skip tests in the decorated TestCase, but not any of its subclasses.
+
+  This decorator indicates that this class should skip all its tests, but not
+  any of its subclasses. Useful if you want to share testMethod or setUp
+  implementations between a number of concrete testcase classes.
+
+  Example usage, showing how you can share some common test methods between
+  subclasses. In this example, only ``BaseTest`` will be marked as skipped, and
+  not RealTest or SecondRealTest::
+
+    @absltest.skipThisClass("Shared functionality")
+    class BaseTest(absltest.TestCase):
+      def test_simple_functionality(self):
+        self.assertEqual(self.system_under_test.method(), 1)
+
+    class RealTest(BaseTest):
+      def setUp(self):
+        super().setUp()
+        self.system_under_test = MakeSystem(argument)
+
+      def test_specific_behavior(self):
+        ...
+
+    class SecondRealTest(BaseTest):
+      def setUp(self):
+        super().setUp()
+        self.system_under_test = MakeSystem(other_arguments)
+
+      def test_other_behavior(self):
+        ...
+
+  Args:
+    reason: The reason we have a skip in place. For instance: 'shared test
+      methods' or 'shared assertion methods'.
+ + Returns: + Decorator function that will cause a class to be skipped. + """ + if isinstance(reason, type): + raise TypeError('Got {!r}, expected reason as string'.format(reason)) + + def _skip_class(test_case_class): + if not issubclass(test_case_class, unittest.TestCase): + raise TypeError( + 'Decorating {!r}, expected TestCase subclass'.format(test_case_class)) + + # Only shadow the setUpClass method if it is directly defined. If it is + # in the parent class we invoke it via a super() call instead of holding + # a reference to it. + shadowed_setupclass = test_case_class.__dict__.get('setUpClass', None) + + @classmethod + def replacement_setupclass(cls, *args, **kwargs): + # Skip this class if it is the one that was decorated with @skipThisClass + if cls is test_case_class: + raise SkipTest(reason) + if shadowed_setupclass: + # Pass along `cls` so the MRO chain doesn't break. + # The original method is a `classmethod` descriptor, which can't + # be directly called, but `__func__` has the underlying function. + return shadowed_setupclass.__func__(cls, *args, **kwargs) + else: + # Because there's no setUpClass() defined directly on test_case_class, + # we call super() ourselves to continue execution of the inheritance + # chain. + return super(test_case_class, cls).setUpClass(*args, **kwargs) + + test_case_class.setUpClass = replacement_setupclass + return test_case_class + + return _skip_class + + +class TestLoader(unittest.TestLoader): + """A test loader which supports common test features. + + Supported features include: + * Banning untested methods with test-like names: methods attached to this + testCase with names starting with `Test` are ignored by the test runner, + and often represent mistakenly-omitted test cases. This loader will raise + a TypeError when attempting to load a TestCase with such methods. + * Randomization of test case execution order (optional). + """ + + _ERROR_MSG = textwrap.dedent("""Method '%s' is named like a test case but + is not one. This is often a bug. If you want it to be a test method, + name it with 'test' in lowercase. 
If not, rename the method to not begin + with 'Test'.""") + + def __init__(self, *args, **kwds): + super(TestLoader, self).__init__(*args, **kwds) + seed = _get_default_randomize_ordering_seed() + if seed: + self._randomize_ordering_seed = seed + self._random = random.Random(self._randomize_ordering_seed) + else: + self._randomize_ordering_seed = None + self._random = None + + def getTestCaseNames(self, testCaseClass): # pylint:disable=invalid-name + """Validates and returns a (possibly randomized) list of test case names.""" + for name in dir(testCaseClass): + if _is_suspicious_attribute(testCaseClass, name): + raise TypeError(TestLoader._ERROR_MSG % name) + names = list(super(TestLoader, self).getTestCaseNames(testCaseClass)) + if self._randomize_ordering_seed is not None: + logging.info( + 'Randomizing test order with seed: %d', self._randomize_ordering_seed) + logging.info( + 'To reproduce this order, re-run with ' + '--test_randomize_ordering_seed=%d', self._randomize_ordering_seed) + self._random.shuffle(names) + return names + + +def get_default_xml_output_filename(): + # type: () -> Optional[Text] + if os.environ.get('XML_OUTPUT_FILE'): + return os.environ['XML_OUTPUT_FILE'] + elif os.environ.get('RUNNING_UNDER_TEST_DAEMON'): + return os.path.join(os.path.dirname(TEST_TMPDIR.value), 'test_detail.xml') + elif os.environ.get('TEST_XMLOUTPUTDIR'): + return os.path.join( + os.environ['TEST_XMLOUTPUTDIR'], + os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.xml') + + +def _setup_filtering(argv: MutableSequence[str]) -> bool: + """Implements the bazel test filtering protocol. + + The following environment variable is used in this method: + + TESTBRIDGE_TEST_ONLY: string, if set, is forwarded to the unittest + framework to use as a test filter. Its value is split with shlex, then: + 1. On Python 3.6 and before, split values are passed as positional + arguments on argv. + 2. On Python 3.7+, split values are passed to unittest's `-k` flag. Tests + are matched by glob patterns or substring. See + https://docs.python.org/3/library/unittest.html#cmdoption-unittest-k + + Args: + argv: the argv to mutate in-place. + + Returns: + Whether test filtering is requested. + """ + test_filter = os.environ.get('TESTBRIDGE_TEST_ONLY') + if argv is None or not test_filter: + return False + + filters = shlex.split(test_filter) + if sys.version_info[:2] >= (3, 7): + filters = ['-k=' + test_filter for test_filter in filters] + + argv[1:1] = filters + return True + + +def _setup_test_runner_fail_fast(argv): + # type: (MutableSequence[Text]) -> None + """Implements the bazel test fail fast protocol. + + The following environment variable is used in this method: + + TESTBRIDGE_TEST_RUNNER_FAIL_FAST=<1|0> + + If set to 1, --failfast is passed to the unittest framework to return upon + first failure. + + Args: + argv: the argv to mutate in-place. + """ + + if argv is None: + return + + if os.environ.get('TESTBRIDGE_TEST_RUNNER_FAIL_FAST') != '1': + return + + argv[1:1] = ['--failfast'] + + +def _setup_sharding( + custom_loader: Optional[unittest.TestLoader] = None, +) -> Tuple[unittest.TestLoader, Optional[int]]: + """Implements the bazel sharding protocol. + + The following environment variables are used in this method: + + TEST_SHARD_STATUS_FILE: string, if set, points to a file. We write a blank + file to tell the test runner that this test implements the test sharding + protocol. + + TEST_TOTAL_SHARDS: int, if set, sharding is requested. + + TEST_SHARD_INDEX: int, must be set if TEST_TOTAL_SHARDS is set. 
Specifies + the shard index for this instance of the test process. Must satisfy: + 0 <= TEST_SHARD_INDEX < TEST_TOTAL_SHARDS. + + Args: + custom_loader: A TestLoader to be made sharded. + + Returns: + A tuple of ``(test_loader, shard_index)``. ``test_loader`` is for + shard-filtering or the standard test loader depending on the sharding + environment variables. ``shard_index`` is the shard index, or ``None`` when + sharding is not used. + """ + + # It may be useful to write the shard file even if the other sharding + # environment variables are not set. Test runners may use this functionality + # to query whether a test binary implements the test sharding protocol. + if 'TEST_SHARD_STATUS_FILE' in os.environ: + try: + with open(os.environ['TEST_SHARD_STATUS_FILE'], 'w') as f: + f.write('') + except IOError: + sys.stderr.write('Error opening TEST_SHARD_STATUS_FILE (%s). Exiting.' + % os.environ['TEST_SHARD_STATUS_FILE']) + sys.exit(1) + + base_loader = custom_loader or TestLoader() + if 'TEST_TOTAL_SHARDS' not in os.environ: + # Not using sharding, use the expected test loader. + return base_loader, None + + total_shards = int(os.environ['TEST_TOTAL_SHARDS']) + shard_index = int(os.environ['TEST_SHARD_INDEX']) + + if shard_index < 0 or shard_index >= total_shards: + sys.stderr.write('ERROR: Bad sharding values. index=%d, total=%d\n' % + (shard_index, total_shards)) + sys.exit(1) + + # Replace the original getTestCaseNames with one that returns + # the test case names for this shard. + delegate_get_names = base_loader.getTestCaseNames + + bucket_iterator = itertools.cycle(range(total_shards)) + + def getShardedTestCaseNames(testCaseClass): + filtered_names = [] + # We need to sort the list of tests in order to determine which tests this + # shard is responsible for; however, it's important to preserve the order + # returned by the base loader, e.g. in the case of randomized test ordering. + ordered_names = delegate_get_names(testCaseClass) + for testcase in sorted(ordered_names): + bucket = next(bucket_iterator) + if bucket == shard_index: + filtered_names.append(testcase) + return [x for x in ordered_names if x in filtered_names] + + base_loader.getTestCaseNames = getShardedTestCaseNames + return base_loader, shard_index + + +def _run_and_get_tests_result( + argv: MutableSequence[str], + args: Sequence[Any], + kwargs: MutableMapping[str, Any], + xml_test_runner_class: Type[unittest.TextTestRunner], +) -> Tuple[unittest.TestResult, bool]: + """Same as run_tests, but it doesn't exit. + + Args: + argv: sys.argv with the command-line flags removed from the front, i.e. the + argv with which :func:`app.run()` has called + ``__main__.main``. It is passed to + ``unittest.TestProgram.__init__(argv=)``, which does its own flag parsing. + It is ignored if kwargs contains an argv entry. + args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + xml_test_runner_class: The type of the test runner class. + + Returns: + A tuple of ``(test_result, fail_when_no_tests_ran)``. + ``fail_when_no_tests_ran`` indicates whether the test should fail when + no tests ran. + """ + + # The entry from kwargs overrides argv. + argv = kwargs.pop('argv', argv) + + if sys.version_info[:2] >= (3, 12): + # Python 3.12 unittest changed the behavior from PASS to FAIL in + # https://github.com/python/cpython/pull/102051. absltest follows this. 
+    fail_when_no_tests_ran = True
+  else:
+    # Historically, absltest and unittest before Python 3.12 pass if no tests
+    # ran.
+    fail_when_no_tests_ran = False
+
+  # Set up test filtering if requested in environment.
+  if _setup_filtering(argv):
+    # When test filtering is requested, ideally we also want to fail when no
+    # tests ran. However, the test filters are usually done when running bazel.
+    # When you run multiple targets, e.g. `bazel test //my_dir/...
+    # --test_filter=MyTest`, you don't necessarily want individual tests to
+    # fail because no tests match in that particular target.
+    # Due to this use case, we don't fail when test filtering is requested via
+    # the environment variable from bazel.
+    fail_when_no_tests_ran = False
+
+  # Set up --failfast as requested in environment.
+  _setup_test_runner_fail_fast(argv)
+
+  # Shard the (default or custom) loader if sharding is turned on.
+  kwargs['testLoader'], shard_index = _setup_sharding(
+      kwargs.get('testLoader', None)
+  )
+  if shard_index is not None and shard_index > 0:
+    # When sharding is requested, all the shards except the first one shall not
+    # fail when no tests ran. This happens when the shard count is greater than
+    # the test case count.
+    fail_when_no_tests_ran = False
+
+  # XML file name is based upon (sorted by priority):
+  # --xml_output_file flag, XML_OUTPUT_FILE variable,
+  # TEST_XMLOUTPUTDIR variable or RUNNING_UNDER_TEST_DAEMON variable.
+  if not FLAGS.xml_output_file:
+    FLAGS.xml_output_file = get_default_xml_output_filename()
+  xml_output_file = FLAGS.xml_output_file
+
+  xml_buffer = None
+  if xml_output_file:
+    xml_output_dir = os.path.dirname(xml_output_file)
+    if xml_output_dir and not os.path.isdir(xml_output_dir):
+      try:
+        os.makedirs(xml_output_dir)
+      except OSError as e:
+        # File exists error can occur with concurrent tests.
+        if e.errno != errno.EEXIST:
+          raise
+    # Fail early if we can't write to the XML output file. This is so that we
+    # don't waste people's time running tests that will just fail anyway.
+    with _open(xml_output_file, 'w'):
+      pass
+
+    # We can reuse testRunner if it supports XML output (e.g. by inheriting
+    # from xml_reporter.TextAndXMLTestRunner). Otherwise we need to use
+    # xml_reporter.TextAndXMLTestRunner.
+    if (kwargs.get('testRunner') is not None
+        and not hasattr(kwargs['testRunner'], 'set_default_xml_stream')):
+      sys.stderr.write('WARNING: XML_OUTPUT_FILE or --xml_output_file setting '
+                       'overrides testRunner=%r setting (possibly from --pdb)'
+                       % (kwargs['testRunner']))
+      # Passing a class object here allows TestProgram to initialize
+      # instances based on its kwargs and/or parsed command-line args.
+      kwargs['testRunner'] = xml_test_runner_class
+    if kwargs.get('testRunner') is None:
+      kwargs['testRunner'] = xml_test_runner_class
+    # Use an in-memory buffer (not backed by the actual file) to store the XML
+    # report, because some tools modify the file (e.g., create a placeholder
+    # with partial information, in case the test process crashes).
+    xml_buffer = io.StringIO()
+    kwargs['testRunner'].set_default_xml_stream(xml_buffer)  # pytype: disable=attribute-error
+
+    # If we've used a seed to randomize test case ordering, we want to record
+    # it as a top-level attribute in the `testsuites` section of the XML
+    # output.
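+    # For illustration: recording the seed in the XML report lets a reader
+    # reproduce an ordering-dependent failure by re-running with
+    # --test_randomize_ordering_seed=<seed> (see TestLoader.getTestCaseNames).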
+ randomize_ordering_seed = getattr( + kwargs['testLoader'], '_randomize_ordering_seed', None) + setter = getattr(kwargs['testRunner'], 'set_testsuites_property', None) + if randomize_ordering_seed and setter: + setter('test_randomize_ordering_seed', randomize_ordering_seed) + elif kwargs.get('testRunner') is None: + kwargs['testRunner'] = _pretty_print_reporter.TextTestRunner + + if FLAGS.pdb_post_mortem: + runner = kwargs['testRunner'] + # testRunner can be a class or an instance, which must be tested for + # differently. + # Overriding testRunner isn't uncommon, so only enable the debugging + # integration if the runner claims it does; we don't want to accidentally + # clobber something on the runner. + if ((isinstance(runner, type) and + issubclass(runner, _pretty_print_reporter.TextTestRunner)) or + isinstance(runner, _pretty_print_reporter.TextTestRunner)): + runner.run_for_debugging = True + + # Make sure tmpdir exists. + if not os.path.isdir(TEST_TMPDIR.value): + try: + os.makedirs(TEST_TMPDIR.value) + except OSError as e: + # Concurrent test might have created the directory. + if e.errno != errno.EEXIST: + raise + + # Let unittest.TestProgram.__init__ do its own argv parsing, e.g. for '-v', + # on argv, which is sys.argv without the command-line flags. + kwargs['argv'] = argv + + # Request unittest.TestProgram to not exit. The exit will be handled by + # `absltest.run_tests`. + kwargs['exit'] = False + + try: + test_program = unittest.TestProgram(*args, **kwargs) + return test_program.result, fail_when_no_tests_ran + finally: + if xml_buffer: + try: + with _open(xml_output_file, 'w') as f: + f.write(xml_buffer.getvalue()) + finally: + xml_buffer.close() + + +def run_tests( + argv: MutableSequence[Text], + args: Sequence[Any], + kwargs: MutableMapping[Text, Any], +) -> None: + """Executes a set of Python unit tests. + + Most users should call absltest.main() instead of run_tests. + + Please note that run_tests should be called from app.run. + Calling absltest.main() would ensure that. + + Please note that run_tests is allowed to make changes to kwargs. + + Args: + argv: sys.argv with the command-line flags removed from the front, i.e. the + argv with which :func:`app.run()` has called + ``__main__.main``. It is passed to + ``unittest.TestProgram.__init__(argv=)``, which does its own flag parsing. + It is ignored if kwargs contains an argv entry. + args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + """ + result, fail_when_no_tests_ran = _run_and_get_tests_result( + argv, args, kwargs, xml_reporter.TextAndXMLTestRunner + ) + if fail_when_no_tests_ran and result.testsRun == 0 and not result.skipped: + # Python 3.12 unittest exits with 5 when no tests ran. The exit code 5 comes + # from pytest which does the same thing. 
+ sys.exit(5) + sys.exit(not result.wasSuccessful()) + + +def _rmtree_ignore_errors(path): + # type: (Text) -> None + if os.path.isfile(path): + try: + os.unlink(path) + except OSError: + pass + else: + shutil.rmtree(path, ignore_errors=True) + + +def _get_first_part(path): + # type: (Text) -> Text + parts = path.split(os.sep, 1) + return parts[0] diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/flagsaver.py b/env-llmeval/lib/python3.10/site-packages/absl/testing/flagsaver.py new file mode 100644 index 0000000000000000000000000000000000000000..7df072292674f14d45b668fe4c36d61208697479 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/absl/testing/flagsaver.py @@ -0,0 +1,386 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Decorator and context manager for saving and restoring flag values. + +There are many ways to save and restore. Always use the most convenient method +for a given use case. + +Here are examples of each method. They all call ``do_stuff()`` while +``FLAGS.someflag`` is temporarily set to ``'foo'``:: + + from absl.testing import flagsaver + + # Use a decorator which can optionally override flags via arguments. + @flagsaver.flagsaver(someflag='foo') + def some_func(): + do_stuff() + + # Use a decorator which can optionally override flags with flagholders. + @flagsaver.flagsaver((module.FOO_FLAG, 'foo'), (other_mod.BAR_FLAG, 23)) + def some_func(): + do_stuff() + + # Use a decorator which does not override flags itself. + @flagsaver.flagsaver + def some_func(): + FLAGS.someflag = 'foo' + do_stuff() + + # Use a context manager which can optionally override flags via arguments. + with flagsaver.flagsaver(someflag='foo'): + do_stuff() + + # Save and restore the flag values yourself. + saved_flag_values = flagsaver.save_flag_values() + try: + FLAGS.someflag = 'foo' + do_stuff() + finally: + flagsaver.restore_flag_values(saved_flag_values) + + # Use the parsing version to emulate users providing the flags. + # Note that all flags must be provided as strings (unparsed). + @flagsaver.as_parsed(some_int_flag='123') + def some_func(): + # Because the flag was parsed it is considered "present". + assert FLAGS.some_int_flag.present + do_stuff() + + # flagsaver.as_parsed() can also be used as a context manager just like + # flagsaver.flagsaver() + with flagsaver.as_parsed(some_int_flag='123'): + do_stuff() + + # The flagsaver.as_parsed() interface also supports FlagHolder objects. + @flagsaver.as_parsed((module.FOO_FLAG, 'foo'), (other_mod.BAR_FLAG, '23')) + def some_func(): + do_stuff() + + # Using as_parsed with a multi_X flag requires a sequence of strings. 
+ @flagsaver.as_parsed(some_multi_int_flag=['123', '456']) + def some_func(): + assert FLAGS.some_multi_int_flag.present + do_stuff() + + # If a flag name includes non-identifier characters it can be specified like + # so: + @flagsaver.as_parsed(**{'i-like-dashes': 'true'}) + def some_func(): + do_stuff() + +We save and restore a shallow copy of each Flag object's ``__dict__`` attribute. +This preserves all attributes of the flag, such as whether or not it was +overridden from its default value. + +WARNING: Currently a flag that is saved and then deleted cannot be restored. An +exception will be raised. However if you *add* a flag after saving flag values, +and then restore flag values, the added flag will be deleted with no errors. +""" + +import collections +import functools +import inspect +from typing import overload, Any, Callable, Mapping, Tuple, TypeVar, Type, Sequence, Union + +from absl import flags + +FLAGS = flags.FLAGS + + +# The type of pre/post wrapped functions. +_CallableT = TypeVar('_CallableT', bound=Callable) + + +@overload +def flagsaver(*args: Tuple[flags.FlagHolder, Any], + **kwargs: Any) -> '_FlagOverrider': + ... + + +@overload +def flagsaver(func: _CallableT) -> _CallableT: + ... + + +def flagsaver(*args, **kwargs): + """The main flagsaver interface. See module doc for usage.""" + return _construct_overrider(_FlagOverrider, *args, **kwargs) + + +@overload +def as_parsed(*args: Tuple[flags.FlagHolder, Union[str, Sequence[str]]], + **kwargs: Union[str, Sequence[str]]) -> '_ParsingFlagOverrider': + ... + + +@overload +def as_parsed(func: _CallableT) -> _CallableT: + ... + + +def as_parsed(*args, **kwargs): + """Overrides flags by parsing strings, saves flag state similar to flagsaver. + + This function can be used as either a decorator or context manager similar to + flagsaver.flagsaver(). However, where flagsaver.flagsaver() directly sets the + flags to new values, this function will parse the provided arguments as if + they were provided on the command line. Among other things, this will cause + `FLAGS['flag_name'].present == True`. + + A note on unparsed input: For many flag types, the unparsed version will be + a single string. However for multi_x (multi_string, multi_integer, multi_enum) + the unparsed version will be a Sequence of strings. + + Args: + *args: Tuples of FlagHolders and their unparsed value. + **kwargs: The keyword args are flag names, and the values are unparsed + values. + + Returns: + _ParsingFlagOverrider that serves as a context manager or decorator. Will + save previous flag state and parse new flags, then on cleanup it will + restore the previous flag state. + """ + return _construct_overrider(_ParsingFlagOverrider, *args, **kwargs) + + +# NOTE: the order of these overload declarations matters. The type checker will +# pick the first match which could be incorrect. +@overload +def _construct_overrider( + flag_overrider_cls: Type['_ParsingFlagOverrider'], + *args: Tuple[flags.FlagHolder, Union[str, Sequence[str]]], + **kwargs: Union[str, Sequence[str]]) -> '_ParsingFlagOverrider': + ... + + +@overload +def _construct_overrider(flag_overrider_cls: Type['_FlagOverrider'], + *args: Tuple[flags.FlagHolder, Any], + **kwargs: Any) -> '_FlagOverrider': + ... + + +@overload +def _construct_overrider(flag_overrider_cls: Type['_FlagOverrider'], + func: _CallableT) -> _CallableT: + ... + + +def _construct_overrider(flag_overrider_cls, *args, **kwargs): + """Handles the args/kwargs returning an instance of flag_overrider_cls. 
+
+  If flag_overrider_cls is _FlagOverrider then values should be native Python
+  types matching the flags' Python types. Otherwise if flag_overrider_cls is
+  _ParsingFlagOverrider the values should be strings or sequences of strings.
+
+  Args:
+    flag_overrider_cls: The class that will do the overriding.
+    *args: Tuples of FlagHolder and the new flag value.
+    **kwargs: Keyword args mapping flag name to new flag value.
+
+  Returns:
+    A _FlagOverrider to be used as a decorator or context manager.
+  """
+  if not args:
+    return flag_overrider_cls(**kwargs)
+  # args can be [func] if used as `@flagsaver` instead of `@flagsaver(...)`
+  if len(args) == 1 and callable(args[0]):
+    if kwargs:
+      raise ValueError(
+          "It's invalid to specify both positional and keyword parameters.")
+    func = args[0]
+    if inspect.isclass(func):
+      raise TypeError('@flagsaver.flagsaver cannot be applied to a class.')
+    return _wrap(flag_overrider_cls, func, {})
+  # args can be a list of (FlagHolder, value) pairs; in that case they
+  # augment any specified kwargs.
+  for arg in args:
+    if not isinstance(arg, tuple) or len(arg) != 2:
+      raise ValueError('Expected (FlagHolder, value) pair, found %r' % (arg,))
+    holder, value = arg
+    if not isinstance(holder, flags.FlagHolder):
+      raise ValueError('Expected (FlagHolder, value) pair, found %r' % (arg,))
+    if holder.name in kwargs:
+      raise ValueError('Cannot set --%s multiple times' % holder.name)
+    kwargs[holder.name] = value
+  return flag_overrider_cls(**kwargs)
+
+
+def save_flag_values(
+    flag_values: flags.FlagValues = FLAGS) -> Mapping[str, Mapping[str, Any]]:
+  """Returns a copy of flag values as a dict.
+
+  Args:
+    flag_values: FlagValues, the FlagValues instance with which the flag will be
+      saved. This should almost never need to be overridden.
+
+  Returns:
+    Dictionary mapping keys to values. Keys are flag names, values are
+    corresponding ``__dict__`` members. E.g. ``{'key': value_dict, ...}``.
+  """
+  return {name: _copy_flag_dict(flag_values[name]) for name in flag_values}
+
+
+def restore_flag_values(saved_flag_values: Mapping[str, Mapping[str, Any]],
+                        flag_values: flags.FlagValues = FLAGS):
+  """Restores flag values based on the dictionary of flag values.
+
+  Args:
+    saved_flag_values: {'flag_name': value_dict, ...}
+    flag_values: FlagValues, the FlagValues instance from which the flag will be
+      restored. This should almost never need to be overridden.
+  """
+  new_flag_names = list(flag_values)
+  for name in new_flag_names:
+    saved = saved_flag_values.get(name)
+    if saved is None:
+      # If __dict__ was not saved, delete the "new" flag.
+      delattr(flag_values, name)
+    else:
+      if flag_values[name].value != saved['_value']:
+        flag_values[name].value = saved['_value']  # Ensure C++ value is set.
+      flag_values[name].__dict__ = saved
+
+
+@overload
+def _wrap(flag_overrider_cls: Type['_FlagOverrider'], func: _CallableT,
+          overrides: Mapping[str, Any]) -> _CallableT:
+  ...
+
+
+@overload
+def _wrap(flag_overrider_cls: Type['_ParsingFlagOverrider'], func: _CallableT,
+          overrides: Mapping[str, Union[str, Sequence[str]]]) -> _CallableT:
+  ...
+
+
+def _wrap(flag_overrider_cls, func, overrides):
+  """Creates a wrapper function that saves/restores flag values.
+
+  Args:
+    flag_overrider_cls: The class that will be used as a context manager.
+    func: This will be called between saving flags and restoring flags.
+    overrides: Flag names mapped to their values. These flags will be set after
+      saving the original flag state.
The type of the values depends on whether
+      _FlagOverrider or _ParsingFlagOverrider was specified.
+
+  Returns:
+    A wrapped version of func.
+  """
+
+  @functools.wraps(func)
+  def _flagsaver_wrapper(*args, **kwargs):
+    """Wrapper function that saves and restores flags."""
+    with flag_overrider_cls(**overrides):
+      return func(*args, **kwargs)
+
+  return _flagsaver_wrapper
+
+
+class _FlagOverrider(object):
+  """Overrides flags for the duration of the decorated function call.
+
+  It also restores all original values of flags after the decorated method
+  completes.
+  """
+
+  def __init__(self, **overrides: Any):
+    self._overrides = overrides
+    self._saved_flag_values = None
+
+  def __call__(self, func: _CallableT) -> _CallableT:
+    if inspect.isclass(func):
+      raise TypeError('flagsaver cannot be applied to a class.')
+    return _wrap(self.__class__, func, self._overrides)
+
+  def __enter__(self):
+    self._saved_flag_values = save_flag_values(FLAGS)
+    try:
+      FLAGS._set_attributes(**self._overrides)
+    except:
+      # It may fail because of flag validators.
+      restore_flag_values(self._saved_flag_values, FLAGS)
+      raise
+
+  def __exit__(self, exc_type, exc_value, traceback):
+    restore_flag_values(self._saved_flag_values, FLAGS)
+
+
+class _ParsingFlagOverrider(_FlagOverrider):
+  """Context manager for overriding flags.
+
+  Simulates command line parsing.
+
+  This is similar to _FlagOverrider except that all **overrides should be
+  strings or sequences of strings, and when the context is entered this class
+  calls ``.parse(value)``.
+
+  This results in the flags having .present set properly.
+  """
+
+  def __init__(self, **overrides: Union[str, Sequence[str]]):
+    for flag_name, new_value in overrides.items():
+      if isinstance(new_value, str):
+        continue
+      if (isinstance(new_value, collections.abc.Sequence) and
+          all(isinstance(single_value, str) for single_value in new_value)):
+        continue
+      raise TypeError(
+          f'flagsaver.as_parsed() cannot parse {flag_name}. Expected a single '
+          f'string or sequence of strings but {type(new_value)} was provided.')
+    super().__init__(**overrides)
+
+  def __enter__(self):
+    self._saved_flag_values = save_flag_values(FLAGS)
+    try:
+      for flag_name, unparsed_value in self._overrides.items():
+        # LINT.IfChange(flag_override_parsing)
+        FLAGS[flag_name].parse(unparsed_value)
+        FLAGS[flag_name].using_default_value = False
+        # LINT.ThenChange()
+
+      # Perform the validation on all modified flags. This is something that
+      # FLAGS._set_attributes() does for you in _FlagOverrider.
+      for flag_name in self._overrides:
+        FLAGS._assert_validators(FLAGS[flag_name].validators)
+
+    except KeyError as e:
+      # If a flag doesn't exist, an UnrecognizedFlagError is more specific.
+      restore_flag_values(self._saved_flag_values, FLAGS)
+      raise flags.UnrecognizedFlagError('Unknown command line flag.') from e
+
+    except:
+      # It may fail because of flag validators or general parsing issues.
+      restore_flag_values(self._saved_flag_values, FLAGS)
+      raise
+
+
+def _copy_flag_dict(flag: flags.Flag) -> Mapping[str, Any]:
+  """Returns a copy of the flag object's ``__dict__``.
+
+  It's mostly a shallow copy of the ``__dict__``, except it also does a shallow
+  copy of the validator list.
+
+  Args:
+    flag: flags.Flag, the flag to copy.
+
+  Returns:
+    A copy of the flag object's ``__dict__``.
+  """
+  copy = flag.__dict__.copy()
+  copy['_value'] = flag.value  # Ensure correct restore for C++ flags.
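+  # The validator list is mutable and shared with the live flag; copying it on
+  # the next line ensures a later restore brings back exactly the validators
+  # that were present at save time.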
+ copy['validators'] = list(flag.validators) + return copy diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/parameterized.py b/env-llmeval/lib/python3.10/site-packages/absl/testing/parameterized.py new file mode 100644 index 0000000000000000000000000000000000000000..d3d2c2b51f83358d6e2c83531226c7ea07f526fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/absl/testing/parameterized.py @@ -0,0 +1,724 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Adds support for parameterized tests to Python's unittest TestCase class. + +A parameterized test is a method in a test case that is invoked with different +argument tuples. + +A simple example:: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + (1, 2, 3), + (4, 5, 9), + (1, 1, 3)) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +Each invocation is a separate test case and properly isolated just +like a normal test method, with its own setUp/tearDown cycle. In the +example above, there are three separate testcases, one of which will +fail due to an assertion error (1 + 1 != 3). + +Parameters for individual test cases can be tuples (with positional parameters) +or dictionaries (with named parameters):: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + {'op1': 1, 'op2': 2, 'result': 3}, + {'op1': 4, 'op2': 5, 'result': 9}, + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +If a parameterized test fails, the error message will show the +original test name and the parameters for that test. + +The id method of the test, used internally by the unittest framework, is also +modified to show the arguments (but note that the name reported by `id()` +doesn't match the actual test name, see below). To make sure that test names +stay the same across several invocations, object representations like:: + + >>> class Foo(object): + ... pass + >>> repr(Foo()) + '<__main__.Foo object at 0x23d8610>' + +are turned into ``__main__.Foo``. When selecting a subset of test cases to run +on the command-line, the test cases contain an index suffix for each argument +in the order they were passed to :func:`parameters` (eg. testAddition0, +testAddition1, etc.) This naming scheme is subject to change; for more reliable +and stable names, especially in test logs, use :func:`named_parameters` instead. + +Tests using :func:`named_parameters` are similar to :func:`parameters`, except +only tuples or dicts of args are supported. For tuples, the first parameter arg +has to be a string (or an object that returns an apt name when converted via +``str()``). 
For dicts, a value for the key ``testcase_name`` must be present and
+must be a string (or an object that returns an apt name when converted via
+``str()``)::
+
+  class NamedExample(parameterized.TestCase):
+    @parameterized.named_parameters(
+      {'testcase_name': 'Normal',
+        'result': True, 'string': 'aaa', 'prefix': 'aa'},
+      {'testcase_name': 'EmptyPrefix',
+        'result': True, 'string': 'abc', 'prefix': ''},
+      {'testcase_name': 'BothEmpty',
+        'result': True, 'string': '', 'prefix': ''})
+    def testStartsWith(self, prefix, string, result):
+      self.assertEqual(result, string.startswith(prefix))
+
+  class NamedExample(parameterized.TestCase):
+    @parameterized.named_parameters(
+      ('Normal', 'aa', 'aaa', True),
+      ('EmptyPrefix', '', 'abc', True),
+      ('BothEmpty', '', '', True))
+    def testStartsWith(self, prefix, string, result):
+      self.assertEqual(result, string.startswith(prefix))
+
+Named tests also have the benefit that they can be run individually
+from the command line::
+
+  $ testmodule.py NamedExample.testStartsWithNormal
+  .
+  --------------------------------------------------------------------
+  Ran 1 test in 0.000s
+
+  OK
+
+Parameterized Classes
+=====================
+
+If invocation arguments are shared across test methods in a single
+TestCase class, instead of decorating all test methods
+individually, the class itself can be decorated::
+
+  @parameterized.parameters(
+    (1, 2, 3),
+    (4, 5, 9))
+  class ArithmeticTest(parameterized.TestCase):
+    def testAdd(self, arg1, arg2, result):
+      self.assertEqual(arg1 + arg2, result)
+
+    def testSubtract(self, arg1, arg2, result):
+      self.assertEqual(result - arg1, arg2)
+
+Inputs from Iterables
+=====================
+
+If parameters should be shared across several test cases, or are dynamically
+created from other sources, a single non-tuple iterable can be passed into
+the decorator. This iterable will be used to obtain the test cases::
+
+  class AdditionExample(parameterized.TestCase):
+    @parameterized.parameters(
+      (c.op1, c.op2, c.result) for c in testcases
+    )
+    def testAddition(self, op1, op2, result):
+      self.assertEqual(result, op1 + op2)
+
+
+Single-Argument Test Methods
+============================
+
+If a test method takes only one argument, the single argument must not be
+wrapped into a tuple::
+
+  class NegativeNumberExample(parameterized.TestCase):
+    @parameterized.parameters(
+      -1, -3, -4, -5
+    )
+    def testIsNegative(self, arg):
+      self.assertTrue(IsNegative(arg))
+
+
+List/tuple as a Single Argument
+===============================
+
+If a test method takes a single argument of a list/tuple, it must be wrapped
+inside a tuple::
+
+  class ZeroSumExample(parameterized.TestCase):
+    @parameterized.parameters(
+      ([-1, 0, 1], ),
+      ([-2, 0, 2], ),
+    )
+    def testSumIsZero(self, arg):
+      self.assertEqual(0, sum(arg))
+
+
+Cartesian product of Parameter Values as Parameterized Test Cases
+==================================================================
+
+To test a method over the cartesian product of parameter values,
+`parameterized.product` may be used to generate the test combinations::
+
+  class TestModuloExample(parameterized.TestCase):
+    @parameterized.product(
+      num=[0, 20, 80],
+      modulo=[2, 4],
+      expected=[0]
+    )
+    def testModuloResult(self, num, modulo, expected):
+      self.assertEqual(expected, num % modulo)
+
+This results in 6 test cases being created - one for each combination of the
+parameters.
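For illustration, the grid above expands to the same six cases one
+would get by writing the product out by hand::
+
+  class TestModuloExample(parameterized.TestCase):
+    @parameterized.parameters(
+      (0, 2, 0), (0, 4, 0),
+      (20, 2, 0), (20, 4, 0),
+      (80, 2, 0), (80, 4, 0))
+    def testModuloResult(self, num, modulo, expected):
+      self.assertEqual(expected, num % modulo)
+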
It is also possible to supply sequences of keyword argument dicts
+as elements of the cartesian product::
+
+  @parameterized.product(
+    (dict(num=5, modulo=3, expected=2),
+     dict(num=7, modulo=4, expected=3)),
+    dtype=(int, float)
+  )
+  def testModuloResult(self, num, modulo, expected, dtype):
+    self.assertEqual(expected, dtype(num) % modulo)
+
+This results in 4 test cases being created - one for each combination of the
+two sets of test data (supplied as kwarg dicts) and the two data types
+(supplied as a named parameter). Multiple keyword argument dicts may be
+supplied if required.
+
+Async Support
+=============
+
+If a test needs to call async functions, it can inherit from both
+parameterized.TestCase and another TestCase that supports async calls, such
+as [asynctest](https://github.com/Martiusweb/asynctest)::
+
+  import asynctest
+
+  class AsyncExample(parameterized.TestCase, asynctest.TestCase):
+    @parameterized.parameters(
+      ('a', 1),
+      ('b', 2),
+    )
+    async def testSomeAsyncFunction(self, arg, expected):
+      actual = await someAsyncFunction(arg)
+      self.assertEqual(actual, expected)
+"""
+
+from collections import abc
+import functools
+import inspect
+import itertools
+import re
+import types
+import unittest
+import warnings
+
+from absl.testing import absltest
+
+
+_ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>')
+_NAMED = object()
+_ARGUMENT_REPR = object()
+_NAMED_DICT_KEY = 'testcase_name'
+
+
+class NoTestsError(Exception):
+  """Raised when parameterized decorators do not generate any tests."""
+
+
+class DuplicateTestNameError(Exception):
+  """Raised when a parameterized test has the same test name multiple times."""
+
+  def __init__(self, test_class_name, new_test_name, original_test_name):
+    super(DuplicateTestNameError, self).__init__(
+        'Duplicate parameterized test name in {}: generated test name {!r} '
+        '(generated from {!r}) already exists. Consider using '
+        'named_parameters() to give your tests unique names and/or renaming '
+        'the conflicting test method.'.format(
+            test_class_name, new_test_name, original_test_name))
+
+
+def _clean_repr(obj):
+  return _ADDR_RE.sub(r'<\1>', repr(obj))
+
+
+def _non_string_or_bytes_iterable(obj):
+  return (isinstance(obj, abc.Iterable) and not isinstance(obj, str) and
+          not isinstance(obj, bytes))
+
+
+def _format_parameter_list(testcase_params):
+  if isinstance(testcase_params, abc.Mapping):
+    return ', '.join('%s=%s' % (argname, _clean_repr(value))
+                     for argname, value in testcase_params.items())
+  elif _non_string_or_bytes_iterable(testcase_params):
+    return ', '.join(map(_clean_repr, testcase_params))
+  else:
+    return _format_parameter_list((testcase_params,))
+
+
+def _async_wrapped(func):
+  @functools.wraps(func)
+  async def wrapper(*args, **kwargs):
+    return await func(*args, **kwargs)
+  return wrapper
+
+
+class _ParameterizedTestIter(object):
+  """Callable and iterable class for producing new test cases."""
+
+  def __init__(self, test_method, testcases, naming_type, original_name=None):
+    """Returns concrete test functions for a test and a list of parameters.
+
+    The naming_type is used to determine the name of the concrete
+    functions as reported by the unittest framework. If naming_type is
+    _NAMED, the testcases must be tuples, and the first element must
+    have a string representation that is a valid Python identifier.
+
+    Args:
+      test_method: The decorated test method.
+      testcases: (list of tuple/dict) A list of parameter tuples/dicts for
+          individual test invocations.
+ naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR. + original_name: The original test method name. When decorated on a test + method, None is passed to __init__ and test_method.__name__ is used. + Note test_method.__name__ might be different than the original defined + test method because of the use of other decorators. A more accurate + value is set by TestGeneratorMetaclass.__new__ later. + """ + self._test_method = test_method + self.testcases = testcases + self._naming_type = naming_type + if original_name is None: + original_name = test_method.__name__ + self._original_name = original_name + self.__name__ = _ParameterizedTestIter.__name__ + + def __call__(self, *args, **kwargs): + raise RuntimeError('You appear to be running a parameterized test case ' + 'without having inherited from parameterized.' + 'TestCase. This is bad because none of ' + 'your test cases are actually being run. You may also ' + 'be using another decorator before the parameterized ' + 'one, in which case you should reverse the order.') + + def __iter__(self): + test_method = self._test_method + naming_type = self._naming_type + + def make_bound_param_test(testcase_params): + @functools.wraps(test_method) + def bound_param_test(self): + if isinstance(testcase_params, abc.Mapping): + return test_method(self, **testcase_params) + elif _non_string_or_bytes_iterable(testcase_params): + return test_method(self, *testcase_params) + else: + return test_method(self, testcase_params) + + if naming_type is _NAMED: + # Signal the metaclass that the name of the test function is unique + # and descriptive. + bound_param_test.__x_use_name__ = True + + testcase_name = None + if isinstance(testcase_params, abc.Mapping): + if _NAMED_DICT_KEY not in testcase_params: + raise RuntimeError( + 'Dict for named tests must contain key "%s"' % _NAMED_DICT_KEY) + # Create a new dict to avoid modifying the supplied testcase_params. + testcase_name = testcase_params[_NAMED_DICT_KEY] + testcase_params = { + k: v for k, v in testcase_params.items() if k != _NAMED_DICT_KEY + } + elif _non_string_or_bytes_iterable(testcase_params): + if not isinstance(testcase_params[0], str): + raise RuntimeError( + 'The first element of named test parameters is the test name ' + 'suffix and must be a string') + testcase_name = testcase_params[0] + testcase_params = testcase_params[1:] + else: + raise RuntimeError( + 'Named tests must be passed a dict or non-string iterable.') + + test_method_name = self._original_name + # Support PEP-8 underscore style for test naming if used. + if (test_method_name.startswith('test_') + and testcase_name + and not testcase_name.startswith('_')): + test_method_name += '_' + + bound_param_test.__name__ = test_method_name + str(testcase_name) + elif naming_type is _ARGUMENT_REPR: + # If it's a generator, convert it to a tuple and treat them as + # parameters. + if isinstance(testcase_params, types.GeneratorType): + testcase_params = tuple(testcase_params) + # The metaclass creates a unique, but non-descriptive method name for + # _ARGUMENT_REPR tests using an indexed suffix. + # To keep test names descriptive, only the original method name is used. + # To make sure test names are unique, we add a unique descriptive suffix + # __x_params_repr__ for every test. + params_repr = '(%s)' % (_format_parameter_list(testcase_params),) + bound_param_test.__x_params_repr__ = params_repr + else: + raise RuntimeError('%s is not a valid naming type.' 
% (naming_type,))
+
+      bound_param_test.__doc__ = '%s(%s)' % (
+          bound_param_test.__name__, _format_parameter_list(testcase_params))
+      if test_method.__doc__:
+        bound_param_test.__doc__ += '\n%s' % (test_method.__doc__,)
+      if inspect.iscoroutinefunction(test_method):
+        return _async_wrapped(bound_param_test)
+      return bound_param_test
+
+    return (make_bound_param_test(c) for c in self.testcases)
+
+
+def _modify_class(class_object, testcases, naming_type):
+  assert not getattr(class_object, '_test_params_reprs', None), (
+      'Cannot add parameters to %s. Either it already has parameterized '
+      'methods, or its super class is also a parameterized class.' % (
+          class_object,))
+  # NOTE: _test_params_repr is private to parameterized.TestCase and its
+  # metaclass; do not use it outside of those classes.
+  class_object._test_params_reprs = test_params_reprs = {}
+  for name, obj in class_object.__dict__.copy().items():
+    if (name.startswith(unittest.TestLoader.testMethodPrefix)
+        and isinstance(obj, types.FunctionType)):
+      delattr(class_object, name)
+      methods = {}
+      _update_class_dict_for_param_test_case(
+          class_object.__name__, methods, test_params_reprs, name,
+          _ParameterizedTestIter(obj, testcases, naming_type, name))
+      for meth_name, meth in methods.items():
+        setattr(class_object, meth_name, meth)
+
+
+def _parameter_decorator(naming_type, testcases):
+  """Implementation of the parameterization decorators.
+
+  Args:
+    naming_type: The naming type.
+    testcases: Testcase parameters.
+
+  Raises:
+    NoTestsError: Raised when the decorator generates no tests.
+
+  Returns:
+    A function for modifying the decorated object.
+  """
+  def _apply(obj):
+    if isinstance(obj, type):
+      _modify_class(obj, testcases, naming_type)
+      return obj
+    else:
+      return _ParameterizedTestIter(obj, testcases, naming_type)
+
+  if (len(testcases) == 1 and
+      not isinstance(testcases[0], tuple) and
+      not isinstance(testcases[0], abc.Mapping)):
+    # Support using a single non-tuple parameter as a list of test cases.
+    # Note that the single non-tuple parameter can't be Mapping either, which
+    # means a single dict parameter case.
+    assert _non_string_or_bytes_iterable(testcases[0]), (
+        'Single parameter argument must be a non-string non-Mapping iterable')
+    testcases = testcases[0]
+
+  if not isinstance(testcases, abc.Sequence):
+    testcases = list(testcases)
+  if not testcases:
+    raise NoTestsError(
+        'parameterized test decorators did not generate any tests. '
+        'Make sure you specify non-empty parameters, '
+        'and do not reuse generators more than once.')
+
+  return _apply
+
+
+def parameters(*testcases):
+  """A decorator for creating parameterized tests.
+
+  See the module docstring for a usage example.
+
+  Args:
+    *testcases: Parameters for the decorated method, either a single
+        iterable, or a list of tuples/dicts/objects (for tests with only one
+        argument).
+
+  Raises:
+    NoTestsError: Raised when the decorator generates no tests.
+
+  Returns:
+    A test generator to be handled by TestGeneratorMetaclass.
+  """
+  return _parameter_decorator(_ARGUMENT_REPR, testcases)
+
+
+def named_parameters(*testcases):
+  """A decorator for creating parameterized tests.
+
+  See the module docstring for a usage example. For every parameter tuple
+  passed, the first element of the tuple should be a string and will be
+  appended to the name of the test method. Each parameter dict passed must
+  have a value for the key "testcase_name"; the string representation of
+  that value will be appended to the name of the test method.
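+
+  For example (the names here are made up; the pattern mirrors the module
+  docstring)::
+
+    @parameterized.named_parameters(
+        ('Small', 5),
+        ('Large', 500))
+    def test_size(self, size):
+      ...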
+
+  Args:
+    *testcases: Parameters for the decorated method, either a single iterable,
+        or a list of tuples or dicts.
+
+  Raises:
+    NoTestsError: Raised when the decorator generates no tests.
+
+  Returns:
+    A test generator to be handled by TestGeneratorMetaclass.
+  """
+  return _parameter_decorator(_NAMED, testcases)
+
+
+def product(*kwargs_seqs, **testgrid):
+  """A decorator for running tests over the cartesian product of parameter values.
+
+  See the module docstring for a usage example. The test will be run for every
+  possible combination of the parameters.
+
+  Args:
+    *kwargs_seqs: Each positional parameter is a sequence of keyword arg dicts;
+        every test case generated will include exactly one kwargs dict from each
+        positional parameter; these will then be merged to form an overall list
+        of arguments for the test case.
+    **testgrid: A mapping of parameter names and their possible values. Possible
+        values should be given as either a list or a tuple.
+
+  Raises:
+    NoTestsError: Raised when the decorator generates no tests.
+
+  Returns:
+    A test generator to be handled by TestGeneratorMetaclass.
+  """
+
+  for name, values in testgrid.items():
+    assert isinstance(values, (list, tuple)), (
+        'Values of {} must be given as list or tuple, found {}'.format(
+            name, type(values)))
+
+  prior_arg_names = set()
+  for kwargs_seq in kwargs_seqs:
+    assert ((isinstance(kwargs_seq, (list, tuple))) and
+            all(isinstance(kwargs, dict) for kwargs in kwargs_seq)), (
+                'Positional parameters must be a sequence of keyword arg '
+                'dicts, found {}'
+                .format(kwargs_seq))
+    if kwargs_seq:
+      arg_names = set(kwargs_seq[0])
+      assert all(set(kwargs) == arg_names for kwargs in kwargs_seq), (
+          'Keyword argument dicts within a single parameter must all have the '
+          'same keys, found {}'.format(kwargs_seq))
+      assert not (arg_names & prior_arg_names), (
+          'Keyword argument dict sequences must all have distinct argument '
+          'names, found duplicate(s) {}'
+          .format(sorted(arg_names & prior_arg_names)))
+      prior_arg_names |= arg_names
+
+  assert not (prior_arg_names & set(testgrid)), (
+      'Arguments supplied in kwargs dicts in positional parameters must not '
+      'overlap with arguments supplied as named parameters; found duplicate '
+      'argument(s) {}'.format(sorted(prior_arg_names & set(testgrid))))
+
+  # Convert testgrid into a sequence of sequences of kwargs dicts and combine
+  # with the positional parameters.
+  # So foo=[1,2], bar=[3,4] --> [[{foo: 1}, {foo: 2}], [{bar: 3}, {bar: 4}]]
+  testgrid = (tuple({k: v} for v in vs) for k, vs in testgrid.items())
+  testgrid = tuple(kwargs_seqs) + tuple(testgrid)
+
+  # Create all possible combinations of parameters as a cartesian product
+  # of parameter values.
+  testcases = [
+      dict(itertools.chain.from_iterable(case.items()
+                                         for case in cases))
+      for cases in itertools.product(*testgrid)
+  ]
+  return _parameter_decorator(_ARGUMENT_REPR, testcases)
+
+
+class TestGeneratorMetaclass(type):
+  """Metaclass for adding tests generated by parameterized decorators."""
+
+  def __new__(cls, class_name, bases, dct):
+    # NOTE: _test_params_repr is private to parameterized.TestCase and its
+    # metaclass; do not use it outside of those classes.
+    test_params_reprs = dct.setdefault('_test_params_reprs', {})
+    for name, obj in dct.copy().items():
+      if (name.startswith(unittest.TestLoader.testMethodPrefix) and
+          _non_string_or_bytes_iterable(obj)):
+        # NOTE: `obj` might not be a _ParameterizedTestIter in two cases:
+        # 1.
a class-level iterable named test* that isn't a test, such as
+        #    a list of something. Such attributes get deleted from the class.
+        #
+        # 2. If a decorator is applied to the parameterized test, e.g.
+        #    @morestuff
+        #    @parameterized.parameters(...)
+        #    def test_foo(...): ...
+        #
+        # This is OK so long as the underlying parameterized function state
+        # is forwarded (e.g. using functools.wraps()) and **without**
+        # explicitly accessing the internal attributes.
+        if isinstance(obj, _ParameterizedTestIter):
+          # Update the original test method name so it's more accurate.
+          # The mismatch might happen when another decorator is used inside
+          # the parameterized decorators, and the inner decorator doesn't
+          # preserve its __name__.
+          obj._original_name = name
+        iterator = iter(obj)
+        dct.pop(name)
+        _update_class_dict_for_param_test_case(
+            class_name, dct, test_params_reprs, name, iterator)
+    # If the base class is a subclass of parameterized.TestCase, inherit its
+    # _test_params_reprs too.
+    for base in bases:
+      # Check if the base has _test_params_reprs first, then check if it's a
+      # subclass of parameterized.TestCase. Otherwise when this is called for
+      # the parameterized.TestCase definition itself, this raises because
+      # TestCase itself is not defined yet. This works as long as
+      # absltest.TestCase does not define _test_params_reprs.
+      base_test_params_reprs = getattr(base, '_test_params_reprs', None)
+      if base_test_params_reprs and issubclass(base, TestCase):
+        for test_method, test_method_id in base_test_params_reprs.items():
+          # test_method may exist in both the base and this class.
+          # This class's method overrides the base class's.
+          # That's why it should only be inherited if it does not exist.
+          test_params_reprs.setdefault(test_method, test_method_id)
+
+    return type.__new__(cls, class_name, bases, dct)
+
+
+def _update_class_dict_for_param_test_case(
+    test_class_name, dct, test_params_reprs, name, iterator):
+  """Adds individual test cases to a dictionary.
+
+  Args:
+    test_class_name: The name of the class tests are added to.
+    dct: The target dictionary.
+    test_params_reprs: The dictionary for mapping names to test IDs.
+    name: The original name of the test case.
+    iterator: The iterator generating the individual test cases.
+
+  Raises:
+    DuplicateTestNameError: Raised when a test name occurs multiple times.
+    RuntimeError: If non-parameterized functions are generated.
+  """
+  for idx, func in enumerate(iterator):
+    assert callable(func), 'Test generators must yield callables, got %r' % (
+        func,)
+    if not (getattr(func, '__x_use_name__', None) or
+            getattr(func, '__x_params_repr__', None)):
+      raise RuntimeError(
+          '{}.{} generated a test function without using the parameterized '
+          'decorators. Only tests generated using the decorators are '
+          'supported.'.format(test_class_name, name))
+
+    if getattr(func, '__x_use_name__', False):
+      original_name = func.__name__
+      new_name = original_name
+    else:
+      original_name = name
+      new_name = '%s%d' % (original_name, idx)
+
+    if new_name in dct:
+      raise DuplicateTestNameError(test_class_name, new_name, original_name)
+
+    dct[new_name] = func
+    test_params_reprs[new_name] = getattr(func, '__x_params_repr__', '')
+
+
+class TestCase(absltest.TestCase, metaclass=TestGeneratorMetaclass):
+  """Base class for test cases using the parameters decorator."""
+
+  # visibility: private; do not call outside this class.
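+  # It returns the params repr recorded for the running test by the metaclass
+  # in _test_params_reprs (an empty string for named tests).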
+  def _get_params_repr(self):
+    return self._test_params_reprs.get(self._testMethodName, '')
+
+  def __str__(self):
+    params_repr = self._get_params_repr()
+    if params_repr:
+      params_repr = ' ' + params_repr
+    return '{}{} ({})'.format(
+        self._testMethodName, params_repr,
+        unittest.util.strclass(self.__class__))
+
+  def id(self):
+    """Returns the descriptive ID of the test.
+
+    This is used internally by the unit testing framework to get a name
+    for the test to be used in reports.
+
+    Returns:
+      The test id.
+    """
+    base = super(TestCase, self).id()
+    params_repr = self._get_params_repr()
+    if params_repr:
+      # We include the params in the id so that, when reported in the
+      # test.xml file, the value is more informative than just "test_foo0".
+      # Use a space to separate them so that it's copy/paste friendly and
+      # easy to identify the actual test id.
+      return '{} {}'.format(base, params_repr)
+    else:
+      return base
+
+
+# This function is kept CamelCase because it's used as a class's base class.
+def CoopTestCase(other_base_class):  # pylint: disable=invalid-name
+  """Returns a new base class with a cooperative metaclass base.
+
+  This enables the TestCase to be used in combination
+  with other base classes that have custom metaclasses, such as
+  ``mox.MoxTestBase``.
+
+  Only works with metaclasses that do not override ``type.__new__``.
+
+  Example::
+
+    from absl.testing import parameterized
+
+    class ExampleTest(parameterized.CoopTestCase(OtherTestCase)):
+      ...
+
+  Args:
+    other_base_class: (class) A test case base class.
+
+  Returns:
+    A new class object.
+  """
+  # If the other base class has a metaclass of 'type' then trying to combine
+  # the metaclasses will result in an MRO error. So simply combine them and
+  # return.
+  if type(other_base_class) == type:  # pylint: disable=unidiomatic-typecheck
+    warnings.warn(
+        'CoopTestCase is only necessary when combining with a class that uses'
+        ' a metaclass. Use multiple inheritance like this instead: class'
+        f' ExampleTest(parameterized.TestCase, {other_base_class.__name__}):',
+        stacklevel=2,
+    )
+
+    class CoopTestCaseBase(other_base_class, TestCase):
+      pass
+
+    return CoopTestCaseBase
+  else:
+
+    class CoopMetaclass(type(other_base_class), TestGeneratorMetaclass):  # pylint: disable=unused-variable
+      pass
+
+    class CoopTestCaseBase(other_base_class, TestCase, metaclass=CoopMetaclass):
+      pass
+
+    return CoopTestCaseBase diff --git a/env-llmeval/lib/python3.10/site-packages/absl/testing/xml_reporter.py b/env-llmeval/lib/python3.10/site-packages/absl/testing/xml_reporter.py new file mode 100644 index 0000000000000000000000000000000000000000..4fcb60c8545c6fbdcd993964dab61b9f833298e0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/absl/testing/xml_reporter.py @@ -0,0 +1,563 @@ +# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""A Python test reporter that generates test reports in JUnit XML format.""" + +import datetime +import re +import sys +import threading +import time +import traceback +import unittest +from xml.sax import saxutils +from absl.testing import _pretty_print_reporter + + +# See http://www.w3.org/TR/REC-xml/#NT-Char +_bad_control_character_codes = set(range(0, 0x20)) - {0x9, 0xA, 0xD} + + +_control_character_conversions = { + chr(i): '\\x{:02x}'.format(i) for i in _bad_control_character_codes} + + +_escape_xml_attr_conversions = { + '"': '"', + "'": ''', + '\n': ' ', + '\t': ' ', + '\r': ' ', + ' ': ' '} +_escape_xml_attr_conversions.update(_control_character_conversions) + + +# When class or module level function fails, unittest/suite.py adds a +# _ErrorHolder instance instead of a real TestCase, and it has a description +# like "setUpClass (__main__.MyTestCase)". +_CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX = re.compile(r'^(\w+) \((\S+)\)$') + + +# NOTE: while saxutils.quoteattr() theoretically does the same thing; it +# seems to often end up being too smart for it's own good not escaping properly. +# This function is much more reliable. +def _escape_xml_attr(content): + """Escapes xml attributes.""" + # Note: saxutils doesn't escape the quotes. + return saxutils.escape(content, _escape_xml_attr_conversions) + + +def _escape_cdata(s): + """Escapes a string to be used as XML CDATA. + + CDATA characters are treated strictly as character data, not as XML markup, + but there are still certain restrictions on them. + + Args: + s: the string to be escaped. + Returns: + An escaped version of the input string. + """ + for char, escaped in _control_character_conversions.items(): + s = s.replace(char, escaped) + return s.replace(']]>', ']] >') + + +def _iso8601_timestamp(timestamp): + """Produces an ISO8601 datetime. + + Args: + timestamp: an Epoch based timestamp in seconds. + + Returns: + A iso8601 format timestamp if the input is a valid timestamp, None otherwise + """ + if timestamp is None or timestamp < 0: + return None + return datetime.datetime.fromtimestamp( + timestamp, tz=datetime.timezone.utc).isoformat() + + +def _print_xml_element_header(element, attributes, stream, indentation=''): + """Prints an XML header of an arbitrary element. + + Args: + element: element name (testsuites, testsuite, testcase) + attributes: 2-tuple list with (attributes, values) already escaped + stream: output stream to write test report XML to + indentation: indentation added to the element header + """ + stream.write('%s<%s' % (indentation, element)) + for attribute in attributes: + if (len(attribute) == 2 and attribute[0] is not None and + attribute[1] is not None): + stream.write(' %s="%s"' % (attribute[0], attribute[1])) + stream.write('>\n') + +# Copy time.time which ensures the real time is used internally. +# This prevents bad interactions with tests that stub out time. +_time_copy = time.time + +if hasattr(traceback, '_some_str'): + # Use the traceback module str function to format safely. + _safe_str = traceback._some_str +else: + _safe_str = str # pylint: disable=invalid-name + + +class _TestCaseResult(object): + """Private helper for _TextAndXMLTestResult that represents a test result. + + Attributes: + test: A TestCase instance of an individual test method. + name: The name of the individual test method. + full_class_name: The full name of the test class. + run_time: The duration (in seconds) it took to run the test. 
+    start_time: Epoch relative timestamp of when test started (in seconds)
+    errors: A list of error 4-tuples. Error tuple entries are
+        1) a string identifier of either "failure" or "error"
+        2) an exception_type
+        3) an exception_message
+        4) a string version of a sys.exc_info()-style tuple of values
+           ('error', err[0], err[1], self._exc_info_to_string(err))
+        If the length of errors is 0, then the test is either passed or
+        skipped.
+    skip_reason: A string explaining why the test was skipped.
+  """
+
+  def __init__(self, test):
+    self.run_time = -1
+    self.start_time = -1
+    self.skip_reason = None
+    self.errors = []
+    self.test = test
+
+    # Parse the test id to get its test name and full class path.
+    # Unfortunately there is no better way of knowing the test and class.
+    # Worse, unittest uses _ErrorHolder instances to represent class / module
+    # level failures.
+    test_desc = test.id() or str(test)
+    # Check if it's something like "setUpClass (__main__.TestCase)".
+    match = _CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX.match(test_desc)
+    if match:
+      name = match.group(1)
+      full_class_name = match.group(2)
+    else:
+      class_name = unittest.util.strclass(test.__class__)
+      if isinstance(test, unittest.case._SubTest):
+        # If the test case is a _SubTest, the real TestCase instance is
+        # available as _SubTest.test_case.
+        class_name = unittest.util.strclass(test.test_case.__class__)
+      if test_desc.startswith(class_name + '.'):
+        # In a typical unittest.TestCase scenario, test.id() returns with
+        # a class name formatted using unittest.util.strclass.
+        name = test_desc[len(class_name)+1:]
+        full_class_name = class_name
+      else:
+        # Otherwise make a best effort to guess the test name and full class
+        # path.
+        parts = test_desc.rsplit('.', 1)
+        name = parts[-1]
+        full_class_name = parts[0] if len(parts) == 2 else ''
+    self.name = _escape_xml_attr(name)
+    self.full_class_name = _escape_xml_attr(full_class_name)
+
+  def set_run_time(self, time_in_secs):
+    self.run_time = time_in_secs
+
+  def set_start_time(self, time_in_secs):
+    self.start_time = time_in_secs
+
+  def print_xml_summary(self, stream):
+    """Prints an XML Summary of a TestCase.
+
+    Status and result are populated as per JUnit XML test result reporter.
+    A test that has been skipped will always have a skip reason,
+    as every skip method in Python's unittest requires the reason arg to be
+    passed.
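+
+    For illustration, a passing test case renders along these lines (the
+    attribute values here are made up)::
+
+        <testcase name="testFoo" status="run" result="completed" time="0.003"
+            classname="__main__.MyTest" timestamp="2024-01-01T00:00:00+00:00">
+        </testcase>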
+
+    Args:
+      stream: output stream to write test report XML to
+    """
+
+    if self.skip_reason is None:
+      status = 'run'
+      result = 'completed'
+    else:
+      status = 'notrun'
+      result = 'suppressed'
+
+    test_case_attributes = [
+        ('name', '%s' % self.name),
+        ('status', '%s' % status),
+        ('result', '%s' % result),
+        ('time', '%.3f' % self.run_time),
+        ('classname', self.full_class_name),
+        ('timestamp', _iso8601_timestamp(self.start_time)),
+    ]
+    _print_xml_element_header('testcase', test_case_attributes, stream, '  ')
+    self._print_testcase_details(stream)
+    stream.write('  </testcase>\n')
+
+  def _print_testcase_details(self, stream):
+    for error in self.errors:
+      outcome, exception_type, message, error_msg = error  # pylint: disable=unpacking-non-sequence
+      message = _escape_xml_attr(_safe_str(message))
+      exception_type = _escape_xml_attr(str(exception_type))
+      error_msg = _escape_cdata(error_msg)
+      stream.write('  <%s message="%s" type="%s"><![CDATA[%s]]></%s>\n'
+                   % (outcome, message, exception_type, error_msg, outcome))
+
+
+class _TestSuiteResult(object):
+  """Private helper for _TextAndXMLTestResult."""
+
+  def __init__(self):
+    self.suites = {}
+    self.failure_counts = {}
+    self.error_counts = {}
+    self.overall_start_time = -1
+    self.overall_end_time = -1
+    self._testsuites_properties = {}
+
+  def add_test_case_result(self, test_case_result):
+    suite_name = type(test_case_result.test).__name__
+    if suite_name == '_ErrorHolder':
+      # _ErrorHolder is a special case created by unittest for class / module
+      # level functions.
+      suite_name = test_case_result.full_class_name.rsplit('.')[-1]
+    if isinstance(test_case_result.test, unittest.case._SubTest):
+      # If the test case is a _SubTest, the real TestCase instance is
+      # available as _SubTest.test_case.
+      suite_name = type(test_case_result.test.test_case).__name__
+
+    self._setup_test_suite(suite_name)
+    self.suites[suite_name].append(test_case_result)
+    for error in test_case_result.errors:
+      # Only count the first failure or error so that the sum is equal to the
+      # total number of *testcases* that have failures or errors.
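+      # (Every error is still written to the XML output; only these per-suite
+      # counters stop counting after the first failure or error.)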
+      if error[0] == 'failure':
+        self.failure_counts[suite_name] += 1
+        break
+      elif error[0] == 'error':
+        self.error_counts[suite_name] += 1
+        break
+
+  def print_xml_summary(self, stream):
+    overall_test_count = sum(len(x) for x in self.suites.values())
+    overall_failures = sum(self.failure_counts.values())
+    overall_errors = sum(self.error_counts.values())
+    overall_attributes = [
+        ('name', ''),
+        ('tests', '%d' % overall_test_count),
+        ('failures', '%d' % overall_failures),
+        ('errors', '%d' % overall_errors),
+        ('time', '%.3f' % (self.overall_end_time - self.overall_start_time)),
+        ('timestamp', _iso8601_timestamp(self.overall_start_time)),
+    ]
+    _print_xml_element_header('testsuites', overall_attributes, stream)
+    if self._testsuites_properties:
+      stream.write('  <properties>\n')
+      for name, value in sorted(self._testsuites_properties.items()):
+        stream.write('    <property name="%s" value="%s"></property>\n' %
+                     (_escape_xml_attr(name), _escape_xml_attr(str(value))))
+      stream.write('  </properties>\n')
+
+    for suite_name in self.suites:
+      suite = self.suites[suite_name]
+      suite_end_time = max(x.start_time + x.run_time for x in suite)
+      suite_start_time = min(x.start_time for x in suite)
+      failures = self.failure_counts[suite_name]
+      errors = self.error_counts[suite_name]
+      suite_attributes = [
+          ('name', '%s' % suite_name),
+          ('tests', '%d' % len(suite)),
+          ('failures', '%d' % failures),
+          ('errors', '%d' % errors),
+          ('time', '%.3f' % (suite_end_time - suite_start_time)),
+          ('timestamp', _iso8601_timestamp(suite_start_time)),
+      ]
+      _print_xml_element_header('testsuite', suite_attributes, stream)
+
+      # test_case_result entries are not guaranteed to be in any user-friendly
+      # order, especially when using subtests. So sort them.
+      for test_case_result in sorted(suite, key=lambda t: t.name):
+        test_case_result.print_xml_summary(stream)
+      stream.write('</testsuite>\n')
+    stream.write('</testsuites>\n')
+
+  def _setup_test_suite(self, suite_name):
+    """Adds a test suite to the set of suites tracked by this test run.
+
+    Args:
+      suite_name: string, The name of the test suite being initialized.
+    """
+    if suite_name in self.suites:
+      return
+    self.suites[suite_name] = []
+    self.failure_counts[suite_name] = 0
+    self.error_counts[suite_name] = 0
+
+  def set_end_time(self, timestamp_in_secs):
+    """Sets the end timestamp of this test suite.
+
+    Args:
+      timestamp_in_secs: timestamp in seconds since epoch
+    """
+    self.overall_end_time = timestamp_in_secs
+
+  def set_start_time(self, timestamp_in_secs):
+    """Sets the start timestamp of this test suite.
+
+    Args:
+      timestamp_in_secs: timestamp in seconds since epoch
+    """
+    self.overall_start_time = timestamp_in_secs
+
+
+class _TextAndXMLTestResult(_pretty_print_reporter.TextTestResult):
+  """Private TestResult class that produces both formatted text results and XML.
+
+  Used by TextAndXMLTestRunner.
+  """
+
+  _TEST_SUITE_RESULT_CLASS = _TestSuiteResult
+  _TEST_CASE_RESULT_CLASS = _TestCaseResult
+
+  def __init__(self, xml_stream, stream, descriptions, verbosity,
+               time_getter=_time_copy, testsuites_properties=None):
+    super(_TextAndXMLTestResult, self).__init__(stream, descriptions, verbosity)
+    self.xml_stream = xml_stream
+    self.pending_test_case_results = {}
+    self.suite = self._TEST_SUITE_RESULT_CLASS()
+    if testsuites_properties:
+      self.suite._testsuites_properties = testsuites_properties
+    self.time_getter = time_getter
+
+    # This lock guards any mutations on pending_test_case_results.
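+    # It is re-entrant (an RLock), so a thread already holding it can call
+    # helpers that acquire it again.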
+ self._pending_test_case_results_lock = threading.RLock() + + def startTest(self, test): + self.start_time = self.time_getter() + super(_TextAndXMLTestResult, self).startTest(test) + + def stopTest(self, test): + # Grabbing the write lock to avoid conflicting with stopTestRun. + with self._pending_test_case_results_lock: + super(_TextAndXMLTestResult, self).stopTest(test) + result = self.get_pending_test_case_result(test) + if not result: + test_name = test.id() or str(test) + sys.stderr.write('No pending test case: %s\n' % test_name) + return + if getattr(self, 'start_time', None) is None: + # startTest may not be called for skipped tests since Python 3.12.1. + self.start_time = self.time_getter() + test_id = id(test) + run_time = self.time_getter() - self.start_time + result.set_run_time(run_time) + result.set_start_time(self.start_time) + self.suite.add_test_case_result(result) + del self.pending_test_case_results[test_id] + + def startTestRun(self): + self.suite.set_start_time(self.time_getter()) + super(_TextAndXMLTestResult, self).startTestRun() + + def stopTestRun(self): + self.suite.set_end_time(self.time_getter()) + # All pending_test_case_results will be added to the suite and removed from + # the pending_test_case_results dictionary. Grabbing the write lock to avoid + # results from being added during this process to avoid duplicating adds or + # accidentally erasing newly appended pending results. + with self._pending_test_case_results_lock: + # Errors in the test fixture (setUpModule, tearDownModule, + # setUpClass, tearDownClass) can leave a pending result which + # never gets added to the suite. The runner calls stopTestRun + # which gives us an opportunity to add these errors for + # reporting here. + for test_id in self.pending_test_case_results: + result = self.pending_test_case_results[test_id] + if getattr(self, 'start_time', None) is not None: + run_time = self.suite.overall_end_time - self.start_time + result.set_run_time(run_time) + result.set_start_time(self.start_time) + self.suite.add_test_case_result(result) + self.pending_test_case_results.clear() + + def _exc_info_to_string(self, err, test=None): + """Converts a sys.exc_info()-style tuple of values into a string. + + This method must be overridden because the method signature in + unittest.TestResult changed between Python 2.2 and 2.4. + + Args: + err: A sys.exc_info() tuple of values for an error. + test: The test method. + + Returns: + A formatted exception string. + """ + if test: + return super(_TextAndXMLTestResult, self)._exc_info_to_string(err, test) + return ''.join(traceback.format_exception(*err)) + + def add_pending_test_case_result(self, test, error_summary=None, + skip_reason=None): + """Adds result information to a test case result which may still be running. + + If a result entry for the test already exists, add_pending_test_case_result + will add error summary tuples and/or overwrite skip_reason for the result. + If it does not yet exist, a result entry will be created. + Note that a test result is considered to have been run and passed + only if there are no errors or skip_reason. + + Args: + test: A test method as defined by unittest + error_summary: A 4-tuple with the following entries: + 1) a string identifier of either "failure" or "error" + 2) an exception_type + 3) an exception_message + 4) a string version of a sys.exc_info()-style tuple of values + ('error', err[0], err[1], self._exc_info_to_string(err)) + If the length of errors is 0, then the test is either passed or + skipped. 
+      skip_reason: a string explaining why the test was skipped
+    """
+    with self._pending_test_case_results_lock:
+      test_id = id(test)
+      if test_id not in self.pending_test_case_results:
+        self.pending_test_case_results[test_id] = self._TEST_CASE_RESULT_CLASS(
+            test)
+      if error_summary:
+        self.pending_test_case_results[test_id].errors.append(error_summary)
+      if skip_reason:
+        self.pending_test_case_results[test_id].skip_reason = skip_reason
+
+  def delete_pending_test_case_result(self, test):
+    with self._pending_test_case_results_lock:
+      test_id = id(test)
+      del self.pending_test_case_results[test_id]
+
+  def get_pending_test_case_result(self, test):
+    test_id = id(test)
+    return self.pending_test_case_results.get(test_id, None)
+
+  def addSuccess(self, test):
+    super(_TextAndXMLTestResult, self).addSuccess(test)
+    self.add_pending_test_case_result(test)
+
+  def addError(self, test, err):
+    super(_TextAndXMLTestResult, self).addError(test, err)
+    error_summary = ('error', err[0], err[1],
+                     self._exc_info_to_string(err, test=test))
+    self.add_pending_test_case_result(test, error_summary=error_summary)
+
+  def addFailure(self, test, err):
+    super(_TextAndXMLTestResult, self).addFailure(test, err)
+    error_summary = ('failure', err[0], err[1],
+                     self._exc_info_to_string(err, test=test))
+    self.add_pending_test_case_result(test, error_summary=error_summary)
+
+  def addSkip(self, test, reason):
+    super(_TextAndXMLTestResult, self).addSkip(test, reason)
+    self.add_pending_test_case_result(test, skip_reason=reason)
+
+  def addExpectedFailure(self, test, err):
+    super(_TextAndXMLTestResult, self).addExpectedFailure(test, err)
+    if callable(getattr(test, 'recordProperty', None)):
+      test.recordProperty('EXPECTED_FAILURE',
+                          self._exc_info_to_string(err, test=test))
+    self.add_pending_test_case_result(test)
+
+  def addUnexpectedSuccess(self, test):
+    super(_TextAndXMLTestResult, self).addUnexpectedSuccess(test)
+    test_name = test.id() or str(test)
+    error_summary = ('error', '', '',
+                     'Test case %s should have failed, but passed.'
+                     % (test_name))
+    self.add_pending_test_case_result(test, error_summary=error_summary)
+
+  def addSubTest(self, test, subtest, err):  # pylint: disable=invalid-name
+    super(_TextAndXMLTestResult, self).addSubTest(test, subtest, err)
+    if err is not None:
+      if issubclass(err[0], test.failureException):
+        error_summary = ('failure', err[0], err[1],
+                         self._exc_info_to_string(err, test=test))
+      else:
+        error_summary = ('error', err[0], err[1],
+                         self._exc_info_to_string(err, test=test))
+    else:
+      error_summary = None
+    self.add_pending_test_case_result(subtest, error_summary=error_summary)
+
+  def printErrors(self):
+    super(_TextAndXMLTestResult, self).printErrors()
+    self.xml_stream.write('<?xml version="1.0"?>\n')
+    self.suite.print_xml_summary(self.xml_stream)
+
+
+class TextAndXMLTestRunner(unittest.TextTestRunner):
+  """A test runner that produces both formatted text results and XML.
+
+  It prints out the names of tests as they are run, errors as they
+  occur, and a summary of the results at the end of the test run.
+  """
+
+  _TEST_RESULT_CLASS = _TextAndXMLTestResult
+
+  _xml_stream = None
+  _testsuites_properties = {}
+
+  def __init__(self, xml_stream=None, *args, **kwargs):
+    """Initialize a TextAndXMLTestRunner.
+
+    Args:
+      xml_stream: file-like or None; XML-formatted test results are output
+          via this object's write() method. If None (the default), the
+          new instance behaves as described in the set_default_xml_stream method
+          documentation below.
+ *args: passed unmodified to unittest.TextTestRunner.__init__. + **kwargs: passed unmodified to unittest.TextTestRunner.__init__. + """ + super(TextAndXMLTestRunner, self).__init__(*args, **kwargs) + if xml_stream is not None: + self._xml_stream = xml_stream + # else, do not set self._xml_stream to None -- this allows implicit fallback + # to the class attribute's value. + + @classmethod + def set_default_xml_stream(cls, xml_stream): + """Sets the default XML stream for the class. + + Args: + xml_stream: file-like or None; used for instances when xml_stream is None + or not passed to their constructors. If None is passed, instances + created with xml_stream=None will act as ordinary TextTestRunner + instances; this is the default state before any calls to this method + have been made. + """ + cls._xml_stream = xml_stream + + def _makeResult(self): + if self._xml_stream is None: + return super(TextAndXMLTestRunner, self)._makeResult() + else: + return self._TEST_RESULT_CLASS( + self._xml_stream, self.stream, self.descriptions, self.verbosity, + testsuites_properties=self._testsuites_properties) + + @classmethod + def set_testsuites_property(cls, key, value): + cls._testsuites_properties[key] = value diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..617b6a1bb78bfeac2b44253ff25126b917bc4fd4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/METADATA @@ -0,0 +1,378 @@ +Metadata-Version: 2.1 +Name: accelerate +Version: 0.29.2 +Summary: Accelerate +Home-page: https://github.com/huggingface/accelerate +Author: The HuggingFace team +Author-email: zach.mueller@huggingface.co +License: Apache +Keywords: deep learning +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: numpy (>=1.17) +Requires-Dist: packaging (>=20.0) +Requires-Dist: psutil +Requires-Dist: pyyaml +Requires-Dist: torch (>=1.10.0) +Requires-Dist: huggingface-hub +Requires-Dist: safetensors (>=0.3.1) +Provides-Extra: dev +Requires-Dist: black (~=23.1) ; extra == 'dev' +Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'dev' +Requires-Dist: ruff (~=0.2.1) ; extra == 'dev' +Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'dev' +Requires-Dist: pytest-xdist ; extra == 'dev' +Requires-Dist: pytest-subtests ; extra == 'dev' +Requires-Dist: parameterized ; extra == 'dev' +Requires-Dist: datasets ; extra == 'dev' +Requires-Dist: evaluate ; extra == 'dev' +Requires-Dist: torchpippy (>=0.2.0) ; extra == 'dev' +Requires-Dist: transformers ; extra == 'dev' +Requires-Dist: scipy ; extra == 'dev' +Requires-Dist: scikit-learn ; extra == 'dev' +Requires-Dist: deepspeed ; extra == 'dev' +Requires-Dist: tqdm ; extra == 'dev' +Requires-Dist: bitsandbytes ; extra == 'dev' +Requires-Dist: timm ; extra == 'dev' +Requires-Dist: rich ; extra == 'dev' +Provides-Extra: docs +Provides-Extra: quality +Requires-Dist: black (~=23.1) ; extra == 'quality' +Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'quality' +Requires-Dist: ruff (~=0.2.1) ; extra == 'quality' +Provides-Extra: rich +Requires-Dist: rich ; extra == 'rich' +Provides-Extra: sagemaker +Requires-Dist: sagemaker ; extra == 'sagemaker' +Provides-Extra: test_dev +Requires-Dist: datasets ; extra == 'test_dev' +Requires-Dist: evaluate ; extra == 'test_dev' +Requires-Dist: torchpippy (>=0.2.0) ; extra == 'test_dev' +Requires-Dist: transformers ; extra == 'test_dev' +Requires-Dist: scipy ; extra == 'test_dev' +Requires-Dist: scikit-learn ; extra == 'test_dev' +Requires-Dist: deepspeed ; extra == 'test_dev' +Requires-Dist: tqdm ; extra == 'test_dev' +Requires-Dist: bitsandbytes ; extra == 'test_dev' +Requires-Dist: timm ; extra == 'test_dev' +Provides-Extra: test_prod +Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'test_prod' +Requires-Dist: pytest-xdist ; extra == 'test_prod' +Requires-Dist: pytest-subtests ; extra == 'test_prod' +Requires-Dist: parameterized ; extra == 'test_prod' +Provides-Extra: test_trackers +Requires-Dist: wandb ; extra == 'test_trackers' +Requires-Dist: comet-ml ; extra == 'test_trackers' +Requires-Dist: tensorboard ; 
extra == 'test_trackers' +Requires-Dist: dvclive ; extra == 'test_trackers' +Provides-Extra: testing +Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'testing' +Requires-Dist: pytest-xdist ; extra == 'testing' +Requires-Dist: pytest-subtests ; extra == 'testing' +Requires-Dist: parameterized ; extra == 'testing' +Requires-Dist: datasets ; extra == 'testing' +Requires-Dist: evaluate ; extra == 'testing' +Requires-Dist: torchpippy (>=0.2.0) ; extra == 'testing' +Requires-Dist: transformers ; extra == 'testing' +Requires-Dist: scipy ; extra == 'testing' +Requires-Dist: scikit-learn ; extra == 'testing' +Requires-Dist: deepspeed ; extra == 'testing' +Requires-Dist: tqdm ; extra == 'testing' +Requires-Dist: bitsandbytes ; extra == 'testing' +Requires-Dist: timm ; extra == 'testing' + + + +

+[badges: License, Documentation, GitHub release, Contributor Covenant]
+
+Run your *raw* PyTorch training script on any kind of device
+
+## Easy to integrate
+
+🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16.
+
+🤗 Accelerate abstracts exactly and only the boilerplate code related to multi-GPUs/TPU/fp16 and leaves the rest of your code unchanged.
+
+Here is an example:
+
+```diff
+  import torch
+  import torch.nn.functional as F
+  from datasets import load_dataset
++ from accelerate import Accelerator
+
++ accelerator = Accelerator()
+- device = 'cpu'
++ device = accelerator.device
+
+  model = torch.nn.Transformer().to(device)
+  optimizer = torch.optim.Adam(model.parameters())
+
+  dataset = load_dataset('my_dataset')
+  data = torch.utils.data.DataLoader(dataset, shuffle=True)
+
++ model, optimizer, data = accelerator.prepare(model, optimizer, data)
+
+  model.train()
+  for epoch in range(10):
+      for source, targets in data:
+          source = source.to(device)
+          targets = targets.to(device)
+
+          optimizer.zero_grad()
+
+          output = model(source)
+          loss = F.cross_entropy(output, targets)
+
+-         loss.backward()
++         accelerator.backward(loss)
+
+          optimizer.step()
+```
+
+As you can see in this example, by adding 5 lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp8, fp16, bf16).
+
+In particular, the same code can then be run without modification on your local machine for debugging or in your training environment.
+
+🤗 Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can simplify your training loop even further:
+
+```diff
+  import torch
+  import torch.nn.functional as F
+  from datasets import load_dataset
++ from accelerate import Accelerator
+
+- device = 'cpu'
++ accelerator = Accelerator()
+
+- model = torch.nn.Transformer().to(device)
++ model = torch.nn.Transformer()
+  optimizer = torch.optim.Adam(model.parameters())
+
+  dataset = load_dataset('my_dataset')
+  data = torch.utils.data.DataLoader(dataset, shuffle=True)
+
++ model, optimizer, data = accelerator.prepare(model, optimizer, data)
+
+  model.train()
+  for epoch in range(10):
+      for source, targets in data:
+-         source = source.to(device)
+-         targets = targets.to(device)
+
+          optimizer.zero_grad()
+
+          output = model(source)
+          loss = F.cross_entropy(output, targets)
+
+-         loss.backward()
++         accelerator.backward(loss)
+
+          optimizer.step()
+```
+
+Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples).
+
+## Launching script
+
+🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training!
+On your machine(s) just run:
+
+```bash
+accelerate config
+```
+
+and answer the questions asked.
+This will generate a config file that will be used automatically to properly set the default options when doing
+
+```bash
+accelerate launch my_script.py --args_to_my_script
+```
+
+For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo):
+
+```bash
+accelerate launch examples/nlp_example.py
+```
+
+This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torch.distributed.run my_script.py` at your convenience.
+
+You can also directly pass in the arguments you would to `torchrun` as arguments to `accelerate launch` if you wish to not run `accelerate config`.
+
+For example, here is how to launch on two GPUs:
+
+```bash
+accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py
+```
+
+To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli).
+
+## Launching a multi-CPU run using MPI
+
+🤗 Here is another way to launch a multi-CPU run, using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
+Once you have MPI set up on your cluster, just run:
+
+```bash
+accelerate config
+```
+
+Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
+Then, use `accelerate launch` with your script like:
+
+```bash
+accelerate launch examples/nlp_example.py
+```
+
+Alternatively, you can use mpirun directly, without using the CLI, like:
+
+```bash
+mpirun -np 2 python examples/nlp_example.py
+```
+
+## Launching training using DeepSpeed
+
+🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you want to tweak your DeepSpeed-related args from your Python script, we provide you with the `DeepSpeedPlugin`.
+
+```python
+from accelerate import Accelerator, DeepSpeedPlugin
+
+# DeepSpeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it
+# Remember you still need to do gradient accumulation yourself, just like you would have done without DeepSpeed
+deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
+accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin)
+
+# How to save your 🤗 Transformer?
+accelerator.wait_for_everyone()
+unwrapped_model = accelerator.unwrap_model(model)
+unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model))
+```
+
+Note: DeepSpeed support is experimental for now. If you run into a problem, please open an issue.
+
+## Launching your training from a notebook
+
+🤗 Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function`, then, in your last cell, add:
+
+```python
+from accelerate import notebook_launcher
+
+notebook_launcher(training_function)
+```
+
+An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb).
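+
+As a slightly fuller sketch, suppose `training_function` builds its own model and dataloaders, and that the machine has two GPUs (the function body and the process count below are illustrative assumptions, not prescribed by the library). A launch cell might then look like:
+
+```python
+from accelerate import notebook_launcher
+
+def training_function():
+    # Build the model, optimizer and dataloaders *inside* the function so that
+    # each spawned worker process constructs its own copies.
+    ...
+
+# Spawn two worker processes, one per GPU, each running training_function.
+notebook_launcher(training_function, args=(), num_processes=2)
+```
+
+The linked notebook can also be opened directly in Colab: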
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)
+
+## Why should I use 🤗 Accelerate?
+
+You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.
+
+## Why shouldn't I use 🤗 Accelerate?
+
+You shouldn't use 🤗 Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that; 🤗 Accelerate is not one of them.
+
+## Frameworks using 🤗 Accelerate
+
+If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below:
+
+* [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development.
+* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).
+* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic.
+* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.
+* [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses.
+* [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering an industry-leading WebUI and terminal usage support, and serving as the foundation for many commercial products.
+* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose of training and fine-tuning the supported deep learning algorithms within the library.
+* [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with third-party systems, and retrieve information dynamically to do so.
+* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centered around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency, enabling users to understand exactly what is going on under the hood without having to write and maintain the boilerplate themselves!
+* [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source, browser-based, easy-to-use interface for Stable Diffusion, based on the Gradio library.
+* [torchkeras](https://github.com/lyhue1991/torchkeras) is a simple tool for training PyTorch models in a Keras style; a dynamic and beautiful plot is provided in the notebook to monitor your loss or metrics.
+* [transformers](https://github.com/huggingface/transformers) is a tool for helping train state-of-the-art machine learning models in PyTorch, TensorFlow, and JAX. (Accelerate is the backend for the PyTorch side.)
+
+
+## Installation
+
+This repository is tested on Python 3.8+ and PyTorch 1.10.0+.
+
+You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
+
+First, create a virtual environment with the version of Python you're going to use and activate it.
+
+Then, you will need to install PyTorch: refer to the [official installation page](https://pytorch.org/get-started/locally/#start-locally) for the specific install command for your platform. Then 🤗 Accelerate can be installed using pip as follows:
+
+```bash
+pip install accelerate
+```
+
+## Supported integrations
+
+- CPU only
+- multi-CPU on one node (machine)
+- multi-CPU on several nodes (machines)
+- single GPU
+- multi-GPU on one node (machine)
+- multi-GPU on several nodes (machines)
+- TPU
+- FP16/BFloat16 mixed precision
+- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine)
+- DeepSpeed support (Experimental)
+- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)
+- Megatron-LM support (Experimental)
+
+## Citing 🤗 Accelerate
+
+If you use 🤗 Accelerate in your publication, please cite it by using the following BibTeX entry.
+ +```bibtex +@Misc{accelerate, + title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.}, + author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan}, + howpublished = {\url{https://github.com/huggingface/accelerate}}, + year = {2022} +} +``` diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2fa7ae31f2d47e883fc51bc8cf9695a3da09e930 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/RECORD @@ -0,0 +1,163 @@ +../../../bin/accelerate,sha256=BMhR7IPBrkcxmZ7nWXIeRgnrnhJmrdjee8KIXhgtYdk,259 +../../../bin/accelerate-config,sha256=_nmL3uln1BEsmG5LBCWZc-Jrz6bVgsNc-KdbFNHdRs8,251 +../../../bin/accelerate-estimate-memory,sha256=gmoXcy4yYs5YHPtp71NGOdpsG5WD0oZaZdNTFK-I77E,253 +../../../bin/accelerate-launch,sha256=KIOgqpBnhsRCaEi_V3uM_yO_nOLRuKSASHtce3c8OHU,251 +accelerate-0.29.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +accelerate-0.29.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 +accelerate-0.29.2.dist-info/METADATA,sha256=FjUkmeCY24ElAVCof9Q9TqEllme-f6G0FbuJR9PfjWY,18942 +accelerate-0.29.2.dist-info/RECORD,, +accelerate-0.29.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +accelerate-0.29.2.dist-info/entry_points.txt,sha256=Z_KV59tIt4oZtUDEQ0w8JThJ6_1dd8vR8heH24DeAXI,238 +accelerate-0.29.2.dist-info/top_level.txt,sha256=esVfdxTidsjQ90zsN_rPpjLFJ4ijRlx4mnLrG09hlt4,11 +accelerate/__init__.py,sha256=7PnXknD50D0dgEHWiHkz-2s1bLDKCwrYetYFxSzqd6A,1456 +accelerate/__pycache__/__init__.cpython-310.pyc,, +accelerate/__pycache__/accelerator.cpython-310.pyc,, +accelerate/__pycache__/big_modeling.cpython-310.pyc,, +accelerate/__pycache__/checkpointing.cpython-310.pyc,, +accelerate/__pycache__/data_loader.cpython-310.pyc,, +accelerate/__pycache__/hooks.cpython-310.pyc,, +accelerate/__pycache__/inference.cpython-310.pyc,, +accelerate/__pycache__/launchers.cpython-310.pyc,, +accelerate/__pycache__/local_sgd.cpython-310.pyc,, +accelerate/__pycache__/logging.cpython-310.pyc,, +accelerate/__pycache__/memory_utils.cpython-310.pyc,, +accelerate/__pycache__/optimizer.cpython-310.pyc,, +accelerate/__pycache__/scheduler.cpython-310.pyc,, +accelerate/__pycache__/state.cpython-310.pyc,, +accelerate/__pycache__/tracking.cpython-310.pyc,, +accelerate/accelerator.py,sha256=rh4-KBMCkCGLldjKo1CRtBIbsXG76fJqYWdgOugaw7w,143024 +accelerate/big_modeling.py,sha256=eytO1vW8nxNNkyb8o9uh4L8iavzwJxbxc9selyG_8MU,29034 +accelerate/checkpointing.py,sha256=vFyLNg9-8qsPBYhAkcm-WwKEeK5Lrq9qLrQWNGFKoPk,11378 +accelerate/commands/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606 +accelerate/commands/__pycache__/__init__.cpython-310.pyc,, +accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc,, +accelerate/commands/__pycache__/env.cpython-310.pyc,, +accelerate/commands/__pycache__/estimate.cpython-310.pyc,, +accelerate/commands/__pycache__/launch.cpython-310.pyc,, +accelerate/commands/__pycache__/test.cpython-310.pyc,, +accelerate/commands/__pycache__/tpu.cpython-310.pyc,, +accelerate/commands/__pycache__/utils.cpython-310.pyc,, +accelerate/commands/accelerate_cli.py,sha256=i3nge5Wj8i4zkV0CVIk9P8veleRZbTZY0AU4fJOrKF8,1749 
+accelerate/commands/config/__init__.py,sha256=iJK8dgj3pc5Vdr1E7UuGoFu-BlybyXLxYDoTg9gXngE,1645 +accelerate/commands/config/__pycache__/__init__.cpython-310.pyc,, +accelerate/commands/config/__pycache__/cluster.cpython-310.pyc,, +accelerate/commands/config/__pycache__/config.cpython-310.pyc,, +accelerate/commands/config/__pycache__/config_args.cpython-310.pyc,, +accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc,, +accelerate/commands/config/__pycache__/default.cpython-310.pyc,, +accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc,, +accelerate/commands/config/__pycache__/update.cpython-310.pyc,, +accelerate/commands/config/cluster.py,sha256=lA55beGeo0fAowfffKhf8nGcy6lBjaOxTtV-Yg_Rz6s,29926 +accelerate/commands/config/config.py,sha256=FuRlQvOjgATEtyqOSsGD-KEtOCvACOHjs2C-krrtldk,3035 +accelerate/commands/config/config_args.py,sha256=hE42coVnn0UU-ysqp2ZH-jlqaXoPaHt5E_3qxT42GIM,10024 +accelerate/commands/config/config_utils.py,sha256=DcjIV1mDInFmct2_XQ-9KYAkREINs6YuHRbZe5HFjT8,2926 +accelerate/commands/config/default.py,sha256=3-SdEhl_zXM9S3f-FxkSVtiBQ5VY-QNsC4O26u60bss,5350 +accelerate/commands/config/sagemaker.py,sha256=GjHE2-h4tRr1P_PFtMF3miiAtJlzkbHbMb6kFXqn8eo,10341 +accelerate/commands/config/update.py,sha256=NXW1J7GkUHpg71QlIXsmMB_0z8S8IZo2FWax5POwrhc,2395 +accelerate/commands/env.py,sha256=HXXUozMFlxs0b-bU2a3nEcXwYz-5EBkfCvE9svqeN2U,3595 +accelerate/commands/estimate.py,sha256=shEn2nXyHmz94zpAzV2R8__lcNYW9f9djl7bOHoo04k,12398 +accelerate/commands/launch.py,sha256=rYmkdc0Kbcux4TOqBG_sJN-NNc4nmV90vuwHqhGNfWw,41439 +accelerate/commands/menu/__init__.py,sha256=uqSlBM0TFHBwzdv3p3SXfpAk1lZFp4h1a7mbBdscPHs,645 +accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc,, +accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc,, +accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc,, +accelerate/commands/menu/__pycache__/input.cpython-310.pyc,, +accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc,, +accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc,, +accelerate/commands/menu/cursor.py,sha256=-lmpJVAzvNc0c3EOtSuLoKB59zqylVCbYyWLPnrOmvQ,2028 +accelerate/commands/menu/helpers.py,sha256=KrSB5fJjH4MUEUAQJ6bYaN16AYcnl9UalDrPD3DYeeg,1483 +accelerate/commands/menu/input.py,sha256=Uj9eDp8-Mb0Fe49nuogqo9W_RCfYd6udfjiPKx7Wjmg,2537 +accelerate/commands/menu/keymap.py,sha256=eXj-suyYs1m5dEHoUKN4mKAMLc8DWHnwhP6G6JSU0jQ,4086 +accelerate/commands/menu/selection_menu.py,sha256=bxy-DHaKKC6SCToOlMBv5_z0MdUzylEg6Sio9OuV3GM,4921 +accelerate/commands/test.py,sha256=YrPYEaAACOGZ6btn2MV6NbMSEdBUcMWADLbQWaZSHtk,2149 +accelerate/commands/tpu.py,sha256=KyxDP7IuveidZrbW4rx2s8Ku3o_ptI6tzwr_R7ck0os,5548 +accelerate/commands/utils.py,sha256=ilcfE32oHh28EToM00nc_SR6upfZiuxUI0AjjZu8KYY,3995 +accelerate/data_loader.py,sha256=qQojnHAW0cjTL7jLQN_g-oHlRZBkKzti3ifk84Izuw4,48307 +accelerate/hooks.py,sha256=x0FBwwoy6PKSwulavYTpc4gERIoB7RHGPF0Qe6qjXNA,31244 +accelerate/inference.py,sha256=Ci7kkw2cocNpuvmbo1ytW2QgcI_HKWoXkIdonFOr0tg,7977 +accelerate/launchers.py,sha256=iFDZ7seDdRwHAHy1BbVPmPccAONiPdV2aBOHNuT2ZD8,11375 +accelerate/local_sgd.py,sha256=v0-AxldUSCYCI-rqjLiEHsVtSqyEIWTC5ppn7CW7qfY,4002 +accelerate/logging.py,sha256=kvUvk33r_7T2BNzIwqRZBOhuC-50Ju4rm4HbsM6h2G8,4897 +accelerate/memory_utils.py,sha256=3R5LoeHl6GgTZ-IMPrDZMdaEehWarGdPqODushb-6pg,862 +accelerate/optimizer.py,sha256=H7e1XwEysZ_GFR8V_3bHjFAY7zzrzO8samCyW_r7dZo,7453 +accelerate/scheduler.py,sha256=des_4M_Tt1W8gCYZZbLla0GHBEgJY3Wx2EGBQPTzeiY,4238 
+accelerate/state.py,sha256=9EDuO_uxO2-SxFnVSjUQuR9lDPMYXjwd1yfAgM-Oiws,50116 +accelerate/test_utils/__init__.py,sha256=amEDYw-ztgIvHkYT3mv3ixk1QJirUnf6jfPJzqUUYkQ,1459 +accelerate/test_utils/__pycache__/__init__.cpython-310.pyc,, +accelerate/test_utils/__pycache__/examples.cpython-310.pyc,, +accelerate/test_utils/__pycache__/testing.cpython-310.pyc,, +accelerate/test_utils/__pycache__/training.cpython-310.pyc,, +accelerate/test_utils/examples.py,sha256=jRm1S9TkmeoLaqprBvtVFN4LesiaDZtKMNIoLNY2euw,7281 +accelerate/test_utils/scripts/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606 +accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc,, +accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc,, +accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc,, +accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc,, +accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc,, +accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc,, +accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc,, +accelerate/test_utils/scripts/external_deps/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606 +accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc,, +accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc,, +accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc,, +accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc,, +accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc,, +accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc,, +accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc,, +accelerate/test_utils/scripts/external_deps/test_checkpointing.py,sha256=zILzHevzqxB1NPPDrJ1furaitI8MTvhBeG9QzzL0bmE,10668 +accelerate/test_utils/scripts/external_deps/test_metrics.py,sha256=67-S1qeCpCL9ceaH22RsIsBJscMS7VQWaO4Krcszzbw,12133 +accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py,sha256=D0YnKCxkI4ZwDOmZ5Ev6hL9jPyP7SU4WffpVFiK14bs,11072 +accelerate/test_utils/scripts/external_deps/test_performance.py,sha256=8fV3wCM1H9HVRRyC5C4EGWt-9aHILX_y3-E7LfSiv7M,9803 +accelerate/test_utils/scripts/external_deps/test_pippy.py,sha256=RdMoD1rlLKMyjyl0soSqR3iDbGidS6-z5GHo3bJUOw8,4647 +accelerate/test_utils/scripts/external_deps/test_zero3_integration.py,sha256=bJ0Jio-6OCyS2FIgFmZi3duqG1gbkOoTEcHsrORYIL4,1503 +accelerate/test_utils/scripts/test_cli.py,sha256=qfk1aYFtdvYFCYPkl05602SNGvk08QTv0xZVVcFVtzM,833 +accelerate/test_utils/scripts/test_distributed_data_loop.py,sha256=VqFPKNRu8yx2MoZ4nHy5wRocEthSymcIA2mg1knqDq8,8315 +accelerate/test_utils/scripts/test_notebook.py,sha256=Q4OOWHa_GMmzwfiq71BTpKYmhCHLC02J42OO94ut9xk,1629 +accelerate/test_utils/scripts/test_ops.py,sha256=BcGn3xJT2wUJ0Yk_6VLNkneSv9z24JeAoQjsgdIIRr4,6170 +accelerate/test_utils/scripts/test_script.py,sha256=QyHRWvHQm1XWkAH7YilQ0gZe3zwvEkyqD6JXmneWqak,32059 +accelerate/test_utils/scripts/test_sync.py,sha256=3kltq-GuUjOVuo6_FOuWiPyc5f3pGiqiwEAbex5x_-o,18263 +accelerate/test_utils/testing.py,sha256=HIp7n6qPMh8KPbwEzNWu5mzfxnQRcU15EQ1AQKehpo0,20571 +accelerate/test_utils/training.py,sha256=8k_YAQ21MzUdb2aFWq1t2fihW1b-iBGh1OJSL3whY68,4019 +accelerate/tracking.py,sha256=WLY-H1DTsxrz4BVzle7QZMp0Irg84yFMbA1e6JaY3pM,39789 
+accelerate/utils/__init__.py,sha256=SEP34Od2TbTZt7AbhPJoWWDxFoNMeNEyAuVfaPgVu7k,6065 +accelerate/utils/__pycache__/__init__.cpython-310.pyc,, +accelerate/utils/__pycache__/bnb.cpython-310.pyc,, +accelerate/utils/__pycache__/constants.cpython-310.pyc,, +accelerate/utils/__pycache__/dataclasses.cpython-310.pyc,, +accelerate/utils/__pycache__/deepspeed.cpython-310.pyc,, +accelerate/utils/__pycache__/environment.cpython-310.pyc,, +accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc,, +accelerate/utils/__pycache__/imports.cpython-310.pyc,, +accelerate/utils/__pycache__/launch.cpython-310.pyc,, +accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc,, +accelerate/utils/__pycache__/memory.cpython-310.pyc,, +accelerate/utils/__pycache__/modeling.cpython-310.pyc,, +accelerate/utils/__pycache__/offload.cpython-310.pyc,, +accelerate/utils/__pycache__/operations.cpython-310.pyc,, +accelerate/utils/__pycache__/other.cpython-310.pyc,, +accelerate/utils/__pycache__/random.cpython-310.pyc,, +accelerate/utils/__pycache__/rich.cpython-310.pyc,, +accelerate/utils/__pycache__/torch_xla.cpython-310.pyc,, +accelerate/utils/__pycache__/tqdm.cpython-310.pyc,, +accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc,, +accelerate/utils/__pycache__/versions.cpython-310.pyc,, +accelerate/utils/bnb.py,sha256=3i59dy8EcBYJEnT2alJ5_M-zeIpFsrceQ4bImiJJKOk,20570 +accelerate/utils/constants.py,sha256=e6Bpf7gSZLFkvfr-1B1841b6lVoKJ5uyyf5kefe0aT4,2566 +accelerate/utils/dataclasses.py,sha256=QSP-gYjXz68s0PAseKwLHRBQUnzcBQwPk80otV4X20k,74253 +accelerate/utils/deepspeed.py,sha256=1JFnz-dY6xP9yHywnX8bzZNq-d-8Cpg5CvVNLZ74b_0,10276 +accelerate/utils/environment.py,sha256=8eVGMCu7xT1y0Hxochnxz_RghDePtWo2TghDlOm5Gf0,10409 +accelerate/utils/fsdp_utils.py,sha256=QURWBtK8D00zppqJko0yeznEovXvnkRLI0NpPPkog1Q,10667 +accelerate/utils/imports.py,sha256=gYj_W3E5V83dYlSqqYE89OAK6JonzwhlcEjsJcOpB3E,12232 +accelerate/utils/launch.py,sha256=hHpcnR0NrSmqaT7AIaeIeXOAJVIhWnWdq3kA1XSnOYs,27459 +accelerate/utils/megatron_lm.py,sha256=IfHrtMiPSwuzh5ri96rTTIcEluuMNuIj3O8Y4jW6Fzk,57124 +accelerate/utils/memory.py,sha256=VxJCU-tMX8uE34GbJnxtDXYPHh4D9p2Y-d6rkGxqSa0,5200 +accelerate/utils/modeling.py,sha256=OfTHPg7oM9-jzYotLZjJKj6TrhCTFV3qOtQAOhKXmzQ,80246 +accelerate/utils/offload.py,sha256=qjaVai81wbkA0YH2WkmOXvZT0BRphygfRV_4Ua4j4U4,7837 +accelerate/utils/operations.py,sha256=zsmRx8mP2eoImPc42pOmBIqaHX7RDugw8AZ_HF3onpg,30610 +accelerate/utils/other.py,sha256=kgON65EhzQN3oQZqzgAOmmNC2vsQkeO77qEuzN7Zv7c,12283 +accelerate/utils/random.py,sha256=t-HsLQRm8etSiLSyONCU9wNhj-0VjDUyDme9p6RxDNU,4881 +accelerate/utils/rich.py,sha256=8JZX_uGMQX-BufdXxJpdne7BWd1KyLHSgbiGxrDMYr8,847 +accelerate/utils/torch_xla.py,sha256=Pq1tuqN0X_pWDVza6YgjfO45uoJdoRVRForLeLQzFus,1908 +accelerate/utils/tqdm.py,sha256=9Ovx4GL8AvjSaBd_OysoUGPW9ZJ3ZBOde6776HMEMOA,1344 +accelerate/utils/transformer_engine.py,sha256=gNPkOv_D1SDLm6nVZtxWIjyA6snxWtAQeBWUZLIErJE,3582 +accelerate/utils/versions.py,sha256=UgmcbjBm--6CIx1ZamSAMjAK_B_2l48LbeaNygqej8M,2149 diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git 
a/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/entry_points.txt b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..db491c83cf468945b19a098b171fcbfa7d7db1d1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +accelerate = accelerate.commands.accelerate_cli:main +accelerate-config = accelerate.commands.config:main +accelerate-estimate-memory = accelerate.commands.estimate:main +accelerate-launch = accelerate.commands.launch:main diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..a9368375be0e0e13fdad0eea4b92541bd9e1f594 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate-0.29.2.dist-info/top_level.txt @@ -0,0 +1 @@ +accelerate diff --git a/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..62b076cdee58ec8f34034141ba0befd9015b0c7e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/LICENSE @@ -0,0 +1,20 @@ +This package contains a modified version of ca-bundle.crt: + +ca-bundle.crt -- Bundle of CA Root Certificates + +This is a bundle of X.509 certificates of public Certificate Authorities +(CA). These were automatically extracted from Mozilla's root certificates +file (certdata.txt). This file can be found in the mozilla source tree: +https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt +It contains the certificates in PEM format and therefore +can be directly used with curl / libcurl / php_curl, or with +an Apache+mod_ssl webserver for SSL client authentication. +Just configure this file as the SSLCACertificateFile.# + +***** BEGIN LICENSE BLOCK ***** +This Source Code Form is subject to the terms of the Mozilla Public License, +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain +one at http://mozilla.org/MPL/2.0/. + +***** END LICENSE BLOCK ***** +@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ diff --git a/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c688a62848560861b62738ef0b70d373c4980baf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/METADATA @@ -0,0 +1,66 @@ +Metadata-Version: 2.1 +Name: certifi +Version: 2024.2.2 +Summary: Python package for providing Mozilla's CA Bundle. 
+Home-page: https://github.com/certifi/python-certifi +Author: Kenneth Reitz +Author-email: me@kennethreitz.com +License: MPL-2.0 +Project-URL: Source, https://github.com/certifi/python-certifi +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) +Classifier: Natural Language :: English +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Requires-Python: >=3.6 +License-File: LICENSE + +Certifi: Python SSL Certificates +================================ + +Certifi provides Mozilla's carefully curated collection of Root Certificates for +validating the trustworthiness of SSL certificates while verifying the identity +of TLS hosts. It has been extracted from the `Requests`_ project. + +Installation +------------ + +``certifi`` is available on PyPI. Simply install it with ``pip``:: + + $ pip install certifi + +Usage +----- + +To reference the installed certificate authority (CA) bundle, you can use the +built-in function:: + + >>> import certifi + + >>> certifi.where() + '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem' + +Or from the command line:: + + $ python -m certifi + /usr/local/lib/python3.7/site-packages/certifi/cacert.pem + +Enjoy! + +.. _`Requests`: https://requests.readthedocs.io/en/master/ + +Addition/Removal of Certificates +-------------------------------- + +Certifi does not support any addition/removal or other modification of the +CA trust store content. This project is intended to provide a reliable and +highly portable root of trust to python deployments. Look to upstream projects +for methods to use alternate trust. 
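+
+Example
+-------
+
+As a concrete illustration of the intended use, the bundle can be handed to an
+``ssl`` context explicitly (a minimal sketch using only the standard library;
+the URL is illustrative)::
+
+    import ssl
+    import urllib.request
+
+    import certifi
+
+    # Verify the server against certifi's CA bundle rather than the OS store.
+    context = ssl.create_default_context(cafile=certifi.where())
+
+    with urllib.request.urlopen("https://example.com", context=context) as resp:
+        print(resp.status)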
diff --git a/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..3f70d91915cf10dffe340302ca51705e29b6f520 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/RECORD @@ -0,0 +1,14 @@ +certifi-2024.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +certifi-2024.2.2.dist-info/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 +certifi-2024.2.2.dist-info/METADATA,sha256=1noreLRChpOgeSj0uJT1mehiBl8ngh33Guc7KdvzYYM,2170 +certifi-2024.2.2.dist-info/RECORD,, +certifi-2024.2.2.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +certifi-2024.2.2.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 +certifi/__init__.py,sha256=ljtEx-EmmPpTe2SOd5Kzsujm_lUD0fKJVnE9gzce320,94 +certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243 +certifi/__pycache__/__init__.cpython-310.pyc,, +certifi/__pycache__/__main__.cpython-310.pyc,, +certifi/__pycache__/core.cpython-310.pyc,, +certifi/cacert.pem,sha256=ejR8qP724p-CtuR4U1WmY1wX-nVeCUD2XxWqj8e9f5I,292541 +certifi/core.py,sha256=qRDDFyXVJwTB_EmoGppaXU_R9qCZvhl-EzxPMuV3nTA,4426 +certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..963eac530b9bc28d704d1bc410299c68e3216d4d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/top_level.txt @@ -0,0 +1 @@ +certifi diff --git a/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d272f6ed555bf206d2a9572524bfa3c0b500fe8d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/entry_points.txt b/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..c36a5e3ee479856d913cae1521b4bd2122587325 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/chardet-5.2.0.dist-info/entry_points.txt @@ -0,0 +1,2 
@@ +[console_scripts] +chardetect = chardet.cli.chardetect:main diff --git a/env-llmeval/lib/python3.10/site-packages/click/__pycache__/_termui_impl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/_termui_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49cb9dd7962dcac6d86a6d58ce77b5acc915bc36 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/_termui_impl.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/click/__pycache__/_winconsole.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/_winconsole.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82ff44b5b08316a741662457601319b27e9776f2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/_winconsole.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/click/__pycache__/core.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ca035313b2683edaa576b064958960bd676fde4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/core.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/click/__pycache__/decorators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12c2bf1099ae647b3f926783b90bec0766c1395b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/decorators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/click/__pycache__/exceptions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7faf8b4f4f029e655989f30fde679e43d68cc944 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/exceptions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/click/__pycache__/formatting.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/formatting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95276618f79a70bae8e2ba47b1fbf8630d7e1116 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/formatting.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/click/__pycache__/termui.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/termui.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47fc6cd6ea474015262916b2685ffc02822a5c19 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/click/__pycache__/termui.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/filelock-3.13.4.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/filelock-3.13.4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..928f0009fd099386ceb48da07cb9ec1f37b51167 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/filelock-3.13.4.dist-info/RECORD @@ -0,0 +1,22 @@ +filelock-3.13.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +filelock-3.13.4.dist-info/METADATA,sha256=id-2QR57cZ5P7lgQZdhd_xGLBlUrD1p_mFir6ITfRx4,2792 
+filelock-3.13.4.dist-info/RECORD,, +filelock-3.13.4.dist-info/WHEEL,sha256=as-1oFTWSeWBgyzh0O_qF439xqBe6AbBgt4MfYe5zwY,87 +filelock-3.13.4.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210 +filelock/__init__.py,sha256=c_wPy9Fo0fmqE4V448Q2A2dDYnMTZeAVHX-kUoN3KCY,1214 +filelock/__pycache__/__init__.cpython-310.pyc,, +filelock/__pycache__/_api.cpython-310.pyc,, +filelock/__pycache__/_error.cpython-310.pyc,, +filelock/__pycache__/_soft.cpython-310.pyc,, +filelock/__pycache__/_unix.cpython-310.pyc,, +filelock/__pycache__/_util.cpython-310.pyc,, +filelock/__pycache__/_windows.cpython-310.pyc,, +filelock/__pycache__/version.cpython-310.pyc,, +filelock/_api.py,sha256=rdQfF14SEH01ZoSQXdRm7CW7xGFUTBZyQytd8TySiio,12330 +filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787 +filelock/_soft.py,sha256=haqtc_TB_KJbYv2a8iuEAclKuM4fMG1vTcp28sK919c,1711 +filelock/_unix.py,sha256=-FXP0tjInBHUYygOlMpp4taUmD87QOkrD_4ybg_iT7Q,2259 +filelock/_util.py,sha256=QHBoNFIYfbAThhotH3Q8E2acFc84wpG49-T-uu017ZE,1715 +filelock/_windows.py,sha256=eMKL8dZKrgekf5VYVGR14an29JGEInRtUO8ui9ABywg,2177 +filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +filelock/version.py,sha256=7_6ZiCN2IEbqvGolC1tvkUZXIMID8-9wKt9GAmhPeyU,413 diff --git a/env-llmeval/lib/python3.10/site-packages/filelock-3.13.4.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/filelock-3.13.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..8781814daf6f6e9eb407b0dd748ad19ed5f050a0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/filelock-3.13.4.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.22.5 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..67590a5e5be5a5a2dde3fe53a7512e404a896c22 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Martin Durant +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..f75287c62a87ac37ab74a7f85b78b743f8e85db4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/METADATA @@ -0,0 +1,167 @@ +Metadata-Version: 2.1 +Name: fsspec +Version: 2024.2.0 +Summary: File-system specification +Home-page: https://github.com/fsspec/filesystem_spec +Maintainer: Martin Durant +Maintainer-email: mdurant@anaconda.com +License: BSD +Project-URL: Changelog, https://filesystem-spec.readthedocs.io/en/latest/changelog.html +Project-URL: Documentation, https://filesystem-spec.readthedocs.io/en/latest/ +Keywords: file +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE +Provides-Extra: abfs +Requires-Dist: adlfs ; extra == 'abfs' +Provides-Extra: adl +Requires-Dist: adlfs ; extra == 'adl' +Provides-Extra: arrow +Requires-Dist: pyarrow >=1 ; extra == 'arrow' +Provides-Extra: dask +Requires-Dist: dask ; extra == 'dask' +Requires-Dist: distributed ; extra == 'dask' +Provides-Extra: devel +Requires-Dist: pytest ; extra == 'devel' +Requires-Dist: pytest-cov ; extra == 'devel' +Provides-Extra: dropbox +Requires-Dist: dropboxdrivefs ; extra == 'dropbox' +Requires-Dist: requests ; extra == 'dropbox' +Requires-Dist: dropbox ; extra == 'dropbox' +Provides-Extra: entrypoints +Provides-Extra: full +Requires-Dist: adlfs ; extra == 'full' +Requires-Dist: aiohttp !=4.0.0a0,!=4.0.0a1 ; extra == 'full' +Requires-Dist: dask ; extra == 'full' +Requires-Dist: distributed ; extra == 'full' +Requires-Dist: dropbox ; extra == 'full' +Requires-Dist: dropboxdrivefs ; extra == 'full' +Requires-Dist: fusepy ; extra == 'full' +Requires-Dist: gcsfs ; extra == 'full' +Requires-Dist: libarchive-c ; extra == 'full' +Requires-Dist: ocifs ; extra == 'full' +Requires-Dist: panel ; extra == 'full' +Requires-Dist: paramiko ; extra == 'full' +Requires-Dist: pyarrow >=1 ; extra == 'full' +Requires-Dist: pygit2 ; extra == 'full' +Requires-Dist: requests ; extra == 'full' +Requires-Dist: s3fs ; extra == 'full' +Requires-Dist: smbprotocol ; extra == 'full' +Requires-Dist: tqdm ; extra == 'full' +Provides-Extra: fuse +Requires-Dist: fusepy ; extra == 'fuse' +Provides-Extra: gcs +Requires-Dist: gcsfs ; extra == 'gcs' +Provides-Extra: git +Requires-Dist: pygit2 ; extra == 'git' +Provides-Extra: github +Requires-Dist: requests ; extra == 'github' +Provides-Extra: gs +Requires-Dist: gcsfs ; extra == 'gs' +Provides-Extra: gui 
+Requires-Dist: panel ; extra == 'gui'
+Provides-Extra: hdfs
+Requires-Dist: pyarrow >=1 ; extra == 'hdfs'
+Provides-Extra: http
+Requires-Dist: aiohttp !=4.0.0a0,!=4.0.0a1 ; extra == 'http'
+Provides-Extra: libarchive
+Requires-Dist: libarchive-c ; extra == 'libarchive'
+Provides-Extra: oci
+Requires-Dist: ocifs ; extra == 'oci'
+Provides-Extra: s3
+Requires-Dist: s3fs ; extra == 's3'
+Provides-Extra: sftp
+Requires-Dist: paramiko ; extra == 'sftp'
+Provides-Extra: smb
+Requires-Dist: smbprotocol ; extra == 'smb'
+Provides-Extra: ssh
+Requires-Dist: paramiko ; extra == 'ssh'
+Provides-Extra: tqdm
+Requires-Dist: tqdm ; extra == 'tqdm'
+
+# filesystem_spec
+
+[![PyPI version](https://badge.fury.io/py/fsspec.svg)](https://pypi.python.org/pypi/fsspec/)
+[![Anaconda-Server Badge](https://anaconda.org/conda-forge/fsspec/badges/version.svg)](https://anaconda.org/conda-forge/fsspec)
+![Build](https://github.com/fsspec/filesystem_spec/workflows/CI/badge.svg)
+[![Docs](https://readthedocs.org/projects/filesystem-spec/badge/?version=latest)](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest)
+[![PyPi downloads](https://img.shields.io/pypi/dm/fsspec?label=pypi%20downloads&style=flat)](https://pepy.tech/project/fsspec)
+
+A specification for pythonic filesystems.
+
+## Install
+
+```bash
+pip install fsspec
+```
+
+installs the base fsspec. Some optional features require extra dependencies; e.g. `pip install fsspec[ssh]` will install the dependencies for `ssh` backend support, and `pip install fsspec[full]` installs all known extra dependencies.
+
+An up-to-date package is also provided through the conda-forge distribution:
+
+```bash
+conda install -c conda-forge fsspec
+```
+
+
+## Purpose
+
+To produce a template or specification for a file-system interface that specific implementations should follow, so that applications making use of them can rely on a common behaviour and not have to worry about the specific internal implementation decisions of any given backend. Many such implementations are included in this package, or in sister projects such as `s3fs` and `gcsfs`.
+
+In addition, if this is well-designed, then additional functionality, such as a key-value store or FUSE mounting of the file-system implementation, may be available for all implementations "for free".
+
+## Documentation
+
+Please refer to [RTD](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest).
+
+## Develop
+
+fsspec uses GitHub Actions for CI. Environment files can be found
+in the "ci/" directory. Note that the main environment is called "py38",
+but it is expected that the version of Python installed be adjustable at
+CI runtime. For local use, pick a version suitable for you.
+
+### Testing
+
+Tests can be run in the dev environment, if activated, via ``pytest fsspec``.
+
+The full fsspec suite requires a system-level docker, docker-compose, and fuse
+installation. If only making changes to one backend implementation, it is
+not generally necessary to run all tests locally.
+
+It is expected that contributors ensure that any change to fsspec does not
+cause issues or regressions either for other fsspec-related packages such
+as gcsfs and s3fs, or for downstream users of fsspec. The "downstream" CI
+run and corresponding environment file run a set of tests from the dask
+test suite, and very minimal tests against pandas and zarr from the
+test_downstream.py module in this repo.
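+
+For a quick local smoke test that needs no docker at all, the bundled in-memory
+backend is enough to exercise the common `AbstractFileSystem` interface (a
+minimal sketch; the paths used here are arbitrary):
+
+```python
+import fsspec
+
+# The "memory" filesystem ships with the base install, so this runs without
+# any external services or optional dependencies.
+fs = fsspec.filesystem("memory")
+
+# Write a file through the generic filesystem API...
+with fs.open("/demo/hello.txt", "w") as f:
+    f.write("hello fsspec")
+
+# ...and read it back as bytes.
+assert fs.exists("/demo/hello.txt")
+assert fs.cat("/demo/hello.txt") == b"hello fsspec"
+```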
+
+### Code Formatting
+
+fsspec uses [Black](https://black.readthedocs.io/en/stable) to ensure
+a consistent code format throughout the project.
+Run ``black fsspec`` from the root of the filesystem_spec repository to
+auto-format your code. Additionally, many editors have plugins that will apply
+``black`` as you edit files. ``black`` is included in the ``tox`` environments.
+
+Optionally, you may wish to set up [pre-commit hooks](https://pre-commit.com) to
+automatically run ``black`` when you make a git commit.
+Run ``pre-commit install --install-hooks`` from the root of the
+filesystem_spec repository to set up pre-commit hooks. ``black`` will now be run
+before you commit, reformatting any changed files. You can format without
+committing via ``pre-commit run`` or skip these checks with ``git commit
+--no-verify``.
diff --git a/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..e34d1067039604b532ec2e8ff0bcce2856ab54fb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/RECORD
@@ -0,0 +1,104 @@
+fsspec-2024.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+fsspec-2024.2.0.dist-info/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513
+fsspec-2024.2.0.dist-info/METADATA,sha256=uwzW1Braxnd_QGVI8W6J0KHi5KTiTJEm8YzSUdG-_Dc,6786
+fsspec-2024.2.0.dist-info/RECORD,,
+fsspec-2024.2.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+fsspec-2024.2.0.dist-info/top_level.txt,sha256=blt2pDrQDwN3Gklcw13CSPLQRd6aaOgJ8AxqrW395MI,7
+fsspec/__init__.py,sha256=2kT62GfFK-AjgS-LgwSsCo_VA2IePvsyv8Ash5oiaFA,1982
+fsspec/__pycache__/__init__.cpython-310.pyc,,
+fsspec/__pycache__/_version.cpython-310.pyc,,
+fsspec/__pycache__/archive.cpython-310.pyc,,
+fsspec/__pycache__/asyn.cpython-310.pyc,,
+fsspec/__pycache__/caching.cpython-310.pyc,,
+fsspec/__pycache__/callbacks.cpython-310.pyc,,
+fsspec/__pycache__/compression.cpython-310.pyc,,
+fsspec/__pycache__/config.cpython-310.pyc,,
+fsspec/__pycache__/conftest.cpython-310.pyc,,
+fsspec/__pycache__/core.cpython-310.pyc,,
+fsspec/__pycache__/dircache.cpython-310.pyc,,
+fsspec/__pycache__/exceptions.cpython-310.pyc,,
+fsspec/__pycache__/fuse.cpython-310.pyc,,
+fsspec/__pycache__/generic.cpython-310.pyc,,
+fsspec/__pycache__/gui.cpython-310.pyc,,
+fsspec/__pycache__/mapping.cpython-310.pyc,,
+fsspec/__pycache__/parquet.cpython-310.pyc,,
+fsspec/__pycache__/registry.cpython-310.pyc,,
+fsspec/__pycache__/spec.cpython-310.pyc,,
+fsspec/__pycache__/transaction.cpython-310.pyc,,
+fsspec/__pycache__/utils.cpython-310.pyc,,
+fsspec/_version.py,sha256=onTKKWe4fXkBjQxbTwM82SUT0H3x4U17IYrciFAryaU,500
+fsspec/archive.py,sha256=S__DzfZj-urAN3tp2W6jJ6YDiXG1fAl7FjvWUN73qIE,2386
+fsspec/asyn.py,sha256=kJ45sFFya2lZsmu2v8CVc8ZPRs8AccEzAy6Jot2ylkU,36157
+fsspec/caching.py,sha256=N45pzJdD4w5FOX_sxGvHWirggPNB66JTGP1HH6fpSck,28781
+fsspec/callbacks.py,sha256=BDIwLzK6rr_0V5ch557fSzsivCElpdqhXr5dZ9Te-EE,9210
+fsspec/compression.py,sha256=Yyd8FXw2rwWRtVoRVah_yguv-J7BUcBo4yDu6Qt52a0,4859
+fsspec/config.py,sha256=LF4Zmu1vhJW7Je9Q-cwkRc3xP7Rhyy7Xnwj26Z6sv2g,4279
+fsspec/conftest.py,sha256=fVfx-NLrH_OZS1TIpYNoPzM7efEcMoL62reHOdYeFCA,1245
+fsspec/core.py,sha256=0yCj1Z5MhbSDIQiqFs49VORl9QaGwV6hp9bXdkIoPIo,22363
+fsspec/dircache.py,sha256=YzogWJrhEastHU7vWz-cJiJ7sdtLXFXhEpInGKd4EcM,2717
+fsspec/exceptions.py,sha256=xcS7LiRrQ748kvOB9mrUR14kpjNztrHgEkZWi9M-VaI,330 +fsspec/fuse.py,sha256=66amOa6wdIbS0DMhhfAPUoOB37HPorfXD1izV0prmTY,10145 +fsspec/generic.py,sha256=NuNaP66OaphwMbuLHRFBLda78TD81isa9O4ozJqbUv0,13455 +fsspec/gui.py,sha256=XKoXZpUhRE7jOhRCJH4-jRbKhVu56aS8h9tecvPD3nc,13932 +fsspec/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fsspec/implementations/__pycache__/__init__.cpython-310.pyc,, +fsspec/implementations/__pycache__/arrow.cpython-310.pyc,, +fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc,, +fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc,, +fsspec/implementations/__pycache__/cached.cpython-310.pyc,, +fsspec/implementations/__pycache__/dask.cpython-310.pyc,, +fsspec/implementations/__pycache__/data.cpython-310.pyc,, +fsspec/implementations/__pycache__/dbfs.cpython-310.pyc,, +fsspec/implementations/__pycache__/dirfs.cpython-310.pyc,, +fsspec/implementations/__pycache__/ftp.cpython-310.pyc,, +fsspec/implementations/__pycache__/git.cpython-310.pyc,, +fsspec/implementations/__pycache__/github.cpython-310.pyc,, +fsspec/implementations/__pycache__/http.cpython-310.pyc,, +fsspec/implementations/__pycache__/jupyter.cpython-310.pyc,, +fsspec/implementations/__pycache__/libarchive.cpython-310.pyc,, +fsspec/implementations/__pycache__/local.cpython-310.pyc,, +fsspec/implementations/__pycache__/memory.cpython-310.pyc,, +fsspec/implementations/__pycache__/reference.cpython-310.pyc,, +fsspec/implementations/__pycache__/sftp.cpython-310.pyc,, +fsspec/implementations/__pycache__/smb.cpython-310.pyc,, +fsspec/implementations/__pycache__/tar.cpython-310.pyc,, +fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc,, +fsspec/implementations/__pycache__/zip.cpython-310.pyc,, +fsspec/implementations/arrow.py,sha256=_7TLuV6ZzNlpmUU_v6ud56u2wadzsKmY5qugPBxgMEs,8649 +fsspec/implementations/cache_mapper.py,sha256=iHgBA6gjzDJ7_mBboHFzpLTf55HP3UEwUOZ43xyUK4M,2429 +fsspec/implementations/cache_metadata.py,sha256=ZvyA7Y3KK-5Ct4E5pELzD6mH_5T03XqaKVT96qYDADU,8576 +fsspec/implementations/cached.py,sha256=LbbPbeUup07O0y7gXD_atFgajWM9p1vlDKu_BOyLfbo,30943 +fsspec/implementations/dask.py,sha256=CXZbJzIVOhKV8ILcxuy3bTvcacCueAbyQxmvAkbPkrk,4466 +fsspec/implementations/data.py,sha256=Oti0dKzyeadnVIedo3s8CADoh9bNM-96_6viTEYr4lo,1245 +fsspec/implementations/dbfs.py,sha256=cix9OYUveuSOx5UO5uRUwNUkYqjzyY0fkKnca1kTgZ0,15014 +fsspec/implementations/dirfs.py,sha256=inDIRSDPhI1_ud1MMBFrpZQ11VIAMJ_dZQtbE4V08Ng,11384 +fsspec/implementations/ftp.py,sha256=rp6cTog8xqjDPlKdSLKcsyP7K593_ByMabxGbNSEpTo,11655 +fsspec/implementations/git.py,sha256=vKGI-Vd5q4H2RrvhebkPc9NwlfkZ980OUGhebeCw-M0,4034 +fsspec/implementations/github.py,sha256=0kIiKkeAaROuHgdWBHVQFrzJ2ZfoDgymCehL_kJXHYA,7565 +fsspec/implementations/http.py,sha256=PkhfgUV3-T7fG2Jf-NLX9doH52snV5Wmw91uVA9k74M,29454 +fsspec/implementations/jupyter.py,sha256=B2uj7OEm7yIk-vRSsO37_ND0t0EBvn4B-Su43ibN4Pg,3811 +fsspec/implementations/libarchive.py,sha256=5_I2DiLXwQ1JC8x-K7jXu-tBwhO9dj7tFLnb0bTnVMQ,7102 +fsspec/implementations/local.py,sha256=nxiRKg9FAQHTQss9-ET8ZzDXPGhSOktgkxrg0ffMs2I,13454 +fsspec/implementations/memory.py,sha256=2iU--pOV2KCTrS-d5K8VKSygh9MPk2D7NZ_C8lMMEIw,9701 +fsspec/implementations/reference.py,sha256=0iGu8mscaQ3a5iTlRNByytQ3_-1Bj8__ARqVwyy4q2M,43871 +fsspec/implementations/sftp.py,sha256=fMY9XZcmpjszQ2tCqO_TPaJesaeD_Dv7ptYzgUPGoO0,5631 +fsspec/implementations/smb.py,sha256=k3RtzW97lJtYuw_QpP1rJRFnUBmSsw9twFjUCex0a5U,10591 
+fsspec/implementations/tar.py,sha256=dam78Tp_CozybNqCY2JYgGBS3Uc9FuJUAT9oB0lolOs,4111 +fsspec/implementations/webhdfs.py,sha256=wqVfno7z0TY1HepaIvKTUUcl_bi5NkV6qWsST8t_s7Y,16745 +fsspec/implementations/zip.py,sha256=JDX-3HOI15qUl6VTBsNPuDp5RVN6s2n3Bywd4mMu0T0,4347 +fsspec/mapping.py,sha256=WFEXRWxujQwfzzkRP5tpdIE0265okAtlP97qFZGvV1k,8165 +fsspec/parquet.py,sha256=qVxDhwc960SGOt5etcYAJxCr-7HQKP01687KpDR02Gw,19463 +fsspec/registry.py,sha256=-dl7sh2tsfhMA2uxz5KQDsPFehQTgMJIbVjNq6QLoKU,11145 +fsspec/spec.py,sha256=3t96RgizRN_slIuHXnuR0bXjVUfBS1TfuDrEua4oQvE,66277 +fsspec/tests/abstract/__init__.py,sha256=i1wcFixV6QhOwdoB24c8oXjzobISNqiKVz9kl2DvAY8,10028 +fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/common.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/get.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/put.cpython-310.pyc,, +fsspec/tests/abstract/common.py,sha256=1GQwNo5AONzAnzZj0fWgn8NJPLXALehbsuGxS3FzWVU,4973 +fsspec/tests/abstract/copy.py,sha256=gU5-d97U3RSde35Vp4RxPY4rWwL744HiSrJ8IBOp9-8,19967 +fsspec/tests/abstract/get.py,sha256=vNR4HztvTR7Cj56AMo7_tx7TeYz1Jgr_2Wb8Lv-UiBY,20755 +fsspec/tests/abstract/put.py,sha256=7aih17OKB_IZZh1Mkq1eBDIjobhtMQmI8x-Pw-S_aZk,21201 +fsspec/transaction.py,sha256=jeexB-H6Aw_gN6Z7hoKKe6v8zizITq39-gyTgpipIKE,2251 +fsspec/utils.py,sha256=_VX_0VwDtoAFSjMYrxvJvnPNX9FMoHO5BlFHXJ0bHFI,23053 diff --git a/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..968fea66e533ba30593c7fbfe750c36fae2f3cfe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/fsspec-2024.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +fsspec diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..34b08eaddcc0e2db0c01cf82187d4c7bf64b5c44 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/METADATA @@ -0,0 +1,296 @@ +Metadata-Version: 2.1 +Name: huggingface-hub +Version: 0.22.2 +Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub +Home-page: https://github.com/huggingface/huggingface_hub +Author: Hugging Face, Inc. +Author-email: julien@huggingface.co +License: Apache +Keywords: model-hub machine-learning models natural-language-processing deep-learning pytorch pretrained-models +Platform: UNKNOWN +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: filelock +Requires-Dist: fsspec >=2023.5.0 +Requires-Dist: packaging >=20.9 +Requires-Dist: pyyaml >=5.1 +Requires-Dist: requests +Requires-Dist: tqdm >=4.42.1 +Requires-Dist: typing-extensions >=3.7.4.3 +Provides-Extra: all +Requires-Dist: InquirerPy ==0.3.4 ; extra == 'all' +Requires-Dist: aiohttp ; extra == 'all' +Requires-Dist: minijinja >=1.0 ; extra == 'all' +Requires-Dist: jedi ; extra == 'all' +Requires-Dist: Jinja2 ; extra == 'all' +Requires-Dist: pytest ; extra == 'all' +Requires-Dist: pytest-cov ; extra == 'all' +Requires-Dist: pytest-env ; extra == 'all' +Requires-Dist: pytest-xdist ; extra == 'all' +Requires-Dist: pytest-vcr ; extra == 'all' +Requires-Dist: pytest-asyncio ; extra == 'all' +Requires-Dist: pytest-rerunfailures ; extra == 'all' +Requires-Dist: urllib3 <2.0 ; extra == 'all' +Requires-Dist: soundfile ; extra == 'all' +Requires-Dist: Pillow ; extra == 'all' +Requires-Dist: gradio ; extra == 'all' +Requires-Dist: numpy ; extra == 'all' +Requires-Dist: ruff >=0.3.0 ; extra == 'all' +Requires-Dist: mypy ==1.5.1 ; extra == 'all' +Requires-Dist: typing-extensions >=4.8.0 ; extra == 'all' +Requires-Dist: types-PyYAML ; extra == 'all' +Requires-Dist: types-requests ; extra == 'all' +Requires-Dist: types-simplejson ; extra == 'all' +Requires-Dist: types-toml ; extra == 'all' +Requires-Dist: types-tqdm ; extra == 'all' +Requires-Dist: types-urllib3 ; extra == 'all' +Provides-Extra: cli +Requires-Dist: InquirerPy ==0.3.4 ; extra == 'cli' +Provides-Extra: dev +Requires-Dist: InquirerPy ==0.3.4 ; extra == 'dev' +Requires-Dist: aiohttp ; extra == 'dev' +Requires-Dist: minijinja >=1.0 ; extra == 'dev' +Requires-Dist: jedi ; extra == 'dev' +Requires-Dist: Jinja2 ; extra == 'dev' +Requires-Dist: pytest ; extra == 'dev' +Requires-Dist: pytest-cov ; extra == 'dev' +Requires-Dist: pytest-env ; extra == 'dev' +Requires-Dist: pytest-xdist ; extra == 'dev' +Requires-Dist: pytest-vcr ; extra == 'dev' +Requires-Dist: 
pytest-asyncio ; extra == 'dev' +Requires-Dist: pytest-rerunfailures ; extra == 'dev' +Requires-Dist: urllib3 <2.0 ; extra == 'dev' +Requires-Dist: soundfile ; extra == 'dev' +Requires-Dist: Pillow ; extra == 'dev' +Requires-Dist: gradio ; extra == 'dev' +Requires-Dist: numpy ; extra == 'dev' +Requires-Dist: ruff >=0.3.0 ; extra == 'dev' +Requires-Dist: mypy ==1.5.1 ; extra == 'dev' +Requires-Dist: typing-extensions >=4.8.0 ; extra == 'dev' +Requires-Dist: types-PyYAML ; extra == 'dev' +Requires-Dist: types-requests ; extra == 'dev' +Requires-Dist: types-simplejson ; extra == 'dev' +Requires-Dist: types-toml ; extra == 'dev' +Requires-Dist: types-tqdm ; extra == 'dev' +Requires-Dist: types-urllib3 ; extra == 'dev' +Provides-Extra: fastai +Requires-Dist: toml ; extra == 'fastai' +Requires-Dist: fastai >=2.4 ; extra == 'fastai' +Requires-Dist: fastcore >=1.3.27 ; extra == 'fastai' +Provides-Extra: hf_transfer +Requires-Dist: hf-transfer >=0.1.4 ; extra == 'hf_transfer' +Provides-Extra: inference +Requires-Dist: aiohttp ; extra == 'inference' +Requires-Dist: minijinja >=1.0 ; extra == 'inference' +Provides-Extra: quality +Requires-Dist: ruff >=0.3.0 ; extra == 'quality' +Requires-Dist: mypy ==1.5.1 ; extra == 'quality' +Provides-Extra: tensorflow +Requires-Dist: tensorflow ; extra == 'tensorflow' +Requires-Dist: pydot ; extra == 'tensorflow' +Requires-Dist: graphviz ; extra == 'tensorflow' +Provides-Extra: tensorflow-testing +Requires-Dist: tensorflow ; extra == 'tensorflow-testing' +Requires-Dist: keras <3.0 ; extra == 'tensorflow-testing' +Provides-Extra: testing +Requires-Dist: InquirerPy ==0.3.4 ; extra == 'testing' +Requires-Dist: aiohttp ; extra == 'testing' +Requires-Dist: minijinja >=1.0 ; extra == 'testing' +Requires-Dist: jedi ; extra == 'testing' +Requires-Dist: Jinja2 ; extra == 'testing' +Requires-Dist: pytest ; extra == 'testing' +Requires-Dist: pytest-cov ; extra == 'testing' +Requires-Dist: pytest-env ; extra == 'testing' +Requires-Dist: pytest-xdist ; extra == 'testing' +Requires-Dist: pytest-vcr ; extra == 'testing' +Requires-Dist: pytest-asyncio ; extra == 'testing' +Requires-Dist: pytest-rerunfailures ; extra == 'testing' +Requires-Dist: urllib3 <2.0 ; extra == 'testing' +Requires-Dist: soundfile ; extra == 'testing' +Requires-Dist: Pillow ; extra == 'testing' +Requires-Dist: gradio ; extra == 'testing' +Requires-Dist: numpy ; extra == 'testing' +Provides-Extra: torch +Requires-Dist: torch ; extra == 'torch' +Requires-Dist: safetensors ; extra == 'torch' +Provides-Extra: typing +Requires-Dist: typing-extensions >=4.8.0 ; extra == 'typing' +Requires-Dist: types-PyYAML ; extra == 'typing' +Requires-Dist: types-requests ; extra == 'typing' +Requires-Dist: types-simplejson ; extra == 'typing' +Requires-Dist: types-toml ; extra == 'typing' +Requires-Dist: types-tqdm ; extra == 'typing' +Requires-Dist: types-urllib3 ; extra == 'typing' + +

+<p align="center">huggingface_hub library logo</p>
+
+<p align="center">The official Python client for the Huggingface Hub.</p>
+
+<p align="center">Documentation | GitHub release | PyPi version | downloads | Code coverage</p>
+
+<p align="center">English | Deutsch | हिंदी | 한국어 | 中文(简体)</p>
+
+---
+
+**Documentation**: https://hf.co/docs/huggingface_hub
+
+**Source Code**: https://github.com/huggingface/huggingface_hub
+
+---
+
+## Welcome to the huggingface_hub library
+
+The `huggingface_hub` library allows you to interact with the [Hugging Face Hub](https://huggingface.co/), a platform democratizing open-source Machine Learning for creators and collaborators. Discover pre-trained models and datasets for your projects or play with the thousands of machine learning apps hosted on the Hub. You can also create and share your own models, datasets and demos with the community. The `huggingface_hub` library provides a simple way to do all these things with Python.
+
+## Key features
+
+- [Download files](https://huggingface.co/docs/huggingface_hub/en/guides/download) from the Hub.
+- [Upload files](https://huggingface.co/docs/huggingface_hub/en/guides/upload) to the Hub.
+- [Manage your repositories](https://huggingface.co/docs/huggingface_hub/en/guides/repository).
+- [Run Inference](https://huggingface.co/docs/huggingface_hub/en/guides/inference) on deployed models.
+- [Search](https://huggingface.co/docs/huggingface_hub/en/guides/search) for models, datasets and Spaces (see the short search sketch at the end of this README).
+- [Share Model Cards](https://huggingface.co/docs/huggingface_hub/en/guides/model-cards) to document your models.
+- [Engage with the community](https://huggingface.co/docs/huggingface_hub/en/guides/community) through PRs and comments.
+
+## Installation
+
+Install the `huggingface_hub` package with [pip](https://pypi.org/project/huggingface-hub/):
+
+```bash
+pip install huggingface_hub
+```
+
+If you prefer, you can also install it with [conda](https://huggingface.co/docs/huggingface_hub/en/installation#install-with-conda).
+
+In order to keep the package minimal by default, `huggingface_hub` comes with optional dependencies useful for some use cases. For example, if you want to have a complete experience for Inference, run:
+
+```bash
+pip install huggingface_hub[inference]
+```
+
+To learn more about installation and optional dependencies, check out the [installation guide](https://huggingface.co/docs/huggingface_hub/en/installation).
+
+## Quick start
+
+### Download files
+
+Download a single file
+
+```py
+from huggingface_hub import hf_hub_download
+
+hf_hub_download(repo_id="tiiuae/falcon-7b-instruct", filename="config.json")
+```
+
+Or an entire repository
+
+```py
+from huggingface_hub import snapshot_download
+
+snapshot_download("stabilityai/stable-diffusion-2-1")
+```
+
+Files will be downloaded to a local cache folder. More details in [this guide](https://huggingface.co/docs/huggingface_hub/en/guides/manage-cache).
+
+### Login
+
+The Hugging Face Hub uses tokens to authenticate applications (see [docs](https://huggingface.co/docs/hub/security-tokens)).
To log in on your machine, run the following CLI command:
+
+```bash
+huggingface-cli login
+# or using an environment variable
+huggingface-cli login --token $HUGGINGFACE_TOKEN
+```
+
+### Create a repository
+
+```py
+from huggingface_hub import create_repo
+
+create_repo(repo_id="super-cool-model")
+```
+
+### Upload files
+
+Upload a single file
+
+```py
+from huggingface_hub import upload_file
+
+upload_file(
+    path_or_fileobj="/home/lysandre/dummy-test/README.md",
+    path_in_repo="README.md",
+    repo_id="lysandre/test-model",
+)
+```
+
+Or an entire folder
+
+```py
+from huggingface_hub import upload_folder
+
+upload_folder(
+    folder_path="/path/to/local/space",
+    repo_id="username/my-cool-space",
+    repo_type="space",
+)
+```
+
+For more details, check out the [upload guide](https://huggingface.co/docs/huggingface_hub/en/guides/upload).
+
+## Integrating with the Hub
+
+We're partnering with cool open source ML libraries to provide free model hosting and versioning. You can find the existing integrations [here](https://huggingface.co/docs/hub/libraries).
+
+The advantages are:
+
+- Free model or dataset hosting for libraries and their users.
+- Built-in file versioning, even with very large files, thanks to a git-based approach.
+- Serverless inference API for all publicly available models.
+- In-browser widgets to play with the uploaded models.
+- Anyone can upload a new model for your library; they just need to add the corresponding tag for the model to be discoverable.
+- Fast downloads! We use Cloudfront (a CDN) to geo-replicate downloads so they're blazing fast from anywhere on the globe.
+- Usage stats and more features to come.
+
+If you would like to integrate your library, feel free to open an issue to begin the discussion. We wrote a [step-by-step guide](https://huggingface.co/docs/hub/adding-a-library) with ❤️ showing how to do this integration.
+
+## Contributions (feature requests, bugs, etc.) are super welcome 💙💚💛💜🧡❤️
+
+Everyone is welcome to contribute, and we value everybody's contribution. Code is not the only way to help the community.
+Answering questions, helping others, reaching out, and improving the documentation are immensely valuable to the community.
+We wrote a [contribution guide](https://github.com/huggingface/huggingface_hub/blob/main/CONTRIBUTING.md) to summarize
+how to get started contributing to this repository.
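+As noted in the Key features list above, the Hub can also be searched programmatically.
+A minimal sketch using `HfApi` (the query string and result limit are arbitrary
+illustrative values):
+
+```py
+from huggingface_hub import HfApi
+
+api = HfApi()
+
+# list_models returns an iterator of ModelInfo objects matching the query.
+for model in api.list_models(search="bert", limit=5):
+    print(model.id)
+```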
+ + diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..cf0f5aeceef031c9002f114dac21b942395ffa76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/RECORD @@ -0,0 +1,220 @@ +../../../bin/huggingface-cli,sha256=-OdYm5fxr7GKfY2KQznucsZ0m4uf7x5bZ0sMQIRYKTE,265 +huggingface_hub-0.22.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +huggingface_hub-0.22.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 +huggingface_hub-0.22.2.dist-info/METADATA,sha256=9S0T9nMXn6Q7cesvjVBiSk4oEid8LZ3gT9fuJLGrWNk,12869 +huggingface_hub-0.22.2.dist-info/RECORD,, +huggingface_hub-0.22.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +huggingface_hub-0.22.2.dist-info/entry_points.txt,sha256=Y3Z2L02rBG7va_iE6RPXolIgwOdwUFONyRN3kXMxZ0g,131 +huggingface_hub-0.22.2.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16 +huggingface_hub/__init__.py,sha256=lDulrtedaTz17qYF0AzTHuRanxwgeGlVdq6TxmIcuQQ,31075 +huggingface_hub/__pycache__/__init__.cpython-310.pyc,, +huggingface_hub/__pycache__/_commit_api.cpython-310.pyc,, +huggingface_hub/__pycache__/_commit_scheduler.cpython-310.pyc,, +huggingface_hub/__pycache__/_inference_endpoints.cpython-310.pyc,, +huggingface_hub/__pycache__/_login.cpython-310.pyc,, +huggingface_hub/__pycache__/_multi_commits.cpython-310.pyc,, +huggingface_hub/__pycache__/_snapshot_download.cpython-310.pyc,, +huggingface_hub/__pycache__/_space_api.cpython-310.pyc,, +huggingface_hub/__pycache__/_tensorboard_logger.cpython-310.pyc,, +huggingface_hub/__pycache__/_webhooks_payload.cpython-310.pyc,, +huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc,, +huggingface_hub/__pycache__/community.cpython-310.pyc,, +huggingface_hub/__pycache__/constants.cpython-310.pyc,, +huggingface_hub/__pycache__/errors.cpython-310.pyc,, +huggingface_hub/__pycache__/fastai_utils.cpython-310.pyc,, +huggingface_hub/__pycache__/file_download.cpython-310.pyc,, +huggingface_hub/__pycache__/hf_api.cpython-310.pyc,, +huggingface_hub/__pycache__/hf_file_system.cpython-310.pyc,, +huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc,, +huggingface_hub/__pycache__/inference_api.cpython-310.pyc,, +huggingface_hub/__pycache__/keras_mixin.cpython-310.pyc,, +huggingface_hub/__pycache__/lfs.cpython-310.pyc,, +huggingface_hub/__pycache__/repocard.cpython-310.pyc,, +huggingface_hub/__pycache__/repocard_data.cpython-310.pyc,, +huggingface_hub/__pycache__/repository.cpython-310.pyc,, +huggingface_hub/_commit_api.py,sha256=_6NggUJsCBdspCJjXbvZYXH6vLBOnkG6I2E1kc8aJm8,29194 +huggingface_hub/_commit_scheduler.py,sha256=FgfjYv3E0oK3iBxDdy45Y7t78FWkmjnBR4dRd5aZviU,13653 +huggingface_hub/_inference_endpoints.py,sha256=wGcnxZNFCbMK77SA90fPsZ9bqNGwPopSVr-sTbdw3o8,15763 +huggingface_hub/_login.py,sha256=jNTFCnou-eZAtMWl1PDuyZhmpW3O-f4qff9m5hU0UGk,15364 +huggingface_hub/_multi_commits.py,sha256=rtN0AaHzamXi1cvr1r2MlqR6y-laZxgRo-30J_I3RwM,12519 +huggingface_hub/_snapshot_download.py,sha256=lXsSWaDSOP0hH5_9tOSBazNAtWSBhN8dMcjrLY7hNIc,15677 +huggingface_hub/_space_api.py,sha256=Mae_lqTRyTWyszI5mlObJ2fn9slPxkFPcFTEVADoNQM,5255 +huggingface_hub/_tensorboard_logger.py,sha256=uqmkKBKyj6_9XfWq563afgARenZ7-fjEHb16rgY24-Y,7166 +huggingface_hub/_webhooks_payload.py,sha256=cF9iWOOacOZfqKGcuVhykDgAZHrHON7VMuLwwehl6O8,2832 
+huggingface_hub/_webhooks_server.py,sha256=fDbyDu28qhJJQb8tKpH1C8l4cJSvC3Gr2sUo1DbIoD8,15197 +huggingface_hub/commands/__init__.py,sha256=AkbM2a-iGh0Vq_xAWhK3mu3uZ44km8-X5uWjKcvcrUQ,928 +huggingface_hub/commands/__pycache__/__init__.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/_cli_utils.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/delete_cache.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/download.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/env.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/huggingface_cli.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/lfs.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/scan_cache.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/upload.cpython-310.pyc,, +huggingface_hub/commands/__pycache__/user.cpython-310.pyc,, +huggingface_hub/commands/_cli_utils.py,sha256=qRdl9opi3yJxIVNCnrmte-jFWmYbjVqd8gBlin8NNzY,1971 +huggingface_hub/commands/delete_cache.py,sha256=Rb1BtIltJPnQ-th7tcK_L4mFqfk785t3KXV77xXKBP4,16131 +huggingface_hub/commands/download.py,sha256=yGq9SlTRHR6TBrky_VBIyo68e0gnMLgYPBYIBG7d9bQ,9167 +huggingface_hub/commands/env.py,sha256=yYl4DSS14V8t244nAi0t77Izx5LIdgS_dy6xiV5VQME,1226 +huggingface_hub/commands/huggingface_cli.py,sha256=o862C98OcZoyqCzY7mNpia1h0KaLJUgSb0y10ot8sxA,1924 +huggingface_hub/commands/lfs.py,sha256=6E769AoRxUDiIOapn1_QvTbNtdUnUiouu2F4Gopp4do,7318 +huggingface_hub/commands/scan_cache.py,sha256=4o_jQsZloicRa-P8gncUBncVyWswpSF9T6KGlNrGodk,5183 +huggingface_hub/commands/upload.py,sha256=Mr69qO60otqCVw0sVSBPykUTkL9HO-pkCyulSD2mROM,13622 +huggingface_hub/commands/user.py,sha256=QApZJOCQEHADhjunM3hlQ72uqHsearCiCE4SdpzGdcc,6893 +huggingface_hub/community.py,sha256=SBaOfI-3atCzRbO0gDS8BYxctbdvD4G0X6D0GfY8Fgc,12203 +huggingface_hub/constants.py,sha256=8r0JaNMhLR8X6pC6TnNBLQ-TVcHEbRWk1sJ-LSIj444,7821 +huggingface_hub/errors.py,sha256=jCYKeSOsQNfH2t3TsW8kIAXXS1aWl9PaAq3prFfz4CI,704 +huggingface_hub/fastai_utils.py,sha256=5I7zAfgHJU_mZnxnf9wgWTHrCRu_EAV8VTangDVfE_o,16676 +huggingface_hub/file_download.py,sha256=DmOEVmhEsRnX8M0kZmLPLC76eptMT0riTwtThFioV8Q,77476 +huggingface_hub/hf_api.py,sha256=dJsdtW6WJW04h84BE46FOg8Pp34AgYhg-NbMg9WrReY,367303 +huggingface_hub/hf_file_system.py,sha256=JUCT-VZBesDCB-uN__fvQt3uprGQETGnUlzjC7StQLM,37272 +huggingface_hub/hub_mixin.py,sha256=xkUTJiP5TiAiVj6ts9Thffcm4wufZS3jMWil3LB2uvw,30423 +huggingface_hub/inference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +huggingface_hub/inference/__pycache__/__init__.cpython-310.pyc,, +huggingface_hub/inference/__pycache__/_client.cpython-310.pyc,, +huggingface_hub/inference/__pycache__/_common.cpython-310.pyc,, +huggingface_hub/inference/__pycache__/_templating.cpython-310.pyc,, +huggingface_hub/inference/__pycache__/_types.cpython-310.pyc,, +huggingface_hub/inference/_client.py,sha256=D4O07nYFo7v5lXZ7id3zubV6u8_RxhTVStoeAgEBs-E,104854 +huggingface_hub/inference/_common.py,sha256=BOOBNpF0_V8fDXsi2q5eQ9i4KIlPS6VfMxYxLSgkRdM,16570 +huggingface_hub/inference/_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc,, +huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc,, +huggingface_hub/inference/_generated/_async_client.py,sha256=d7fvZyyBNnG9zivHS7rCwaR7TlETN7JENR-M_ZEo_3c,108053 +huggingface_hub/inference/_generated/types/__init__.py,sha256=SoKeNQ8JjBgKkQIgBTsWJe4-Z75Ebz6WkdL4ZQ27yNc,4450 
+huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc,, +huggingface_hub/inference/_generated/types/audio_classification.py,sha256=wk4kUTLQZoXWLpiUOpKRHRRE-JYqqJlzGVe62VACR-0,1347 +huggingface_hub/inference/_generated/types/audio_to_audio.py,sha256=n7GeCepzt254yoSLsdjrI1j4fzYgjWzxoaKE5gZJc48,881 +huggingface_hub/inference/_generated/types/automatic_speech_recognition.py,sha256=-7UHu5QTGwSrJFnrbMgzeUFpJQOGyTmfK_QHgtnx6j8,5352 +huggingface_hub/inference/_generated/types/base.py,sha256=Cq4gUVtwwLmWyiIIq4NSL8kRk0EWk9QWWHc5Vup2LVg,6213 +huggingface_hub/inference/_generated/types/chat_completion.py,sha256=cI-gWOgqEBnwhxwn21OShkKOs3mbjjzJ1Ow2pzLQEwc,3616 
+huggingface_hub/inference/_generated/types/depth_estimation.py,sha256=lmLmd8S313ZMCG94RblwquL0UN_0hJmXAhWUqSIrtwc,898 +huggingface_hub/inference/_generated/types/document_question_answering.py,sha256=_hBzK4Pu9X_zXsgOO4JNSloIKuVfE5m7eGwEw5YTfZ4,3264 +huggingface_hub/inference/_generated/types/feature_extraction.py,sha256=KerTrRR5YR02X0qBDzrtK8953amCGf_adSUbfWOozD4,664 +huggingface_hub/inference/_generated/types/fill_mask.py,sha256=JcYIbTDXc4f7k2FNY3fCWtJ9ke3HUZFz2pDOOrDuxOs,1714 +huggingface_hub/inference/_generated/types/image_classification.py,sha256=W1QVfc0j7t6qbxjICUQDwygRx43yPPGZKyStogHkHqg,1359 +huggingface_hub/inference/_generated/types/image_segmentation.py,sha256=nVQc5Qhv37qqmTn_M8xegpNgk14ozKelsGIYC8hba_0,1803 +huggingface_hub/inference/_generated/types/image_to_image.py,sha256=MbubS9pD8bFP9LoI4QoQxJwpUGeNXi5iSEk8Ymhbh0M,1797 +huggingface_hub/inference/_generated/types/image_to_text.py,sha256=mloLf-LO7oR_2HbPY1-XMM18BfjMODytRaxTXYkIXoY,4827 +huggingface_hub/inference/_generated/types/object_detection.py,sha256=F8ly6GSE8dupsekPVf6G5nI8teZAIP4iXw6u3zm1JiE,1569 +huggingface_hub/inference/_generated/types/question_answering.py,sha256=xLDy5oA-k9JPncSU6NqPAPb8rWPerfTbU857G3e7JZ0,2884 +huggingface_hub/inference/_generated/types/sentence_similarity.py,sha256=edH-TWfnZ4J0zJD-zqfcRMLwOV0dTt1g5Y0caYnVuPc,1018 +huggingface_hub/inference/_generated/types/summarization.py,sha256=RWCXh7yftI_JWvLsr7JiDpQPexq1klYP158tUICUcbM,1574 +huggingface_hub/inference/_generated/types/table_question_answering.py,sha256=PuVZlR6dI6FEUK7pjMSVMtzkDgrcxdKjfcnDbVmPdSs,1569 +huggingface_hub/inference/_generated/types/text2text_generation.py,sha256=SZYfdhyraG5vZ2Jzm1C8k9w9IYLxMtm5UUu1tU2oOQk,1604 +huggingface_hub/inference/_generated/types/text_classification.py,sha256=vC7B1sBzZ4gdLjE2i2Y7w5cpdaFwQKK1dlWqW0asjIk,1347 +huggingface_hub/inference/_generated/types/text_generation.py,sha256=VHhkhEj-yEVGuy4BYgDNzmAPBPJWL3N1B4n4SUOymNk,5866 +huggingface_hub/inference/_generated/types/text_to_audio.py,sha256=cgvECsiwsycgP9Tfs_GU1CJfo9AngVn6x9s4fHCP-g4,4819 +huggingface_hub/inference/_generated/types/text_to_image.py,sha256=oBGeJ-S9WfsMxVQlvEOll9yaCyMXZ277wsYFD8bt87U,1931 +huggingface_hub/inference/_generated/types/token_classification.py,sha256=7oL8AZOTWtf2bYD2T3236GDNMtUl7FtydaB6We7wbfw,1890 +huggingface_hub/inference/_generated/types/translation.py,sha256=MruCx6yhzQGlxSdBRXCVoEhRzRSa5Ks4bjZ1PDrlTeQ,1562 +huggingface_hub/inference/_generated/types/video_classification.py,sha256=BI2_PP-pxLT6w9TuX6QCZz4BsG-ZukTXnW6fWMchI5M,1579 +huggingface_hub/inference/_generated/types/visual_question_answering.py,sha256=0PHNnjwxxHvG3SjOz7O7DckbBeGYDsRmlagG11qIkkM,1667 +huggingface_hub/inference/_generated/types/zero_shot_classification.py,sha256=u6jfFCqDv9XqeAN5E9_Xf7jqMZgqTRFF_S9PtWbiBUk,1963 +huggingface_hub/inference/_generated/types/zero_shot_image_classification.py,sha256=qVH6Ms0FjF8TraGy4BYiS8lmvGq9xiIDdXqGFynLHMA,1689 +huggingface_hub/inference/_generated/types/zero_shot_object_detection.py,sha256=PU4OOlQ2aAOosW2JlG2Z27MEQpmE6BxcygH_ns3w1KQ,1662 +huggingface_hub/inference/_templating.py,sha256=_X7CoUjOmMh5KBXx-WGey-z3uh_fP1QT36X638UZpZw,4051 +huggingface_hub/inference/_types.py,sha256=C73l5-RO8P1UMBHF8OAO9CRUq7Xdv33pcADoJsGMPSU,1782 +huggingface_hub/inference_api.py,sha256=UXOKu_Ez2I3hDsjguqCcCrj03WFDndehpngYiIAucdg,8331 +huggingface_hub/keras_mixin.py,sha256=8L0FEIWy_kmKsGI5d61q_33dGYbmLGhy4kZbqn-YFns,19681 +huggingface_hub/lfs.py,sha256=p61RJK13gtgdu0og4KHFosy_GWYDFsQJa0JJoLYSLAk,19592 
+huggingface_hub/repocard.py,sha256=oUrGim27nCHkevPDZDbUp68uKTxB8xbdoyeqv24pexc,34605 +huggingface_hub/repocard_data.py,sha256=1hIkI8xp0EmW2aR3LtHMrjIMk_W-KJxHslMjpNMwVPg,31911 +huggingface_hub/repository.py,sha256=8oNhKNvJRye3dr67cTn8faKkBSiWFgvj7bIBlOpI-8U,54489 +huggingface_hub/serialization/__init__.py,sha256=W74TaCtYnMfpvGEQr1SS-OBmqPUFnM9AeWT9hTJCG9Y,910 +huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc,, +huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc,, +huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc,, +huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc,, +huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc,, +huggingface_hub/serialization/_base.py,sha256=AgO-16i-vyosbERnLSCFYgaXbVqQDM7xfIne8gsWrLQ,7133 +huggingface_hub/serialization/_numpy.py,sha256=idULJp1js6L6E8o-MiGVqNa4lBfXS2cfAmqivnpsaYs,2671 +huggingface_hub/serialization/_tensorflow.py,sha256=Rf4kw1NYxEaoUXB8aLtQLHrTjgobaEAJdzO0w0kbP58,3559 +huggingface_hub/serialization/_torch.py,sha256=xYR6e_G9laMTroWLiQRABSuloTQuuRSQNyYHdT_rmXU,7687 +huggingface_hub/templates/datasetcard_template.md,sha256=W-EMqR6wndbrnZorkVv56URWPG49l7MATGeI015kTvs,5503 +huggingface_hub/templates/modelcard_template.md,sha256=4AqArS3cqdtbit5Bo-DhjcnDFR-pza5hErLLTPM4Yuc,6870 +huggingface_hub/utils/__init__.py,sha256=XYIIkKiDeoy8dQQegBYSfILOzdt8NW_cfquY7omX0fQ,3478 +huggingface_hub/utils/__pycache__/__init__.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_cache_assets.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_cache_manager.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_chunk_utils.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_datetime.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_deprecation.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_errors.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_experimental.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_fixes.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_git_credential.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_headers.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_hf_folder.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_http.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_runtime.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_safetensors.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_token.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/_validators.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/endpoint_helpers.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/logging.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/sha.cpython-310.pyc,, +huggingface_hub/utils/__pycache__/tqdm.cpython-310.pyc,, +huggingface_hub/utils/_cache_assets.py,sha256=kai77HPQMfYpROouMBQCr_gdBCaeTm996Sqj0dExbNg,5728 +huggingface_hub/utils/_cache_manager.py,sha256=Fs1XVP1UGzUTogMfMfEi_MfpURzHyW__djX0s2oLmrY,29307 +huggingface_hub/utils/_chunk_utils.py,sha256=kRCaj5228_vKcyLWspd8Xq01f17Jz6ds5Sr9ed5d_RU,2130 +huggingface_hub/utils/_datetime.py,sha256=DHnktKm1taeOe2XCBgNU4pVck5d70qu8FJ7nACD6C3k,2554 
+huggingface_hub/utils/_deprecation.py,sha256=HZhRGGUX_QMKBBBwHHlffLtmCSK01TOpeXHefZbPfwI,4872 +huggingface_hub/utils/_errors.py,sha256=N5nUkCCaj8393wntazeTcKNrwDZfsDVHVMxxreHPfaE,15141 +huggingface_hub/utils/_experimental.py,sha256=crCPH6k6-11wwH2GZuZzZzZbjUotay49ywV1SSJhMHM,2395 +huggingface_hub/utils/_fixes.py,sha256=EqG7u36J9C3NtL5VukDilca90GV9idrENzsEVhtdbI4,2829 +huggingface_hub/utils/_git_credential.py,sha256=SDdsiREr1TcAR2Ze2TB0E5cYzVJgvDZrs60od9lAsMc,4596 +huggingface_hub/utils/_headers.py,sha256=T_C1RA0bqEYL0oiE4WdFMAKXEUPHN-D43vchjiwKcZ4,9643 +huggingface_hub/utils/_hf_folder.py,sha256=gWH-TT9h_6X_CyrtLTtKNEawf9kKlCHraFiOu09BuLk,3613 +huggingface_hub/utils/_http.py,sha256=VQcukUKXXDlDQwyG-LGVtAIr3DprVC-R_HcXZzbAfak,13543 +huggingface_hub/utils/_pagination.py,sha256=hzLFLd8i_DKkPRVYzOx2CxLt5lcocEiAxDJriQUjAjY,1841 +huggingface_hub/utils/_paths.py,sha256=Ah_diO-gSWw9TYylJl_HNB2XXftgIi36HNlKAYQHCms,4398 +huggingface_hub/utils/_runtime.py,sha256=6PxkDPWj3ltRlE2-zAr3vZTXfi1OUjxILsmRN-s_wZY,10851 +huggingface_hub/utils/_safetensors.py,sha256=EE9v9HflWBUqIegn0dCGHgNu9G9Db3v2aszvG4ldPF8,4876 +huggingface_hub/utils/_subprocess.py,sha256=34ETD8JvLzm16NRZHciaCLXdE9aRyxuDdOA5gdNvMJ8,4617 +huggingface_hub/utils/_telemetry.py,sha256=jHAdgWNcL9nVvMT3ec3i78O-cwL09GnlifuokzpQjMI,4641 +huggingface_hub/utils/_token.py,sha256=cxBZaafW2IsJ2dKWd55v7056zycW1ewp_nPk8dNcSO4,5476 +huggingface_hub/utils/_typing.py,sha256=pXh7GtVtSBD_Fvvthex9BRTAJZ6bWScUOw06oJS0Lek,2025 +huggingface_hub/utils/_validators.py,sha256=otFT4xT3s_E_-jrzH4NR7xWgK7UlRkwk_KAI9XK1mb0,9359 +huggingface_hub/utils/endpoint_helpers.py,sha256=n_VguR_L2Vl6Mi_4PFO2iAd5xaPeQRiD8KRBpzs4nMw,9536 +huggingface_hub/utils/insecure_hashlib.py,sha256=OjxlvtSQHpbLp9PWSrXBDJ0wHjxCBU-SQJgucEEXDbU,1058 +huggingface_hub/utils/logging.py,sha256=Cp03s0uEl3kDM9XHQW9a8GAoExODQ-e7kEtgMt-_To8,4728 +huggingface_hub/utils/sha.py,sha256=QLlIwPCyz46MmUc_4L8xl87KfYoBks9kPgsMZ5JCz-o,902 +huggingface_hub/utils/tqdm.py,sha256=2H80n_kDpvp7P4i7MaYR47t41i0l6ODi5mab1oof1dk,6335 diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/entry_points.txt b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..eb3dafd90f19de60b3e520aeaf8132402980214d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/entry_points.txt @@ -0,0 +1,6 @@ +[console_scripts] +huggingface-cli = huggingface_hub.commands.huggingface_cli:main + +[fsspec.specs] +hf=huggingface_hub.HfFileSystem + diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..6b964ccca3c1b6766042b3fe3b2707ba25372924 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub-0.22.2.dist-info/top_level.txt @@ -0,0 +1 @@ +huggingface_hub diff --git 
a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..268c00e0364506fa384d9b019001de22beee5332 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..014c08420ff310d41f114d611d3b4c3743f0aa51 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/METADATA @@ -0,0 +1,370 @@ +Metadata-Version: 2.1 +Name: pathvalidate +Version: 3.2.0 +Summary: pathvalidate is a Python library to sanitize/validate a string such as filenames/file-paths/etc. 
+Home-page: https://github.com/thombashi/pathvalidate
+Author: Tsuyoshi Hombashi
+Author-email: tsuyoshi.hombashi@gmail.com
+License: MIT License
+Project-URL: Documentation, https://pathvalidate.rtfd.io/
+Project-URL: Source, https://github.com/thombashi/pathvalidate
+Project-URL: Tracker, https://github.com/thombashi/pathvalidate/issues
+Project-URL: Changelog, https://github.com/thombashi/pathvalidate/releases
+Keywords: file,path,validation,validator,sanitization,sanitizer
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Filesystems
+Classifier: Topic :: Text Processing
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Provides-Extra: docs
+Requires-Dist: sphinx-rtd-theme >=1.2.2 ; extra == 'docs'
+Requires-Dist: Sphinx >=2.4 ; extra == 'docs'
+Requires-Dist: urllib3 <2 ; extra == 'docs'
+Provides-Extra: test
+Requires-Dist: allpairspy >=2 ; extra == 'test'
+Requires-Dist: click >=6.2 ; extra == 'test'
+Requires-Dist: Faker >=1.0.8 ; extra == 'test'
+Requires-Dist: pytest >=6.0.1 ; extra == 'test'
+Requires-Dist: pytest-md-report >=0.4.1 ; extra == 'test'
+Requires-Dist: pytest-discord >=0.1.4 ; (python_version >= "3.7") and extra == 'test'
+
+.. contents:: **pathvalidate**
+   :backlinks: top
+   :depth: 2
+
+Summary
+=========
+`pathvalidate <https://github.com/thombashi/pathvalidate>`__ is a Python library to sanitize/validate a string such as filenames/file-paths/etc.
+
+.. image:: https://badge.fury.io/py/pathvalidate.svg
+    :target: https://badge.fury.io/py/pathvalidate
+    :alt: PyPI package version
+
+.. image:: https://anaconda.org/thombashi/pathvalidate/badges/version.svg
+    :target: https://anaconda.org/thombashi/pathvalidate
+    :alt: conda package version
+
+.. image:: https://img.shields.io/pypi/pyversions/pathvalidate.svg
+    :target: https://pypi.org/project/pathvalidate
+    :alt: Supported Python versions
+
+.. image:: https://img.shields.io/pypi/implementation/pathvalidate.svg
+    :target: https://pypi.org/project/pathvalidate
+    :alt: Supported Python implementations
+
+.. image:: https://github.com/thombashi/pathvalidate/workflows/Tests/badge.svg
+    :target: https://github.com/thombashi/pathvalidate/actions?query=workflow%3ATests
+    :alt: Linux/macOS/Windows CI status
+
+.. image:: https://coveralls.io/repos/github/thombashi/pathvalidate/badge.svg?branch=master
+    :target: https://coveralls.io/github/thombashi/pathvalidate?branch=master
+    :alt: Test coverage: coveralls
+
+
+.. image:: https://github.com/thombashi/pathvalidate/actions/workflows/github-code-scanning/codeql/badge.svg
+    :target: https://github.com/thombashi/pathvalidate/actions/workflows/github-code-scanning/codeql
+    :alt: CodeQL
+
+Features
+---------
+- Sanitize/Validate a string as a:
+    - file name
+    - file path
+- Sanitize will do:
+    - Remove invalid characters for a target platform
+    - Replace reserved names for a target platform
+    - Normalize
+    - Remove unprintable characters
+- Argument validator/sanitizer for ``argparse`` and ``click``
+- Multi platform support:
+    - ``Linux``
+    - ``Windows``
+    - ``macOS``
+    - ``POSIX``
+    - ``universal`` (platform independent)
+- Multibyte character support
+
+Examples
+==========
+Sanitize a filename
+---------------------
+:Sample Code:
+    .. code-block:: python
+
+        from pathvalidate import sanitize_filename
+
+        fname = "fi:l*e/p\"a?t>h|.t<xt"
+        print(f"{fname} -> {sanitize_filename(fname)}\n")
+
+        fname = "\0_a*b:c<d>e%f/(g)h+i_0.txt"
+        print(f"{fname} -> {sanitize_filename(fname)}\n")
+
+:Output:
+    .. code-block::
+
+        fi:l*e/p"a?t>h|.t<xt -> filepath.txt
+
+        _a*b:c<d>e%f/(g)h+i_0.txt -> _abcde%f(g)h+i_0.txt
+
+The default target ``platform`` is ``universal``,
+i.e., the sanitized file name is valid for any platform.
+
+Sanitize a filepath
+---------------------
+:Sample Code:
+    .. code-block:: python
+
+        from pathvalidate import sanitize_filepath
+
+        fpath = "fi:l*e/p\"a?t>h|.t<xt"
+        print(f"{fpath} -> {sanitize_filepath(fpath)}\n")
+
+        fpath = "\0_a*b:c<d>e%f/(g)h+i_0.txt"
+        print(f"{fpath} -> {sanitize_filepath(fpath)}\n")
+
+:Output:
+    .. code-block::
+
+        fi:l*e/p"a?t>h|.t<xt -> file/path.txt
+
+        _a*b:c<d>e%f/(g)h+i_0.txt -> _abcde%f/(g)h+i_0.txt
+
+Validate a filename
+---------------------
+:Sample Code:
+    .. code-block:: python
+
+        import sys
+        from pathvalidate import ValidationError, validate_filename
+
+        try:
+            validate_filename("fi:l*e/p\"a?t>h|.t<xt")
+        except ValidationError as e:
+            print(f"{e}\n", file=sys.stderr)
+
+:Output:
+    .. code-block::
+
+        [PV1100] invalid characters found: invalids=(':', '*', '/', '"', '?', '>', '|', '<'), value='fi:l*e/p"a?t>h|.t<xt', platform=universal
+
+filename/filepath validator for ``click``
+-------------------------------------------
+:Sample Code:
+    .. code-block:: python
+
+        import click
+
+        from pathvalidate.click import validate_filename_arg, validate_filepath_arg
+
+
+        @click.command()
+        @click.option("--filename", callback=validate_filename_arg)
+        @click.option("--filepath", callback=validate_filepath_arg)
+        def cli(filename: str, filepath: str) -> None:
+            if filename:
+                click.echo(f"filename: {filename}")
+            if filepath:
+                click.echo(f"filepath: {filepath}")
+
+
+        if __name__ == "__main__":
+            cli()
+
+:Output:
+    .. code-block::
+
+        $ ./examples/click_validate.py --filename ab
+        filename: ab
+        $ ./examples/click_validate.py --filepath e?g
+        Usage: click_validate.py [OPTIONS]
+        Try 'click_validate.py --help' for help.
+
+        Error: Invalid value for '--filename': [PV1100] invalid characters found: invalids=('?'), value='e?g', platform=Windows
+
+filename/filepath sanitizer for ``click``
+-------------------------------------------
+:Sample Code:
+    .. code-block:: python
+
+        import click
+
+        from pathvalidate.click import sanitize_filename_arg, sanitize_filepath_arg
+
+
+        @click.command()
+        @click.option("--filename", callback=sanitize_filename_arg)
+        @click.option("--filepath", callback=sanitize_filepath_arg)
+        def cli(filename, filepath):
+            if filename:
+                click.echo(f"filename: {filename}")
+            if filepath:
+                click.echo(f"filepath: {filepath}")
+
+
+        if __name__ == "__main__":
+            cli()
+
+:Output:
+    .. code-block::
+
+        $ ./examples/click_sanitize.py --filename a/b
+        filename: ab
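+
+filename/filepath validator for ``argparse``
+----------------------------------------------
+pathvalidate also ships ``argparse`` equivalents of the ``click`` helpers
+above. The snippet below is a minimal sketch (an illustration, not an
+excerpt from the official examples), assuming the ``validate_filename_arg``
+and ``validate_filepath_arg`` callbacks exported by ``pathvalidate.argparse``:
+
+:Sample Code:
+    .. code-block:: python
+
+        from argparse import ArgumentParser
+
+        from pathvalidate.argparse import validate_filename_arg, validate_filepath_arg
+
+        parser = ArgumentParser()
+        # The callbacks raise an argparse error when the value is invalid.
+        parser.add_argument("--filename", type=validate_filename_arg)
+        parser.add_argument("--filepath", type=validate_filepath_arg)
+        options = parser.parse_args()
+
+        if options.filename:
+            print(f"filename: {options.filename}")
+        if options.filepath:
+            print(f"filepath: {options.filepath}")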
+
+For more information
+----------------------
+More examples can be found at
+https://pathvalidate.rtfd.io/en/latest/pages/examples/index.html
+
+Installation
+============
+Installation: pip
+------------------------------
+::
+
+    pip install pathvalidate
+
+Installation: conda
+------------------------------
+::
+
+    conda install -c thombashi pathvalidate
+
+Installation: apt
+------------------------------
+::
+
+    sudo add-apt-repository ppa:thombashi/ppa
+    sudo apt update
+    sudo apt install python3-pathvalidate
+
+
+Dependencies
+============
+Python 3.7+
+No external dependencies.
+
+Documentation
+===============
+https://pathvalidate.rtfd.io/
+
+Sponsors
+====================================
+.. image:: https://avatars.githubusercontent.com/u/44389260?s=48&u=6da7176e51ae2654bcfd22564772ef8a3bb22318&v=4
+    :target: https://github.com/chasbecker
+    :alt: Charles Becker (chasbecker)
+.. image:: https://avatars.githubusercontent.com/u/9919?s=48&v=4
+    :target: https://github.com/github
+    :alt: onetime: GitHub (github)
+.. image:: https://avatars.githubusercontent.com/u/46711571?s=48&u=57687c0e02d5d6e8eeaf9177f7b7af4c9f275eb5&v=4
+    :target: https://github.com/Arturi0
+    :alt: onetime: Arturi0
+.. image:: https://avatars.githubusercontent.com/u/3658062?s=48&v=4
+    :target: https://github.com/b4tman
+    :alt: onetime: Dmitry Belyaev (b4tman)
+
+`Become a sponsor <https://github.com/sponsors/thombashi>`__
+
 diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..472d39f5440216b77e622c081c7c20454a991771
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD
@@ -0,0 +1,35 @@
+pathvalidate-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pathvalidate-3.2.0.dist-info/LICENSE,sha256=qT11vLB3TimQEGOAytrW3LLeGTxV1DX_xWujRaCLHcI,1084
+pathvalidate-3.2.0.dist-info/METADATA,sha256=Kc0RTAOHjVPeTIb-Fv8g162B0RcyDzI_Jj2nD9J8Gdk,11747
+pathvalidate-3.2.0.dist-info/RECORD,,
+pathvalidate-3.2.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+pathvalidate-3.2.0.dist-info/top_level.txt,sha256=AtoiECsrk-xZknk3ruLi-UweWuXhbKeEGDWFwMcK_ks,13
+pathvalidate/__init__.py,sha256=R8x0yEBF3dfwpTlGe1TJZ9XgOmO-tKGoEvpZgNA83Ys,1926
+pathvalidate/__pycache__/__init__.cpython-310.pyc,,
+pathvalidate/__pycache__/__version__.cpython-310.pyc,,
+pathvalidate/__pycache__/_base.cpython-310.pyc,,
+pathvalidate/__pycache__/_common.cpython-310.pyc,,
+pathvalidate/__pycache__/_const.cpython-310.pyc,,
+pathvalidate/__pycache__/_filename.cpython-310.pyc,,
+pathvalidate/__pycache__/_filepath.cpython-310.pyc,,
+pathvalidate/__pycache__/_ltsv.cpython-310.pyc,,
+pathvalidate/__pycache__/_symbol.cpython-310.pyc,,
+pathvalidate/__pycache__/_types.cpython-310.pyc,,
+pathvalidate/__pycache__/argparse.cpython-310.pyc,,
+pathvalidate/__pycache__/click.cpython-310.pyc,,
+pathvalidate/__pycache__/error.cpython-310.pyc,,
+pathvalidate/__pycache__/handler.cpython-310.pyc,,
+pathvalidate/__version__.py,sha256=R8MJHDvfFVYjKEFUDzFulsQ9h1EhLDaHtPVwKRedF-E,201
+pathvalidate/_base.py,sha256=NsynjO1IqYaG6rTbGkMx77OIfcUGSv51jLvMvIyyA1A,7443
+pathvalidate/_common.py,sha256=4JLadI56z-1xST0kfgjtiGMWCkmdlcfdrnZn5wIg_9k,3363
+pathvalidate/_const.py,sha256=UzAu38QxKjZDJEcJ-M99sQDnSpALIK7jJoZizFptiBw,686
+pathvalidate/_filename.py,sha256=YEhwJKEq73kLkqInYjbiagGO22q0iswiISzignbWZXE,17356 +pathvalidate/_filepath.py,sha256=z-QgwCNhy8KY6M8hK8JGeUh3YO-P4_7qAE1p9_LFSXc,18915 +pathvalidate/_ltsv.py,sha256=BuCgH-iLdptUbaghoLCXwk7DQFGBBFjuNGeDv2I0IsM,1203 +pathvalidate/_symbol.py,sha256=8kcG9D7IWCdfw3x18I8qSmA09vpHfQB2suVtMloGu28,2326 +pathvalidate/_types.py,sha256=3CRkyBkMvcPcFPigO-Kr18Z6RgGEgUdLK1cXBg8UjWc,180 +pathvalidate/argparse.py,sha256=z_z7inal8sw2wPwFjsMEMQ2zR3kACdK1qsItocXFf3Y,970 +pathvalidate/click.py,sha256=IvaOB4R7ivR3GNPGaROAzOGBcROWIIsZKADJ08hxab4,1077 +pathvalidate/error.py,sha256=t6ePXdcW3ALnv0c_iEDtjLA8hS7USopJamttH5bmnmQ,7531 +pathvalidate/handler.py,sha256=RDOka3TjLz91yqQdLirQmjhFyEt5PVepk6kmGAAes8o,3268 +pathvalidate/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..7af485495e2b17cae08c465174724a368c6f087a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pathvalidate diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..fff5008e36be4ff7d4994d6f84c62e89c8b1ac8e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/METADATA @@ -0,0 +1,212 @@ +Metadata-Version: 2.1 +Name: peft +Version: 0.10.0 +Summary: Parameter-Efficient Fine-Tuning (PEFT) +Home-page: https://github.com/huggingface/peft +Author: The HuggingFace team +Author-email: sourab@huggingface.co +License: Apache +Keywords: deep learning +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: numpy (>=1.17) +Requires-Dist: packaging (>=20.0) +Requires-Dist: psutil +Requires-Dist: pyyaml +Requires-Dist: torch (>=1.13.0) +Requires-Dist: transformers +Requires-Dist: tqdm +Requires-Dist: accelerate (>=0.21.0) +Requires-Dist: safetensors +Requires-Dist: huggingface-hub (>=0.17.0) +Provides-Extra: dev +Requires-Dist: black ; extra == 'dev' +Requires-Dist: hf-doc-builder ; extra == 'dev' +Requires-Dist: ruff (~=0.2.1) ; extra == 'dev' +Provides-Extra: docs_specific +Requires-Dist: black ; extra == 'docs_specific' +Requires-Dist: hf-doc-builder ; extra == 'docs_specific' +Provides-Extra: quality +Requires-Dist: black ; extra == 'quality' +Requires-Dist: hf-doc-builder ; extra == 'quality' +Requires-Dist: ruff (~=0.2.1) ; extra == 'quality' +Provides-Extra: test +Requires-Dist: black ; extra == 'test' +Requires-Dist: hf-doc-builder ; extra == 'test' +Requires-Dist: ruff (~=0.2.1) ; extra == 'test' +Requires-Dist: pytest ; extra == 'test' +Requires-Dist: pytest-cov ; extra == 'test' +Requires-Dist: pytest-xdist ; extra == 'test' +Requires-Dist: parameterized ; extra == 'test' +Requires-Dist: datasets ; extra == 'test' +Requires-Dist: diffusers (<0.21.0) ; extra == 'test' +Requires-Dist: scipy ; extra == 'test' + + + +

+# 🤗 PEFT
+
+State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods
+
+ +Fine-tuning large pretrained models is often prohibitively costly due to their scale. Parameter-Efficient Fine-Tuning (PEFT) methods enable efficient adaptation of large pretrained models to various downstream applications by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. This significantly decreases the computational and storage costs. Recent state-of-the-art PEFT techniques achieve performance comparable to fully fine-tuned models. + +PEFT is integrated with Transformers for easy model training and inference, Diffusers for conveniently managing different adapters, and Accelerate for distributed training and inference for really big models. + +> [!TIP] +> Visit the [PEFT](https://huggingface.co/PEFT) organization to read about the PEFT methods implemented in the library and to see notebooks demonstrating how to apply these methods to a variety of downstream tasks. Click the "Watch repos" button on the organization page to be notified of newly implemented methods and notebooks! + +Check the PEFT Adapters API Reference section for a list of supported PEFT methods, and read the [Adapters](https://huggingface.co/docs/peft/en/conceptual_guides/adapter), [Soft prompts](https://huggingface.co/docs/peft/en/conceptual_guides/prompting), and [IA3](https://huggingface.co/docs/peft/en/conceptual_guides/ia3) conceptual guides to learn more about how these methods work. + +## Quickstart + +Install PEFT from pip: + +```bash +pip install peft +``` + +Prepare a model for training with a PEFT method such as LoRA by wrapping the base model and PEFT configuration with `get_peft_model`. For the bigscience/mt0-large model, you're only training 0.19% of the parameters! + +```python +from transformers import AutoModelForSeq2SeqLM +from peft import get_peft_config, get_peft_model, LoraConfig, TaskType +model_name_or_path = "bigscience/mt0-large" +tokenizer_name_or_path = "bigscience/mt0-large" + +peft_config = LoraConfig( + task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 +) + +model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) +model = get_peft_model(model, peft_config) +model.print_trainable_parameters() +"trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282" +``` + +To load a PEFT model for inference: + +```py +from peft import AutoPeftModelForCausalLM +from transformers import AutoTokenizer +import torch + +model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora").to("cuda") +tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + +model.eval() +inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt") + +outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50) +print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + +"Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla." +``` + +## Why you should use PEFT + +There are many benefits of using PEFT but the main one is the huge savings in compute and storage, making PEFT applicable to many different use cases. 
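+
+As a quick illustration of the storage side, here is a minimal sketch (not from the official docs) that continues the quickstart example above, where `model` is the LoRA-wrapped bigscience/mt0-large; the output directory name is just an example:
+
+```python
+# Saving a PEFT model writes only the adapter weights (a few MB),
+# not a full copy of the frozen base model.
+model.save_pretrained("mt0-large-lora")
+
+# Later, re-attach the saved adapter to a freshly loaded base model.
+from transformers import AutoModelForSeq2SeqLM
+from peft import PeftModel
+
+base_model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large")
+model = PeftModel.from_pretrained(base_model, "mt0-large-lora")
+```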
+
+### High performance on consumer hardware
+
+Consider the memory requirements for training the following models on the [ought/raft/twitter_complaints](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) dataset with an A100 80GB GPU with more than 64GB of CPU RAM.
+
+| Model | Full Finetuning | PEFT-LoRA PyTorch | PEFT-LoRA DeepSpeed with CPU Offloading |
+| --------- | ---- | ---- | ---- |
+| bigscience/T0_3B (3B params) | 47.14GB GPU / 2.96GB CPU | 14.4GB GPU / 2.96GB CPU | 9.8GB GPU / 17.8GB CPU |
+| bigscience/mt0-xxl (12B params) | OOM GPU | 56GB GPU / 3GB CPU | 22GB GPU / 52GB CPU |
+| bigscience/bloomz-7b1 (7B params) | OOM GPU | 32GB GPU / 3.8GB CPU | 18.1GB GPU / 35GB CPU |
+
+With LoRA you can fully finetune a 12B parameter model that would've otherwise run out of memory on the 80GB GPU, and comfortably fit and train a 3B parameter model. When you look at the 3B parameter model's performance, it is comparable to a fully finetuned model at a fraction of the GPU memory.
+
+| Submission Name | Accuracy |
+| --------- | ---- |
+| Human baseline (crowdsourced) | 0.897 |
+| Flan-T5 | 0.892 |
+| lora-t0-3b | 0.863 |
+
+> [!TIP]
+> The bigscience/T0_3B model performance isn't optimized in the table above. You can squeeze even more performance out of it by playing around with the input instruction templates, LoRA hyperparameters, and other training related hyperparameters. The final checkpoint size of this model is just 19MB compared to 11GB of the full bigscience/T0_3B model. Learn more about the advantages of finetuning with PEFT in this [blog post](https://www.philschmid.de/fine-tune-flan-t5-peft).
+
+### Quantization
+
+Quantization is another method for reducing the memory requirements of a model by representing the data in a lower precision. It can be combined with PEFT methods to make it even easier to train and load LLMs for inference; see the sketch after the links below.
+
+* Learn how to finetune [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) with QLoRA and the [TRL](https://huggingface.co/docs/trl/index) library on a 16GB GPU in the [Finetune LLMs on your own consumer hardware using tools from PyTorch and Hugging Face ecosystem](https://pytorch.org/blog/finetune-llms/) blog post.
+* Learn how to finetune an [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition with LoRA and 8-bit quantization in this [notebook](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) (see this [notebook](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing) instead for an example of streaming a dataset).
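+
+The following is a minimal sketch (an illustration, not taken from the linked posts) of combining 4-bit quantization with LoRA; it assumes `bitsandbytes` is installed and uses facebook/opt-350m purely as an example model:
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+from peft import LoraConfig, get_peft_model
+
+# Load the frozen base model in 4-bit NF4 precision.
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16,
+)
+model = AutoModelForCausalLM.from_pretrained(
+    "facebook/opt-350m", quantization_config=bnb_config
+)
+
+# Only the (full-precision) LoRA adapter parameters receive gradients.
+peft_config = LoraConfig(task_type="CAUSAL_LM", r=8, lora_alpha=32, lora_dropout=0.1)
+model = get_peft_model(model, peft_config)
+model.print_trainable_parameters()
+```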
+
+### Save compute and storage
+
+PEFT can help you save storage by avoiding full finetuning of models on each downstream task or dataset. In many cases, you're only finetuning a very small fraction of a model's parameters and each checkpoint is only a few MBs in size (instead of GBs). These smaller PEFT adapters demonstrate performance comparable to a fully finetuned model. If you have many datasets, you can save a lot of storage with a PEFT model and not have to worry about catastrophic forgetting or overfitting the backbone or base model.
+
+## PEFT integrations
+
+PEFT is widely supported across the Hugging Face ecosystem because of the massive efficiency it brings to training and inference.
+
+### Diffusers
+
+The iterative diffusion process consumes a lot of memory which can make it difficult to train. PEFT can help reduce the memory requirements and the storage size of the final model checkpoint. For example, consider the memory required for training a Stable Diffusion model with LoRA on an A100 80GB GPU with more than 64GB of CPU RAM. The final model checkpoint size is only 8.8MB!
+
+| Model | Full Finetuning | PEFT-LoRA | PEFT-LoRA with Gradient Checkpointing |
+| --------- | ---- | ---- | ---- |
+| CompVis/stable-diffusion-v1-4 | 27.5GB GPU / 3.97GB CPU | 15.5GB GPU / 3.84GB CPU | 8.12GB GPU / 3.77GB CPU |
+
+> [!TIP]
+> Take a look at the [examples/lora_dreambooth/train_dreambooth.py](examples/lora_dreambooth/train_dreambooth.py) training script to try training your own Stable Diffusion model with LoRA, and play around with the [smangrul/peft-lora-sd-dreambooth](https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth) Space which is running on a T4 instance. Learn more about the PEFT integration in Diffusers in this [tutorial](https://huggingface.co/docs/peft/main/en/tutorial/peft_integrations#diffusers).
+
+### Accelerate
+
+[Accelerate](https://huggingface.co/docs/accelerate/index) is a library for distributed training and inference on various training setups and hardware (GPUs, TPUs, Apple Silicon, etc.). PEFT models work with Accelerate out of the box, making it convenient to train really large models or use them for inference on consumer hardware with limited resources.
+
+### TRL
+
+PEFT can also be applied to training LLMs with RLHF components such as the ranker and policy. Get started by reading:
+
+* [Fine-tune a Mistral-7b model with Direct Preference Optimization](https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library to learn more about the Direct Preference Optimization (DPO) method and how to apply it to an LLM.
+* [Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU](https://huggingface.co/blog/trl-peft) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library, and then try out the [gpt2-sentiment_peft.ipynb](https://github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook to optimize GPT2 to generate positive movie reviews.
+* [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama) with PEFT, and then try out the [stack_llama/scripts](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts) for supervised finetuning, reward modeling, and RL finetuning.
+
+## Model support
+
+Use this [Space](https://stevhliu-peft-methods.hf.space) or check out the [docs](https://huggingface.co/docs/peft/main/en/index) to find which models officially support a PEFT method out of the box. Even if you don't see a model listed, you can manually configure the model config to enable PEFT for a model. Read the [New transformers architecture](https://huggingface.co/docs/peft/main/en/developer_guides/custom_models#new-transformers-architectures) guide to learn how.
+
+## Contribute
+
+If you would like to contribute to PEFT, please check out our [contribution guide](https://huggingface.co/docs/peft/developer_guides/contributing).
+
+## Citing 🤗 PEFT
+
+To use 🤗 PEFT in your publication, please cite it by using the following BibTeX entry.
+ +```bibtex +@Misc{peft, + title = {PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods}, + author = {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan}, + howpublished = {\url{https://github.com/huggingface/peft}}, + year = {2022} +} +``` diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..d8a9ed582072de582eb8b79a65d99d2b4da41700 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/RECORD @@ -0,0 +1,157 @@ +peft-0.10.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +peft-0.10.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 +peft-0.10.0.dist-info/METADATA,sha256=rVMHyukd8k_nEqs19E5ZPaco9GqrkHrI1m6LGW3ZTc4,13215 +peft-0.10.0.dist-info/RECORD,, +peft-0.10.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +peft-0.10.0.dist-info/top_level.txt,sha256=DOKoqHe6fr-A3g26PPWvf5bHLy8fHKhflUO5xzJJEUY,5 +peft/__init__.py,sha256=kL2rBDEEvPAJ2CpnXnwOa5IXylk9a7KRTJXSAedS6ZE,2514 +peft/__pycache__/__init__.cpython-310.pyc,, +peft/__pycache__/auto.cpython-310.pyc,, +peft/__pycache__/config.cpython-310.pyc,, +peft/__pycache__/helpers.cpython-310.pyc,, +peft/__pycache__/import_utils.cpython-310.pyc,, +peft/__pycache__/mapping.cpython-310.pyc,, +peft/__pycache__/mixed_model.cpython-310.pyc,, +peft/__pycache__/peft_model.cpython-310.pyc,, +peft/auto.py,sha256=03OkfIQ_xhziupLLVP5wFYt8pmMOxwjCD5ik1vuNHVk,6568 +peft/config.py,sha256=W6kNwSDRySZyV_1tBkQzim6PMmM6s4VAQCd8d__Q_fE,10908 +peft/helpers.py,sha256=ycZIsMacCi_-WLhsQsWsiweFr3iS8EIVIBDYfcQYBc0,4423 +peft/import_utils.py,sha256=PefA5udnA0LhTOjLvsPsLuDIOQsbvdtm_klzxsNUmAA,2382 +peft/mapping.py,sha256=e7Ei7fcLwVNAuiiZmbitmifqvKTrusnQkd1nzdQh-Vs,5916 +peft/mixed_model.py,sha256=swX0HCZedYXDPASwTqdNn3FGBhzqhRCWRKdHDLg6pV4,16572 +peft/peft_model.py,sha256=kWt8pvOAzFsv4D8uMh1B8jmkA0PEAlSDpf5hul8KxMQ,89505 +peft/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +peft/tuners/__init__.py,sha256=N75esatM3Zuf1BmfjWqnCQSv-IevaNtKsWanFPMzq8g,1535 +peft/tuners/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc,, +peft/tuners/__pycache__/tuners_utils.cpython-310.pyc,, +peft/tuners/adalora/__init__.py,sha256=iT-UeicKex8znLIwoOr642odMVues4KZneN_e1Hz6MQ,1298 +peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc,, +peft/tuners/adalora/__pycache__/config.cpython-310.pyc,, +peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc,, +peft/tuners/adalora/__pycache__/layer.cpython-310.pyc,, +peft/tuners/adalora/__pycache__/model.cpython-310.pyc,, +peft/tuners/adalora/bnb.py,sha256=QZeSa84I564wHq-PjNKiX3IUWi9jYW1t_AlVxroNhCs,5606 +peft/tuners/adalora/config.py,sha256=P0yXBb0OfTXEUrgfqCkVauAVIOm4Q8UfCNlgt4mdY2Q,2659 +peft/tuners/adalora/gptq.py,sha256=nsehwVjP9e1H6rXwuS3dID8lqEQbRXK98Hogzsm8MeE,2719 +peft/tuners/adalora/layer.py,sha256=z6dXGLNNastpo0xZWoX83R9lNxOwJJRehbhdx6KUhZM,14352 +peft/tuners/adalora/model.py,sha256=XqcyNkxUfXwAz60QnMK60gfXhE-ZhgKUXUpCHbDYlko,15302 +peft/tuners/adaption_prompt/__init__.py,sha256=gOenS_7j87CTvgb_xPaW2K8-PfVwHndb3EHdofV3BGU,794 +peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc,, 
+peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc,, +peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc,, +peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc,, +peft/tuners/adaption_prompt/config.py,sha256=_wECOCgJBVEg1YDOqpuMt6Glo3OglFwdHfsnfQJZdms,2803 +peft/tuners/adaption_prompt/layer.py,sha256=GCBW_2eEGmUOZS_FyQ-HgbbWOJkCLCKq89B3quKTKLs,5833 +peft/tuners/adaption_prompt/model.py,sha256=wUcz0G8UBXCchOWP14zj-iKbbd707WR-ti6wj60vuck,7464 +peft/tuners/adaption_prompt/utils.py,sha256=W9qL4LLgwyM0jMcSvQOy1DzEzADVRzL61Gh9Hx_rRvw,5708 +peft/tuners/ia3/__init__.py,sha256=72N2yY-5xQRq5cxmxLkm73JwNF3AZUhG8hdJ4g834uU,1185 +peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc,, +peft/tuners/ia3/__pycache__/config.cpython-310.pyc,, +peft/tuners/ia3/__pycache__/layer.cpython-310.pyc,, +peft/tuners/ia3/__pycache__/model.cpython-310.pyc,, +peft/tuners/ia3/bnb.py,sha256=GvoVKQS0t4u5Xnq-isEMLi8m_VgZdBZPc6LrVEZdHkM,4668 +peft/tuners/ia3/config.py,sha256=K27xLoIIo_meHt--iE9H-kfmLUycXNP2eV2cBQJKERE,5152 +peft/tuners/ia3/layer.py,sha256=4CMLwAKN4N2dvuWeabVmRxqAAoqznHnYcEdUrMYLrEI,13661 +peft/tuners/ia3/model.py,sha256=5wL76PXcMV197yH7u0Sa1L3zCjTVWEvSN0VFysG34fo,16597 +peft/tuners/loha/__init__.py,sha256=lHzf9_TXvsqW6SCVnMjeMsgqD6Vdv8c6L2Ob6joeoio,777 +peft/tuners/loha/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/loha/__pycache__/config.cpython-310.pyc,, +peft/tuners/loha/__pycache__/layer.cpython-310.pyc,, +peft/tuners/loha/__pycache__/model.cpython-310.pyc,, +peft/tuners/loha/config.py,sha256=aKe2JegDgAj4l9oV-SQr1UWo80LivP45jM35fpXOEfc,6037 +peft/tuners/loha/layer.py,sha256=BtPrDc1NtpcYj2_NW0s25lC4S--jVvMFyVwBnURCBFM,15606 +peft/tuners/loha/model.py,sha256=boeR0HwZu8F1EJEM1zb7gs5jaAKO_aKXu1TsIUwDuH8,4205 +peft/tuners/lokr/__init__.py,sha256=s8o_tkrdXpN7ZUXEMUoFxXxmg8_Fj9pRHlDqn16Ie8c,777 +peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/lokr/__pycache__/config.cpython-310.pyc,, +peft/tuners/lokr/__pycache__/layer.cpython-310.pyc,, +peft/tuners/lokr/__pycache__/model.cpython-310.pyc,, +peft/tuners/lokr/config.py,sha256=OworzmruIz37l3BsQ0C-7sULL7Es4AUMuKbK_wWvjrk,6305 +peft/tuners/lokr/layer.py,sha256=D8VJ6MXnexS6ieFvXGciJObj_NMjPvHG1-7zrDBc1Tk,15469 +peft/tuners/lokr/model.py,sha256=s6OMs72nQJXZnDSjQW3HX9UrprktIP0p61l2ymsZgQY,4259 +peft/tuners/lora/__init__.py,sha256=3J6qLQHY9iZbkJ5bgD0uDUtXicIn4cDQi8z5lnjTF4s,1288 +peft/tuners/lora/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc,, +peft/tuners/lora/__pycache__/awq.cpython-310.pyc,, +peft/tuners/lora/__pycache__/bnb.cpython-310.pyc,, +peft/tuners/lora/__pycache__/config.cpython-310.pyc,, +peft/tuners/lora/__pycache__/gptq.cpython-310.pyc,, +peft/tuners/lora/__pycache__/layer.cpython-310.pyc,, +peft/tuners/lora/__pycache__/model.cpython-310.pyc,, +peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc,, +peft/tuners/lora/aqlm.py,sha256=jqxOpFyTIOtRJJe5YzcF0vxIPSUdEW4kAvnzkollhS4,3320 +peft/tuners/lora/awq.py,sha256=Y7_sgl3WorxAtppyCuzLmCH62FaW_IQE6RwQ_YoH4PA,3693 +peft/tuners/lora/bnb.py,sha256=fFfpehst9kRqQR2xBF2o6dCLmxIIaiHVMFXO-rLCsH4,22415 +peft/tuners/lora/config.py,sha256=02hCQLNna2kD8p5PxdTKx1TpwUuZj0hqWeOm8GAoacI,17322 +peft/tuners/lora/gptq.py,sha256=dMFl7H157DkHVD7sjX9BE8nUGdMXQG46VUqn5wYWn6o,3969 +peft/tuners/lora/layer.py,sha256=sK10nz2sUO8YMKrOue48-Ey7qjlNPRpGaoEFnCCKcNo,47596 +peft/tuners/lora/model.py,sha256=E2bSxGxK-tRkEhcM969fg-5Wf5BTH7QlEKPNDsDHvpo,34612 
+peft/tuners/lora/tp_layer.py,sha256=xfvluzH_7D5cPicodHJPILNvpPL4F2Qpfi5SOaH1GmM,8829 +peft/tuners/lycoris_utils.py,sha256=JAL2tBhcYTHy8V74TGEKXceTxlAIhWa21xFH800av70,16629 +peft/tuners/mixed/__init__.py,sha256=see7CbOiJJ-E8W1QSSBtIK4oQfFnkJOUVU5xckCYyzw,706 +peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/mixed/__pycache__/model.cpython-310.pyc,, +peft/tuners/mixed/model.py,sha256=eLqWsKvNcTii3Kgcbqnp2jv4_LtQwixRJd9Ayn_vDhA,15006 +peft/tuners/multitask_prompt_tuning/__init__.py,sha256=_Vm3xHt9URAAAYg_XtA-dWJC-vsNs39hW8Bntym3L-I,819 +peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc,, +peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc,, +peft/tuners/multitask_prompt_tuning/config.py,sha256=5G9MbSB9m2DCxhwYE5RZ6ZvSfQvt641k_PBz7XA3Ac8,2446 +peft/tuners/multitask_prompt_tuning/model.py,sha256=GczbBvzlm_VKrRrB1I-MW9Exm6S9-hQIGCdA5me2eoU,4659 +peft/tuners/oft/__init__.py,sha256=B9DhFqanuJW0VSZa9Fwe-lUBw4UczAv3BvsQaKu8whE,771 +peft/tuners/oft/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/oft/__pycache__/config.cpython-310.pyc,, +peft/tuners/oft/__pycache__/layer.cpython-310.pyc,, +peft/tuners/oft/__pycache__/model.cpython-310.pyc,, +peft/tuners/oft/config.py,sha256=mLSje1UnT2Aor1R7miKsKCKKMeQPc8vUC3SBUCkw4EE,5826 +peft/tuners/oft/layer.py,sha256=cWfJguXCVTzworW8XW9ndp54iSVLrPUVMoTmQ2PZoiM,15591 +peft/tuners/oft/model.py,sha256=DJzJIrKd0MGqCpoNQXSstC5cfACsXt6IK4C8y8Rc-yg,3695 +peft/tuners/p_tuning/__init__.py,sha256=XUQbMT5GTftYRCGNkWiWfAJRBDuv8pWnALRW67HtEDU,801 +peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc,, +peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc,, +peft/tuners/p_tuning/config.py,sha256=konpCRWcmzPQgDkvz0gA-xC856poHpyYAWXqES7qBzk,2110 +peft/tuners/p_tuning/model.py,sha256=rv2mbmPOArAefeqhJ09Oz7XNGaScWGRT1hgKlrfhfAw,5575 +peft/tuners/poly/__init__.py,sha256=yCiWTO7o2rWmvAM4CNKyllvIvtE6_QnHuEjbKx7jhgI,759 +peft/tuners/poly/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/poly/__pycache__/config.cpython-310.pyc,, +peft/tuners/poly/__pycache__/layer.cpython-310.pyc,, +peft/tuners/poly/__pycache__/model.cpython-310.pyc,, +peft/tuners/poly/__pycache__/router.cpython-310.pyc,, +peft/tuners/poly/config.py,sha256=FqAjrybzzD4eATX_jG-GBr4QeBDOVE3xTcXKCrQn-dw,3783 +peft/tuners/poly/layer.py,sha256=ArvY2sOnI6xVI-mglFfevOKKKXJPhs9xtFivZkOWWnA,6890 +peft/tuners/poly/model.py,sha256=VtzkgWnjnbAAAf1WMNMUoPUTq6-W58kKIJRWmlinMBk,6782 +peft/tuners/poly/router.py,sha256=qIX6jEI_FSb5Rr5YhIWo8Cr5ZiAmDCiydT7lTuEICp8,2800 +peft/tuners/prefix_tuning/__init__.py,sha256=KCQnKQoFTfx0M2HkQANF36PO6y62oD24gmkdGnpWrXc,723 +peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc,, +peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc,, +peft/tuners/prefix_tuning/config.py,sha256=JUCnCtime_7V5iJyX6iBk_8SeYCpJ_Mw-RGYmgzz2Qg,1386 +peft/tuners/prefix_tuning/model.py,sha256=FyE_EBtvvA9bZDX-GRWN6s0MQtcUe57PLD4qRT2ACww,3007 +peft/tuners/prompt_tuning/__init__.py,sha256=HyENdH-fwAAdwsEqFX4Pd4MLfCAZxBiXGqyjxvU4CAE,765 +peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc,, +peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc,, +peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc,, +peft/tuners/prompt_tuning/config.py,sha256=RbfmCdnbQ6t0PTwo8NcnVbkQKyHIJaljptRRhRClhJ0,3472 
+peft/tuners/prompt_tuning/model.py,sha256=rCCft8hRjVtQIR536hUfhT1ICBuDqWBGSTMPGNdAwTg,3623 +peft/tuners/tuners_utils.py,sha256=iwIWbOXidvE_FlviBi5rZlbjBIRNxIraAwxnw9hXyds,31283 +peft/utils/__init__.py,sha256=HA_S0vk8wMs_85BHxyX3pfdvWhl9F3XxvVzUOTsg42Q,1947 +peft/utils/__pycache__/__init__.cpython-310.pyc,, +peft/utils/__pycache__/constants.cpython-310.pyc,, +peft/utils/__pycache__/integrations.cpython-310.pyc,, +peft/utils/__pycache__/loftq_utils.cpython-310.pyc,, +peft/utils/__pycache__/merge_utils.cpython-310.pyc,, +peft/utils/__pycache__/other.cpython-310.pyc,, +peft/utils/__pycache__/peft_types.cpython-310.pyc,, +peft/utils/__pycache__/save_and_load.cpython-310.pyc,, +peft/utils/constants.py,sha256=6B6JZDmrqqCPtyU8IA8uaXRkibv-IBID6i19CV3A7C4,5751 +peft/utils/integrations.py,sha256=yFJVpki0wJWqHXADdKbMX4Rpa-jkpl0mJ9l5DAVcwTI,2568 +peft/utils/loftq_utils.py,sha256=ouhJhFkrVl31Q0TvMHWLqmiTkFOixq-aSmzappRa4k4,17180 +peft/utils/merge_utils.py,sha256=IAd6DlbPowxAEiuC5OaMwg9hmdO6068DOU18CJ00VIU,9905 +peft/utils/other.py,sha256=gDmVrWGxGCK0KlwBt3JueE7O28uhoPuCbANX0QcbU_8,24273 +peft/utils/peft_types.py,sha256=CIvazG4k07ONOacJKIS8iigYmRj-GxG37v7q57ZzKE0,2147 +peft/utils/save_and_load.py,sha256=5GKT_XQ1xbz9XLUErnmolBWcAgHU_EHaMx6kmoL60fI,14820 diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..dc89ba063e639dfa24fd7f53340bc368adbebb7a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/top_level.txt @@ -0,0 +1 @@ +peft diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__init__.py b/env-llmeval/lib/python3.10/site-packages/regex/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb06564ab033a2b0b501f7f41efb169dacd1f801 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/regex/__init__.py @@ -0,0 +1,3 @@ +from .regex import * +from . 
import regex +__all__ = regex.__all__ diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf0ea57ae28aef9d0ea5f884605a19381b78b7fd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1683a04c61229d2cf1ad6fd0e2f1d609c1b7938e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1729cfc7bc9e57f37fed836c6680d6e2c8a3db1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/test_regex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/test_regex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b68ba84e64140751d1d4ed7dfedf25dc1e57c72 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/test_regex.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/regex/_regex_core.py b/env-llmeval/lib/python3.10/site-packages/regex/_regex_core.py new file mode 100644 index 0000000000000000000000000000000000000000..c2314f80011256ca81cfb38f65b3485e1956e94e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/regex/_regex_core.py @@ -0,0 +1,4492 @@ +# +# Secret Labs' Regular Expression Engine core module +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# This version of the SRE library can be redistributed under CNRI's +# Python 1.6 license. For any other use, please contact Secret Labs +# AB (info@pythonware.com). +# +# Portions of this engine have been developed in cooperation with +# CNRI. Hewlett-Packard provided funding for 1.6 integration and +# other compatibility work. +# +# 2010-01-16 mrab Python front-end re-written and extended + +import enum +import string +import unicodedata +from collections import defaultdict + +import regex._regex as _regex + +__all__ = ["A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH", + "F", "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "P", + "POSIX", "R", "REVERSE", "S", "DOTALL", "T", "TEMPLATE", "U", "UNICODE", + "V0", "VERSION0", "V1", "VERSION1", "W", "WORD", "X", "VERBOSE", "error", + "Scanner", "RegexFlag"] + +# The regex exception. +class error(Exception): + """Exception raised for invalid regular expressions. 
+ + Attributes: + + msg: The unformatted error message + pattern: The regular expression pattern + pos: The position in the pattern where compilation failed, or None + lineno: The line number where compilation failed, unless pos is None + colno: The column number where compilation failed, unless pos is None + """ + + def __init__(self, message, pattern=None, pos=None): + newline = '\n' if isinstance(pattern, str) else b'\n' + self.msg = message + self.pattern = pattern + self.pos = pos + if pattern is not None and pos is not None: + self.lineno = pattern.count(newline, 0, pos) + 1 + self.colno = pos - pattern.rfind(newline, 0, pos) + + message = "{} at position {}".format(message, pos) + + if newline in pattern: + message += " (line {}, column {})".format(self.lineno, + self.colno) + + Exception.__init__(self, message) + +# The exception for when a positional flag has been turned on in the old +# behaviour. +class _UnscopedFlagSet(Exception): + pass + +# The exception for when parsing fails and we want to try something else. +class ParseError(Exception): + pass + +# The exception for when there isn't a valid first set. +class _FirstSetError(Exception): + pass + +# Flags. +class RegexFlag(enum.IntFlag): + A = ASCII = 0x80 # Assume ASCII locale. + B = BESTMATCH = 0x1000 # Best fuzzy match. + D = DEBUG = 0x200 # Print parsed pattern. + E = ENHANCEMATCH = 0x8000 # Attempt to improve the fit after finding the first + # fuzzy match. + F = FULLCASE = 0x4000 # Unicode full case-folding. + I = IGNORECASE = 0x2 # Ignore case. + L = LOCALE = 0x4 # Assume current 8-bit locale. + M = MULTILINE = 0x8 # Make anchors look for newline. + P = POSIX = 0x10000 # POSIX-style matching (leftmost longest). + R = REVERSE = 0x400 # Search backwards. + S = DOTALL = 0x10 # Make dot match newline. + U = UNICODE = 0x20 # Assume Unicode locale. + V0 = VERSION0 = 0x2000 # Old legacy behaviour. + V1 = VERSION1 = 0x100 # New enhanced behaviour. + W = WORD = 0x800 # Default Unicode word breaks. + X = VERBOSE = 0x40 # Ignore whitespace and comments. + T = TEMPLATE = 0x1 # Template (present because re module has it). + + def __repr__(self): + if self._name_ is not None: + return 'regex.%s' % self._name_ + + value = self._value_ + members = [] + negative = value < 0 + + if negative: + value = ~value + + for m in self.__class__: + if value & m._value_: + value &= ~m._value_ + members.append('regex.%s' % m._name_) + + if value: + members.append(hex(value)) + + res = '|'.join(members) + + if negative: + if len(members) > 1: + res = '~(%s)' % res + else: + res = '~%s' % res + + return res + + __str__ = object.__str__ + +globals().update(RegexFlag.__members__) + +DEFAULT_VERSION = VERSION1 + +_ALL_VERSIONS = VERSION0 | VERSION1 +_ALL_ENCODINGS = ASCII | LOCALE | UNICODE + +# The default flags for the various versions. +DEFAULT_FLAGS = {VERSION0: 0, VERSION1: FULLCASE} + +# The mask for the flags. +GLOBAL_FLAGS = (_ALL_VERSIONS | BESTMATCH | DEBUG | ENHANCEMATCH | POSIX | + REVERSE) +SCOPED_FLAGS = (FULLCASE | IGNORECASE | MULTILINE | DOTALL | WORD | VERBOSE | + _ALL_ENCODINGS) + +ALPHA = frozenset(string.ascii_letters) +DIGITS = frozenset(string.digits) +ALNUM = ALPHA | DIGITS +OCT_DIGITS = frozenset(string.octdigits) +HEX_DIGITS = frozenset(string.hexdigits) +SPECIAL_CHARS = frozenset("()|?*+{^$.[\\#") | frozenset([""]) +NAMED_CHAR_PART = ALNUM | frozenset(" -") +PROPERTY_NAME_PART = ALNUM | frozenset(" &_-.") +SET_OPS = ("||", "~~", "&&", "--") + +# The width of the code words inside the regex engine. 
+BYTES_PER_CODE = _regex.get_code_size() +BITS_PER_CODE = BYTES_PER_CODE * 8 + +# The repeat count which represents infinity. +UNLIMITED = (1 << BITS_PER_CODE) - 1 + +# The regular expression flags. +REGEX_FLAGS = {"a": ASCII, "b": BESTMATCH, "e": ENHANCEMATCH, "f": FULLCASE, + "i": IGNORECASE, "L": LOCALE, "m": MULTILINE, "p": POSIX, "r": REVERSE, + "s": DOTALL, "u": UNICODE, "V0": VERSION0, "V1": VERSION1, "w": WORD, "x": + VERBOSE} + +# The case flags. +CASE_FLAGS = FULLCASE | IGNORECASE +NOCASE = 0 +FULLIGNORECASE = FULLCASE | IGNORECASE + +FULL_CASE_FOLDING = UNICODE | FULLIGNORECASE + +CASE_FLAGS_COMBINATIONS = {0: 0, FULLCASE: 0, IGNORECASE: IGNORECASE, + FULLIGNORECASE: FULLIGNORECASE} + +# The number of digits in hexadecimal escapes. +HEX_ESCAPES = {"x": 2, "u": 4, "U": 8} + +# The names of the opcodes. +OPCODES = """ +FAILURE +SUCCESS +ANY +ANY_ALL +ANY_ALL_REV +ANY_REV +ANY_U +ANY_U_REV +ATOMIC +BOUNDARY +BRANCH +CALL_REF +CHARACTER +CHARACTER_IGN +CHARACTER_IGN_REV +CHARACTER_REV +CONDITIONAL +DEFAULT_BOUNDARY +DEFAULT_END_OF_WORD +DEFAULT_START_OF_WORD +END +END_OF_LINE +END_OF_LINE_U +END_OF_STRING +END_OF_STRING_LINE +END_OF_STRING_LINE_U +END_OF_WORD +FUZZY +GRAPHEME_BOUNDARY +GREEDY_REPEAT +GROUP +GROUP_CALL +GROUP_EXISTS +KEEP +LAZY_REPEAT +LOOKAROUND +NEXT +PROPERTY +PROPERTY_IGN +PROPERTY_IGN_REV +PROPERTY_REV +PRUNE +RANGE +RANGE_IGN +RANGE_IGN_REV +RANGE_REV +REF_GROUP +REF_GROUP_FLD +REF_GROUP_FLD_REV +REF_GROUP_IGN +REF_GROUP_IGN_REV +REF_GROUP_REV +SEARCH_ANCHOR +SET_DIFF +SET_DIFF_IGN +SET_DIFF_IGN_REV +SET_DIFF_REV +SET_INTER +SET_INTER_IGN +SET_INTER_IGN_REV +SET_INTER_REV +SET_SYM_DIFF +SET_SYM_DIFF_IGN +SET_SYM_DIFF_IGN_REV +SET_SYM_DIFF_REV +SET_UNION +SET_UNION_IGN +SET_UNION_IGN_REV +SET_UNION_REV +SKIP +START_OF_LINE +START_OF_LINE_U +START_OF_STRING +START_OF_WORD +STRING +STRING_FLD +STRING_FLD_REV +STRING_IGN +STRING_IGN_REV +STRING_REV +FUZZY_EXT +""" + +# Define the opcodes in a namespace. +class Namespace: + pass + +OP = Namespace() +for i, op in enumerate(OPCODES.split()): + setattr(OP, op, i) + +def _shrink_cache(cache_dict, args_dict, locale_sensitive, max_length, divisor=5): + """Make room in the given cache. + + Args: + cache_dict: The cache dictionary to modify. + args_dict: The dictionary of named list args used by patterns. + max_length: Maximum # of entries in cache_dict before it is shrunk. + divisor: Cache will shrink to max_length - 1/divisor*max_length items. + """ + # Toss out a fraction of the entries at random to make room for new ones. + # A random algorithm was chosen as opposed to simply cache_dict.popitem() + # as popitem could penalize the same regular expression repeatedly based + # on its internal hash value. Being random should spread the cache miss + # love around. + cache_keys = tuple(cache_dict.keys()) + overage = len(cache_keys) - max_length + if overage < 0: + # Cache is already within limits. Normally this should not happen + # but it could due to multithreading. + return + + number_to_toss = max_length // divisor + overage + + # The import is done here to avoid a circular dependency. + import random + if not hasattr(random, 'sample'): + # Do nothing while resolving the circular dependency: + # re->random->warnings->tokenize->string->re + return + + for doomed_key in random.sample(cache_keys, number_to_toss): + try: + del cache_dict[doomed_key] + except KeyError: + # Ignore problems if the cache changed from another thread. + pass + + # Rebuild the arguments and locale-sensitivity dictionaries. 
+ args_dict.clear() + sensitivity_dict = {} + for pattern, pattern_type, flags, args, default_version, locale in tuple(cache_dict): + args_dict[pattern, pattern_type, flags, default_version, locale] = args + try: + sensitivity_dict[pattern_type, pattern] = locale_sensitive[pattern_type, pattern] + except KeyError: + pass + + locale_sensitive.clear() + locale_sensitive.update(sensitivity_dict) + +def _fold_case(info, string): + "Folds the case of a string." + flags = info.flags + if (flags & _ALL_ENCODINGS) == 0: + flags |= info.guess_encoding + + return _regex.fold_case(flags, string) + +def is_cased_i(info, char): + "Checks whether a character is cased." + return len(_regex.get_all_cases(info.flags, char)) > 1 + +def is_cased_f(flags, char): + "Checks whether a character is cased." + return len(_regex.get_all_cases(flags, char)) > 1 + +def _compile_firstset(info, fs): + "Compiles the firstset for the pattern." + reverse = bool(info.flags & REVERSE) + fs = _check_firstset(info, reverse, fs) + if not fs: + return [] + + # Compile the firstset. + return fs.compile(reverse) + +def _check_firstset(info, reverse, fs): + "Checks the firstset for the pattern." + if not fs or None in fs: + return None + + # If we ignore the case, for simplicity we won't build a firstset. + members = set() + case_flags = NOCASE + for i in fs: + if isinstance(i, Character) and not i.positive: + return None + +# if i.case_flags: +# if isinstance(i, Character): +# if is_cased_i(info, i.value): +# return [] +# elif isinstance(i, SetBase): +# return [] + case_flags |= i.case_flags + members.add(i.with_flags(case_flags=NOCASE)) + + if case_flags == (FULLCASE | IGNORECASE): + return None + + # Build the firstset. + fs = SetUnion(info, list(members), case_flags=case_flags & ~FULLCASE, + zerowidth=True) + fs = fs.optimise(info, reverse, in_set=True) + + return fs + +def _flatten_code(code): + "Flattens the code from a list of tuples." + flat_code = [] + for c in code: + flat_code.extend(c) + + return flat_code + +def make_case_flags(info): + "Makes the case flags." + flags = info.flags & CASE_FLAGS + + # Turn off FULLCASE if ASCII is turned on. + if info.flags & ASCII: + flags &= ~FULLCASE + + return flags + +def make_character(info, value, in_set=False): + "Makes a character literal." + if in_set: + # A character set is built case-sensitively. + return Character(value) + + return Character(value, case_flags=make_case_flags(info)) + +def make_ref_group(info, name, position): + "Makes a group reference." + return RefGroup(info, name, position, case_flags=make_case_flags(info)) + +def make_string_set(info, name): + "Makes a string set." + return StringSet(info, name, case_flags=make_case_flags(info)) + +def make_property(info, prop, in_set): + "Makes a property." + if in_set: + return prop + + return prop.with_flags(case_flags=make_case_flags(info)) + +def _parse_pattern(source, info): + "Parses a pattern, eg. 'a|b|c'." + branches = [parse_sequence(source, info)] + while source.match("|"): + branches.append(parse_sequence(source, info)) + + if len(branches) == 1: + return branches[0] + return Branch(branches) + +def parse_sequence(source, info): + "Parses a sequence, eg. 'abc'." + sequence = [None] + case_flags = make_case_flags(info) + while True: + saved_pos = source.pos + ch = source.get() + if ch in SPECIAL_CHARS: + if ch in ")|": + # The end of a sequence. At the end of the pattern ch is "". + source.pos = saved_pos + break + elif ch == "\\": + # An escape sequence outside a set. 
+ sequence.append(parse_escape(source, info, False)) + elif ch == "(": + # A parenthesised subpattern or a flag. + element = parse_paren(source, info) + if element is None: + case_flags = make_case_flags(info) + else: + sequence.append(element) + elif ch == ".": + # Any character. + if info.flags & DOTALL: + sequence.append(AnyAll()) + elif info.flags & WORD: + sequence.append(AnyU()) + else: + sequence.append(Any()) + elif ch == "[": + # A character set. + sequence.append(parse_set(source, info)) + elif ch == "^": + # The start of a line or the string. + if info.flags & MULTILINE: + if info.flags & WORD: + sequence.append(StartOfLineU()) + else: + sequence.append(StartOfLine()) + else: + sequence.append(StartOfString()) + elif ch == "$": + # The end of a line or the string. + if info.flags & MULTILINE: + if info.flags & WORD: + sequence.append(EndOfLineU()) + else: + sequence.append(EndOfLine()) + else: + if info.flags & WORD: + sequence.append(EndOfStringLineU()) + else: + sequence.append(EndOfStringLine()) + elif ch in "?*+{": + # Looks like a quantifier. + counts = parse_quantifier(source, info, ch) + if counts: + # It _is_ a quantifier. + apply_quantifier(source, info, counts, case_flags, ch, + saved_pos, sequence) + sequence.append(None) + else: + # It's not a quantifier. Maybe it's a fuzzy constraint. + constraints = parse_fuzzy(source, info, ch, case_flags) + if constraints: + # It _is_ a fuzzy constraint. + apply_constraint(source, info, constraints, case_flags, + saved_pos, sequence) + sequence.append(None) + else: + # The element was just a literal. + sequence.append(Character(ord(ch), + case_flags=case_flags)) + else: + # A literal. + sequence.append(Character(ord(ch), case_flags=case_flags)) + else: + # A literal. + sequence.append(Character(ord(ch), case_flags=case_flags)) + + sequence = [item for item in sequence if item is not None] + return Sequence(sequence) + +def apply_quantifier(source, info, counts, case_flags, ch, saved_pos, + sequence): + element = sequence.pop() + if element is None: + if sequence: + raise error("multiple repeat", source.string, saved_pos) + raise error("nothing to repeat", source.string, saved_pos) + + if isinstance(element, (GreedyRepeat, LazyRepeat, PossessiveRepeat)): + raise error("multiple repeat", source.string, saved_pos) + + min_count, max_count = counts + saved_pos = source.pos + ch = source.get() + if ch == "?": + # The "?" suffix that means it's a lazy repeat. + repeated = LazyRepeat + elif ch == "+": + # The "+" suffix that means it's a possessive repeat. + repeated = PossessiveRepeat + else: + # No suffix means that it's a greedy repeat. + source.pos = saved_pos + repeated = GreedyRepeat + + # Ignore the quantifier if it applies to a zero-width item or the number of + # repeats is fixed at 1. + if not element.is_empty() and (min_count != 1 or max_count != 1): + element = repeated(element, min_count, max_count) + + sequence.append(element) + +def apply_constraint(source, info, constraints, case_flags, saved_pos, + sequence): + element = sequence.pop() + if element is None: + raise error("nothing for fuzzy constraint", source.string, saved_pos) + + # If a group is marked as fuzzy then put all of the fuzzy part in the + # group. + if isinstance(element, Group): + element.subpattern = Fuzzy(element.subpattern, constraints) + sequence.append(element) + else: + sequence.append(Fuzzy(element, constraints)) + +_QUANTIFIERS = {"?": (0, 1), "*": (0, None), "+": (1, None)} + +def parse_quantifier(source, info, ch): + "Parses a quantifier." 
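+    # For example (illustrative, not part of the original comments): "?"
+    # maps to (0, 1), "*" to (0, None) and "+" to (1, None) via _QUANTIFIERS
+    # above; "{2,5}" is handled by parse_limited_quantifier and yields
+    # (2, 5), while "{3,}" yields (3, None), None meaning unbounded.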
+ q = _QUANTIFIERS.get(ch) + if q: + # It's a quantifier. + return q + + if ch == "{": + # Looks like a limited repeated element, eg. 'a{2,3}'. + counts = parse_limited_quantifier(source) + if counts: + return counts + + return None + +def is_above_limit(count): + "Checks whether a count is above the maximum." + return count is not None and count >= UNLIMITED + +def parse_limited_quantifier(source): + "Parses a limited quantifier." + saved_pos = source.pos + min_count = parse_count(source) + if source.match(","): + max_count = parse_count(source) + + # No minimum means 0 and no maximum means unlimited. + min_count = int(min_count or 0) + max_count = int(max_count) if max_count else None + else: + if not min_count: + source.pos = saved_pos + return None + + min_count = max_count = int(min_count) + + if not source.match ("}"): + source.pos = saved_pos + return None + + if is_above_limit(min_count) or is_above_limit(max_count): + raise error("repeat count too big", source.string, saved_pos) + + if max_count is not None and min_count > max_count: + raise error("min repeat greater than max repeat", source.string, + saved_pos) + + return min_count, max_count + +def parse_fuzzy(source, info, ch, case_flags): + "Parses a fuzzy setting, if present." + saved_pos = source.pos + + if ch != "{": + return None + + constraints = {} + try: + parse_fuzzy_item(source, constraints) + while source.match(","): + parse_fuzzy_item(source, constraints) + except ParseError: + source.pos = saved_pos + return None + + if source.match(":"): + constraints["test"] = parse_fuzzy_test(source, info, case_flags) + + if not source.match("}"): + raise error("expected }", source.string, source.pos) + + return constraints + +def parse_fuzzy_item(source, constraints): + "Parses a fuzzy setting item." + saved_pos = source.pos + try: + parse_cost_constraint(source, constraints) + except ParseError: + source.pos = saved_pos + + parse_cost_equation(source, constraints) + +def parse_cost_constraint(source, constraints): + "Parses a cost constraint." + saved_pos = source.pos + ch = source.get() + if ch in ALPHA: + # Syntax: constraint [("<=" | "<") cost] + constraint = parse_constraint(source, constraints, ch) + + max_inc = parse_fuzzy_compare(source) + + if max_inc is None: + # No maximum cost. + constraints[constraint] = 0, None + else: + # There's a maximum cost. + cost_pos = source.pos + max_cost = parse_cost_limit(source) + + # Inclusive or exclusive limit? + if not max_inc: + max_cost -= 1 + + if max_cost < 0: + raise error("bad fuzzy cost limit", source.string, cost_pos) + + constraints[constraint] = 0, max_cost + elif ch in DIGITS: + # Syntax: cost ("<=" | "<") constraint ("<=" | "<") cost + source.pos = saved_pos + + # Minimum cost. + cost_pos = source.pos + min_cost = parse_cost_limit(source) + + min_inc = parse_fuzzy_compare(source) + if min_inc is None: + raise ParseError() + + constraint = parse_constraint(source, constraints, source.get()) + + max_inc = parse_fuzzy_compare(source) + if max_inc is None: + raise ParseError() + + # Maximum cost. + cost_pos = source.pos + max_cost = parse_cost_limit(source) + + # Inclusive or exclusive limits? + if not min_inc: + min_cost += 1 + if not max_inc: + max_cost -= 1 + + if not 0 <= min_cost <= max_cost: + raise error("bad fuzzy cost limit", source.string, cost_pos) + + constraints[constraint] = min_cost, max_cost + else: + raise ParseError() + +def parse_cost_limit(source): + "Parses a cost limit." 
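+    # Illustrative examples of the fuzzy syntax parsed here (assumptions
+    # based on the grammar above, not original comments): "(cat){e<=1}"
+    # allows at most one error of any kind, "(cat){1<=s<=2}" between one and
+    # two substitutions, and "(cat){2i+1d+1s<=4}" bounds a weighted cost
+    # equation; this function parses the numeric limits in those forms.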
+    cost_pos = source.pos
+    digits = parse_count(source)
+
+    try:
+        return int(digits)
+    except ValueError:
+        pass
+
+    raise error("bad fuzzy cost limit", source.string, cost_pos)
+
+def parse_constraint(source, constraints, ch):
+    "Parses a constraint."
+    if ch not in "deis":
+        raise ParseError()
+
+    if ch in constraints:
+        raise ParseError()
+
+    return ch
+
+def parse_fuzzy_compare(source):
+    "Parses a cost comparator."
+    if source.match("<="):
+        return True
+    elif source.match("<"):
+        return False
+    else:
+        return None
+
+def parse_cost_equation(source, constraints):
+    "Parses a cost equation."
+    if "cost" in constraints:
+        raise error("more than one cost equation", source.string, source.pos)
+
+    cost = {}
+
+    parse_cost_term(source, cost)
+    while source.match("+"):
+        parse_cost_term(source, cost)
+
+    max_inc = parse_fuzzy_compare(source)
+    if max_inc is None:
+        raise ParseError()
+
+    max_cost = int(parse_count(source))
+
+    if not max_inc:
+        max_cost -= 1
+
+    if max_cost < 0:
+        raise error("bad fuzzy cost limit", source.string, source.pos)
+
+    cost["max"] = max_cost
+
+    constraints["cost"] = cost
+
+def parse_cost_term(source, cost):
+    "Parses a cost equation term."
+    coeff = parse_count(source)
+    ch = source.get()
+    if ch not in "dis":
+        raise ParseError()
+
+    if ch in cost:
+        raise error("repeated fuzzy cost", source.string, source.pos)
+
+    cost[ch] = int(coeff or 1)
+
+def parse_fuzzy_test(source, info, case_flags):
+    saved_pos = source.pos
+    ch = source.get()
+    if ch in SPECIAL_CHARS:
+        if ch == "\\":
+            # An escape sequence outside a set.
+            return parse_escape(source, info, False)
+        elif ch == ".":
+            # Any character.
+            if info.flags & DOTALL:
+                return AnyAll()
+            elif info.flags & WORD:
+                return AnyU()
+            else:
+                return Any()
+        elif ch == "[":
+            # A character set.
+            return parse_set(source, info)
+        else:
+            raise error("expected character set", source.string, saved_pos)
+    elif ch:
+        # A literal.
+        return Character(ord(ch), case_flags=case_flags)
+    else:
+        raise error("expected character set", source.string, saved_pos)
+
+def parse_count(source):
+    "Parses a quantifier's count, which can be empty."
+    return source.get_while(DIGITS)
+
+def parse_paren(source, info):
+    """Parses a parenthesised subpattern or a flag. Returns FLAGS if it's an
+    inline flag.
+    """
+    saved_pos = source.pos
+    ch = source.get(True)
+    if ch == "?":
+        # (?...
+        saved_pos_2 = source.pos
+        ch = source.get(True)
+        if ch == "<":
+            # (?<...
+            saved_pos_3 = source.pos
+            ch = source.get()
+            if ch in ("=", "!"):
+                # (?<=... or (?<!...: lookbehind.
+                return parse_lookaround(source, info, True, ch == "=")
+
+            # (?<...: a named capture group.
+            source.pos = saved_pos_3
+            name = parse_name(source)
+            group = info.open_group(name)
+            source.expect(">")
+            saved_flags = info.flags
+            try:
+                subpattern = _parse_pattern(source, info)
+                source.expect(")")
+            finally:
+                info.flags = saved_flags
+                source.ignore_space = bool(info.flags & VERBOSE)
+
+            info.close_group()
+            return Group(info, group, subpattern)
+        if ch in ("=", "!"):
+            # (?=... or (?!...: lookahead.
+            return parse_lookaround(source, info, False, ch == "=")
+        if ch == "P":
+            # (?P...: a Python extension.
+            return parse_extension(source, info)
+        if ch == "#":
+            # (?#...: a comment.
+            return parse_comment(source)
+        if ch == "(":
+            # (?(...: a conditional subpattern.
+            return parse_conditional(source, info)
+        if ch == ">":
+            # (?>...: an atomic subpattern.
+            return parse_atomic(source, info)
+        if ch == "|":
+            # (?|...: a common/reset groups branch.
+            return parse_common(source, info)
+        if ch == "R" or "0" <= ch <= "9":
+            # (?R...: probably a call to a group.
+            return parse_call_group(source, info, ch, saved_pos_2)
+        if ch == "&":
+            # (?&...: a call to a named group.
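+            # e.g. in r"(?P<digits>\d+)-(?&digits)" the call re-runs the
+            # subpattern of the named group (an illustrative example, not
+            # from the original comments).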
+            return parse_call_named_group(source, info, saved_pos_2)
+
+        # (?...: probably a flags subpattern.
+        source.pos = saved_pos_2
+        return parse_flags_subpattern(source, info)
+
+    if ch == "*":
+        # (*...
+        saved_pos_2 = source.pos
+        word = source.get_while(set(")>"), include=False)
+        if word[ : 1].isalpha():
+            verb = VERBS.get(word)
+            if not verb:
+                raise error("unknown verb", source.string, saved_pos_2)
+
+            source.expect(")")
+
+            return verb
+
+    # (...: an unnamed capture group.
+    source.pos = saved_pos
+    group = info.open_group()
+    saved_flags = info.flags
+    try:
+        subpattern = _parse_pattern(source, info)
+        source.expect(")")
+    finally:
+        info.flags = saved_flags
+        source.ignore_space = bool(info.flags & VERBOSE)
+
+    info.close_group()
+
+    return Group(info, group, subpattern)
+
+def parse_extension(source, info):
+    "Parses a Python extension."
+    saved_pos = source.pos
+    ch = source.get()
+    if ch == "<":
+        # (?P<...: a named capture group.
+        name = parse_name(source)
+        group = info.open_group(name)
+        source.expect(">")
+        saved_flags = info.flags
+        try:
+            subpattern = _parse_pattern(source, info)
+            source.expect(")")
+        finally:
+            info.flags = saved_flags
+            source.ignore_space = bool(info.flags & VERBOSE)
+
+        info.close_group()
+
+        return Group(info, group, subpattern)
+    if ch == "=":
+        # (?P=...: a named group reference.
+        name = parse_name(source, allow_numeric=True)
+        source.expect(")")
+        if info.is_open_group(name):
+            raise error("cannot refer to an open group", source.string,
+              saved_pos)
+
+        return make_ref_group(info, name, saved_pos)
+    if ch == ">" or ch == "&":
+        # (?P>...: a call to a group.
+        return parse_call_named_group(source, info, saved_pos)
+
+    source.pos = saved_pos
+    raise error("unknown extension", source.string, saved_pos)
+
+def parse_comment(source):
+    "Parses a comment."
+    while True:
+        saved_pos = source.pos
+        c = source.get(True)
+
+        if not c or c == ")":
+            break
+
+        if c == "\\":
+            c = source.get(True)
+
+    source.pos = saved_pos
+    source.expect(")")
+
+    return None
+
+def parse_lookaround(source, info, behind, positive):
+    "Parses a lookaround."
+    saved_flags = info.flags
+    try:
+        subpattern = _parse_pattern(source, info)
+        source.expect(")")
+    finally:
+        info.flags = saved_flags
+        source.ignore_space = bool(info.flags & VERBOSE)
+
+    return LookAround(behind, positive, subpattern)
+
+def parse_conditional(source, info):
+    "Parses a conditional subpattern."
+    saved_flags = info.flags
+    saved_pos = source.pos
+    ch = source.get()
+    if ch == "?":
+        # (?(?...
+        ch = source.get()
+        if ch in ("=", "!"):
+            # (?(?=... or (?(?!...: lookahead conditional.
+            return parse_lookaround_conditional(source, info, False, ch == "=")
+        if ch == "<":
+            # (?(?<...
+            ch = source.get()
+            if ch in ("=", "!"):
+                # (?(?<=... or (?(?<!...: lookbehind conditional.
+                return parse_lookaround_conditional(source, info, True, ch == "=")
+
+    # ...
+
+def parse_name(source, allow_numeric=False, allow_group_0=False):
+    "Parses a name."
+    name = source.get_while(set(")>"), include=False)
+
+    if not name:
+        raise error("missing group name", source.string, source.pos)
+
+    if name.isdigit():
+        min_group = 0 if allow_group_0 else 1
+        if not allow_numeric or int(name) < min_group:
+            raise error("bad character in group name", source.string,
+              source.pos)
+    else:
+        if not name.isidentifier():
+            raise error("bad character in group name", source.string,
+              source.pos)
+
+    return name
+
+def is_octal(string):
+    "Checks whether a string is octal."
+    return all(ch in OCT_DIGITS for ch in string)
+
+def is_decimal(string):
+    "Checks whether a string is decimal."
+    return all(ch in DIGITS for ch in string)
+
+def is_hexadecimal(string):
+    "Checks whether a string is hexadecimal."
+ return all(ch in HEX_DIGITS for ch in string) + +def parse_escape(source, info, in_set): + "Parses an escape sequence." + saved_ignore = source.ignore_space + source.ignore_space = False + ch = source.get() + source.ignore_space = saved_ignore + if not ch: + # A backslash at the end of the pattern. + raise error("bad escape (end of pattern)", source.string, source.pos) + if ch in HEX_ESCAPES: + # A hexadecimal escape sequence. + return parse_hex_escape(source, info, ch, HEX_ESCAPES[ch], in_set, ch) + elif ch == "g" and not in_set: + # A group reference. + saved_pos = source.pos + try: + return parse_group_ref(source, info) + except error: + # Invalid as a group reference, so assume it's a literal. + source.pos = saved_pos + + return make_character(info, ord(ch), in_set) + elif ch == "G" and not in_set: + # A search anchor. + return SearchAnchor() + elif ch == "L" and not in_set: + # A string set. + return parse_string_set(source, info) + elif ch == "N": + # A named codepoint. + return parse_named_char(source, info, in_set) + elif ch in "pP": + # A Unicode property, positive or negative. + return parse_property(source, info, ch == "p", in_set) + elif ch == "R" and not in_set: + # A line ending. + charset = [0x0A, 0x0B, 0x0C, 0x0D] + if info.guess_encoding == UNICODE: + charset.extend([0x85, 0x2028, 0x2029]) + + return Atomic(Branch([String([0x0D, 0x0A]), SetUnion(info, [Character(c) + for c in charset])])) + elif ch == "X" and not in_set: + # A grapheme cluster. + return Grapheme() + elif ch in ALPHA: + # An alphabetic escape sequence. + # Positional escapes aren't allowed inside a character set. + if not in_set: + if info.flags & WORD: + value = WORD_POSITION_ESCAPES.get(ch) + else: + value = POSITION_ESCAPES.get(ch) + + if value: + return value + + value = CHARSET_ESCAPES.get(ch) + if value: + return value + + value = CHARACTER_ESCAPES.get(ch) + if value: + return Character(ord(value)) + + raise error("bad escape \\%s" % ch, source.string, source.pos) + elif ch in DIGITS: + # A numeric escape sequence. + return parse_numeric_escape(source, info, ch, in_set) + else: + # A literal. + return make_character(info, ord(ch), in_set) + +def parse_numeric_escape(source, info, ch, in_set): + "Parses a numeric escape sequence." + if in_set or ch == "0": + # Octal escape sequence, max 3 digits. + return parse_octal_escape(source, info, [ch], in_set) + + # At least 1 digit, so either octal escape or group. + digits = ch + saved_pos = source.pos + ch = source.get() + if ch in DIGITS: + # At least 2 digits, so either octal escape or group. + digits += ch + saved_pos = source.pos + ch = source.get() + if is_octal(digits) and ch in OCT_DIGITS: + # 3 octal digits, so octal escape sequence. + encoding = info.flags & _ALL_ENCODINGS + if encoding == ASCII or encoding == LOCALE: + octal_mask = 0xFF + else: + octal_mask = 0x1FF + + value = int(digits + ch, 8) & octal_mask + return make_character(info, value) + + # Group reference. + source.pos = saved_pos + if info.is_open_group(digits): + raise error("cannot refer to an open group", source.string, source.pos) + + return make_ref_group(info, digits, source.pos) + +def parse_octal_escape(source, info, digits, in_set): + "Parses an octal escape sequence." 
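+    # Illustrative examples (not from the original comments): r"\052" is the
+    # literal chr(0o52) == "*", while a short form like r"\5" is taken by
+    # parse_numeric_escape above as a reference to group 5.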
+ saved_pos = source.pos + ch = source.get() + while len(digits) < 3 and ch in OCT_DIGITS: + digits.append(ch) + saved_pos = source.pos + ch = source.get() + + source.pos = saved_pos + try: + value = int("".join(digits), 8) + return make_character(info, value, in_set) + except ValueError: + if digits[0] in OCT_DIGITS: + raise error("incomplete escape \\%s" % ''.join(digits), + source.string, source.pos) + else: + raise error("bad escape \\%s" % digits[0], source.string, + source.pos) + +def parse_hex_escape(source, info, esc, expected_len, in_set, type): + "Parses a hex escape sequence." + saved_pos = source.pos + digits = [] + for i in range(expected_len): + ch = source.get() + if ch not in HEX_DIGITS: + raise error("incomplete escape \\%s%s" % (type, ''.join(digits)), + source.string, saved_pos) + digits.append(ch) + + try: + value = int("".join(digits), 16) + except ValueError: + pass + else: + if value < 0x110000: + return make_character(info, value, in_set) + + # Bad hex escape. + raise error("bad hex escape \\%s%s" % (esc, ''.join(digits)), + source.string, saved_pos) + +def parse_group_ref(source, info): + "Parses a group reference." + source.expect("<") + saved_pos = source.pos + name = parse_name(source, True) + source.expect(">") + if info.is_open_group(name): + raise error("cannot refer to an open group", source.string, source.pos) + + return make_ref_group(info, name, saved_pos) + +def parse_string_set(source, info): + "Parses a string set reference." + source.expect("<") + name = parse_name(source, True) + source.expect(">") + if name is None or name not in info.kwargs: + raise error("undefined named list", source.string, source.pos) + + return make_string_set(info, name) + +def parse_named_char(source, info, in_set): + "Parses a named character." + saved_pos = source.pos + if source.match("{"): + name = source.get_while(NAMED_CHAR_PART) + if source.match("}"): + try: + value = unicodedata.lookup(name) + return make_character(info, ord(value), in_set) + except KeyError: + raise error("undefined character name", source.string, + source.pos) + + source.pos = saved_pos + return make_character(info, ord("N"), in_set) + +def parse_property(source, info, positive, in_set): + "Parses a Unicode property." + saved_pos = source.pos + ch = source.get() + if ch == "{": + negate = source.match("^") + prop_name, name = parse_property_name(source) + if source.match("}"): + # It's correctly delimited. + prop = lookup_property(prop_name, name, positive != negate, source) + return make_property(info, prop, in_set) + elif ch and ch in "CLMNPSZ": + # An abbreviated property, eg \pL. + prop = lookup_property(None, ch, positive, source) + return make_property(info, prop, in_set) + + # Not a property, so treat as a literal "p" or "P". + source.pos = saved_pos + ch = "p" if positive else "P" + return make_character(info, ord(ch), in_set) + +def parse_property_name(source): + "Parses a property name, which may be qualified." + name = source.get_while(PROPERTY_NAME_PART) + saved_pos = source.pos + + ch = source.get() + if ch and ch in ":=": + prop_name = name + name = source.get_while(ALNUM | set(" &_-./")).strip() + + if name: + # Name after the ":" or "=", so it's a qualified name. + saved_pos = source.pos + else: + # No name after the ":" or "=", so assume it's an unqualified name. + prop_name, name = None, prop_name + else: + prop_name = None + + source.pos = saved_pos + return prop_name, name + +def parse_set(source, info): + "Parses a character set." 
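+    # Illustrative examples (not from the original comments): VERSION1
+    # enables the set operations parsed below, e.g. "[[a-z]--[aeiou]]"
+    # (difference), "[[a-z]&&[^aeiou]]" (intersection) and "[\w~~\d]"
+    # (symmetric difference); VERSION0 takes only the implicit union form
+    # handled by parse_set_imp_union.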
+ version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + + saved_ignore = source.ignore_space + source.ignore_space = False + # Negative set? + negate = source.match("^") + try: + if version == VERSION0: + item = parse_set_imp_union(source, info) + else: + item = parse_set_union(source, info) + + if not source.match("]"): + raise error("missing ]", source.string, source.pos) + finally: + source.ignore_space = saved_ignore + + if negate: + item = item.with_flags(positive=not item.positive) + + item = item.with_flags(case_flags=make_case_flags(info)) + + return item + +def parse_set_union(source, info): + "Parses a set union ([x||y])." + items = [parse_set_symm_diff(source, info)] + while source.match("||"): + items.append(parse_set_symm_diff(source, info)) + + if len(items) == 1: + return items[0] + return SetUnion(info, items) + +def parse_set_symm_diff(source, info): + "Parses a set symmetric difference ([x~~y])." + items = [parse_set_inter(source, info)] + while source.match("~~"): + items.append(parse_set_inter(source, info)) + + if len(items) == 1: + return items[0] + return SetSymDiff(info, items) + +def parse_set_inter(source, info): + "Parses a set intersection ([x&&y])." + items = [parse_set_diff(source, info)] + while source.match("&&"): + items.append(parse_set_diff(source, info)) + + if len(items) == 1: + return items[0] + return SetInter(info, items) + +def parse_set_diff(source, info): + "Parses a set difference ([x--y])." + items = [parse_set_imp_union(source, info)] + while source.match("--"): + items.append(parse_set_imp_union(source, info)) + + if len(items) == 1: + return items[0] + return SetDiff(info, items) + +def parse_set_imp_union(source, info): + "Parses a set implicit union ([xy])." + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + + items = [parse_set_member(source, info)] + while True: + saved_pos = source.pos + if source.match("]"): + # End of the set. + source.pos = saved_pos + break + + if version == VERSION1 and any(source.match(op) for op in SET_OPS): + # The new behaviour has set operators. + source.pos = saved_pos + break + + items.append(parse_set_member(source, info)) + + if len(items) == 1: + return items[0] + return SetUnion(info, items) + +def parse_set_member(source, info): + "Parses a member in a character set." + # Parse a set item. + start = parse_set_item(source, info) + saved_pos1 = source.pos + if (not isinstance(start, Character) or not start.positive or not + source.match("-")): + # It's not the start of a range. + return start + + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + + # It looks like the start of a range of characters. + saved_pos2 = source.pos + if version == VERSION1 and source.match("-"): + # It's actually the set difference operator '--', so return the + # character. + source.pos = saved_pos1 + return start + + if source.match("]"): + # We've reached the end of the set, so return both the character and + # hyphen. + source.pos = saved_pos2 + return SetUnion(info, [start, Character(ord("-"))]) + + # Parse a set item. + end = parse_set_item(source, info) + if not isinstance(end, Character) or not end.positive: + # It's not a range, so return the character, hyphen and property. + return SetUnion(info, [start, Character(ord("-")), end]) + + # It _is_ a range. 
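+    # e.g. "[a-z]" reaches this point with start.value == ord("a") and
+    # end.value == ord("z"), compiling to Range(97, 122); "[z-a]" fails the
+    # check below (illustrative examples).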
+ if start.value > end.value: + raise error("bad character range", source.string, source.pos) + + if start.value == end.value: + return start + + return Range(start.value, end.value) + +def parse_set_item(source, info): + "Parses an item in a character set." + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + + if source.match("\\"): + # An escape sequence in a set. + return parse_escape(source, info, True) + + saved_pos = source.pos + if source.match("[:"): + # Looks like a POSIX character class. + try: + return parse_posix_class(source, info) + except ParseError: + # Not a POSIX character class. + source.pos = saved_pos + + if version == VERSION1 and source.match("["): + # It's the start of a nested set. + + # Negative set? + negate = source.match("^") + item = parse_set_union(source, info) + + if not source.match("]"): + raise error("missing ]", source.string, source.pos) + + if negate: + item = item.with_flags(positive=not item.positive) + + return item + + ch = source.get() + if not ch: + raise error("unterminated character set", source.string, source.pos) + + return Character(ord(ch)) + +def parse_posix_class(source, info): + "Parses a POSIX character class." + negate = source.match("^") + prop_name, name = parse_property_name(source) + if not source.match(":]"): + raise ParseError() + + return lookup_property(prop_name, name, not negate, source, posix=True) + +def float_to_rational(flt): + "Converts a float to a rational pair." + int_part = int(flt) + error = flt - int_part + if abs(error) < 0.0001: + return int_part, 1 + + den, num = float_to_rational(1.0 / error) + + return int_part * den + num, den + +def numeric_to_rational(numeric): + "Converts a numeric string to a rational string, if possible." + if numeric[ : 1] == "-": + sign, numeric = numeric[0], numeric[1 : ] + else: + sign = "" + + parts = numeric.split("/") + if len(parts) == 2: + num, den = float_to_rational(float(parts[0]) / float(parts[1])) + elif len(parts) == 1: + num, den = float_to_rational(float(parts[0])) + else: + raise ValueError() + + result = "{}{}/{}".format(sign, num, den) + if result.endswith("/1"): + return result[ : -2] + + return result + +def standardise_name(name): + "Standardises a property or value name." + try: + return numeric_to_rational("".join(name)) + except (ValueError, ZeroDivisionError): + return "".join(ch for ch in name if ch not in "_- ").upper() + +_POSIX_CLASSES = set('ALNUM DIGIT PUNCT XDIGIT'.split()) + +_BINARY_VALUES = set('YES Y NO N TRUE T FALSE F'.split()) + +def lookup_property(property, value, positive, source=None, posix=False): + "Looks up a property." + # Normalise the names (which may still be lists). + property = standardise_name(property) if property else None + value = standardise_name(value) + + if (property, value) == ("GENERALCATEGORY", "ASSIGNED"): + property, value, positive = "GENERALCATEGORY", "UNASSIGNED", not positive + + if posix and not property and value.upper() in _POSIX_CLASSES: + value = 'POSIX' + value + + if property: + # Both the property and the value are provided. + prop = PROPERTIES.get(property) + if not prop: + if not source: + raise error("unknown property") + + raise error("unknown property", source.string, source.pos) + + prop_id, value_dict = prop + val_id = value_dict.get(value) + if val_id is None: + if not source: + raise error("unknown property value") + + raise error("unknown property value", source.string, source.pos) + + return Property((prop_id << 16) | val_id, positive) + + # Only the value is provided. 
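+    # Illustrative examples (not from the original comments): r"\p{Lu}" is
+    # looked up as a General_Category value, r"\p{Greek}" as a script, and
+    # prefixed forms such as r"\p{IsGreek}" or r"\p{InBasicLatin}" fall
+    # through to the prefix checks further below.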
+ # It might be the name of a GC, script or block value. + for property in ("GC", "SCRIPT", "BLOCK"): + prop_id, value_dict = PROPERTIES.get(property) + val_id = value_dict.get(value) + if val_id is not None: + return Property((prop_id << 16) | val_id, positive) + + # It might be the name of a binary property. + prop = PROPERTIES.get(value) + if prop: + prop_id, value_dict = prop + if set(value_dict) == _BINARY_VALUES: + return Property((prop_id << 16) | 1, positive) + + return Property(prop_id << 16, not positive) + + # It might be the name of a binary property starting with a prefix. + if value.startswith("IS"): + prop = PROPERTIES.get(value[2 : ]) + if prop: + prop_id, value_dict = prop + if "YES" in value_dict: + return Property((prop_id << 16) | 1, positive) + + # It might be the name of a script or block starting with a prefix. + for prefix, property in (("IS", "SCRIPT"), ("IN", "BLOCK")): + if value.startswith(prefix): + prop_id, value_dict = PROPERTIES.get(property) + val_id = value_dict.get(value[2 : ]) + if val_id is not None: + return Property((prop_id << 16) | val_id, positive) + + # Unknown property. + if not source: + raise error("unknown property") + + raise error("unknown property", source.string, source.pos) + +def _compile_replacement(source, pattern, is_unicode): + "Compiles a replacement template escape sequence." + ch = source.get() + if ch in ALPHA: + # An alphabetic escape sequence. + value = CHARACTER_ESCAPES.get(ch) + if value: + return False, [ord(value)] + + if ch in HEX_ESCAPES and (ch == "x" or is_unicode): + # A hexadecimal escape sequence. + return False, [parse_repl_hex_escape(source, HEX_ESCAPES[ch], ch)] + + if ch == "g": + # A group preference. + return True, [compile_repl_group(source, pattern)] + + if ch == "N" and is_unicode: + # A named character. + value = parse_repl_named_char(source) + if value is not None: + return False, [value] + + raise error("bad escape \\%s" % ch, source.string, source.pos) + + if isinstance(source.sep, bytes): + octal_mask = 0xFF + else: + octal_mask = 0x1FF + + if ch == "0": + # An octal escape sequence. + digits = ch + while len(digits) < 3: + saved_pos = source.pos + ch = source.get() + if ch not in OCT_DIGITS: + source.pos = saved_pos + break + digits += ch + + return False, [int(digits, 8) & octal_mask] + + if ch in DIGITS: + # Either an octal escape sequence (3 digits) or a group reference (max + # 2 digits). + digits = ch + saved_pos = source.pos + ch = source.get() + if ch in DIGITS: + digits += ch + saved_pos = source.pos + ch = source.get() + if ch and is_octal(digits + ch): + # An octal escape sequence. + return False, [int(digits + ch, 8) & octal_mask] + + # A group reference. + source.pos = saved_pos + return True, [int(digits)] + + if ch == "\\": + # An escaped backslash is a backslash. + return False, [ord("\\")] + + if not ch: + # A trailing backslash. + raise error("bad escape (end of pattern)", source.string, source.pos) + + # An escaped non-backslash is a backslash followed by the literal. + return False, [ord("\\"), ord(ch)] + +def parse_repl_hex_escape(source, expected_len, type): + "Parses a hex escape sequence in a replacement string." + digits = [] + for i in range(expected_len): + ch = source.get() + if ch not in HEX_DIGITS: + raise error("incomplete escape \\%s%s" % (type, ''.join(digits)), + source.string, source.pos) + digits.append(ch) + + return int("".join(digits), 16) + +def parse_repl_named_char(source): + "Parses a named character in a replacement string." 
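+    # e.g. the replacement template r"\N{BULLET}" yields ord("\u2022") via
+    # unicodedata.lookup below (an illustrative example).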
+ saved_pos = source.pos + if source.match("{"): + name = source.get_while(ALPHA | set(" ")) + + if source.match("}"): + try: + value = unicodedata.lookup(name) + return ord(value) + except KeyError: + raise error("undefined character name", source.string, + source.pos) + + source.pos = saved_pos + return None + +def compile_repl_group(source, pattern): + "Compiles a replacement template group reference." + source.expect("<") + name = parse_name(source, True, True) + + source.expect(">") + if name.isdigit(): + index = int(name) + if not 0 <= index <= pattern.groups: + raise error("invalid group reference", source.string, source.pos) + + return index + + try: + return pattern.groupindex[name] + except KeyError: + raise IndexError("unknown group") + +# The regular expression is parsed into a syntax tree. The different types of +# node are defined below. + +INDENT = " " +POSITIVE_OP = 0x1 +ZEROWIDTH_OP = 0x2 +FUZZY_OP = 0x4 +REVERSE_OP = 0x8 +REQUIRED_OP = 0x10 + +POS_TEXT = {False: "NON-MATCH", True: "MATCH"} +CASE_TEXT = {NOCASE: "", IGNORECASE: " SIMPLE_IGNORE_CASE", FULLCASE: "", + FULLIGNORECASE: " FULL_IGNORE_CASE"} + +def make_sequence(items): + if len(items) == 1: + return items[0] + return Sequence(items) + +# Common base class for all nodes. +class RegexBase: + def __init__(self): + self._key = self.__class__ + + def with_flags(self, positive=None, case_flags=None, zerowidth=None): + if positive is None: + positive = self.positive + else: + positive = bool(positive) + if case_flags is None: + case_flags = self.case_flags + else: + case_flags = CASE_FLAGS_COMBINATIONS[case_flags & CASE_FLAGS] + if zerowidth is None: + zerowidth = self.zerowidth + else: + zerowidth = bool(zerowidth) + + if (positive == self.positive and case_flags == self.case_flags and + zerowidth == self.zerowidth): + return self + + return self.rebuild(positive, case_flags, zerowidth) + + def fix_groups(self, pattern, reverse, fuzzy): + pass + + def optimise(self, info, reverse): + return self + + def pack_characters(self, info): + return self + + def remove_captures(self): + return self + + def is_atomic(self): + return True + + def can_be_affix(self): + return True + + def contains_group(self): + return False + + def get_firstset(self, reverse): + raise _FirstSetError() + + def has_simple_start(self): + return False + + def compile(self, reverse=False, fuzzy=False): + return self._compile(reverse, fuzzy) + + def is_empty(self): + return False + + def __hash__(self): + return hash(self._key) + + def __eq__(self, other): + return type(self) is type(other) and self._key == other._key + + def __ne__(self, other): + return not self.__eq__(other) + + def get_required_string(self, reverse): + return self.max_width(), None + +# Base class for zero-width nodes. 
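+# Zero-width nodes consume no characters: they compile to a single opcode
+# whose flags encode positivity, fuzziness and direction, and they report
+# max_width() == 0. For example (illustrative), a positive Boundary (\b)
+# compiled forwards and non-fuzzily emits just [(OP.BOUNDARY, POSITIVE_OP)].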
+class ZeroWidthBase(RegexBase): + def __init__(self, positive=True): + RegexBase.__init__(self) + self.positive = bool(positive) + + self._key = self.__class__, self.positive + + def get_firstset(self, reverse): + return set([None]) + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if fuzzy: + flags |= FUZZY_OP + if reverse: + flags |= REVERSE_OP + return [(self._opcode, flags)] + + def dump(self, indent, reverse): + print("{}{} {}".format(INDENT * indent, self._op_name, + POS_TEXT[self.positive])) + + def max_width(self): + return 0 + +class Any(RegexBase): + _opcode = {False: OP.ANY, True: OP.ANY_REV} + _op_name = "ANY" + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if fuzzy: + flags |= FUZZY_OP + return [(self._opcode[reverse], flags)] + + def dump(self, indent, reverse): + print("{}{}".format(INDENT * indent, self._op_name)) + + def max_width(self): + return 1 + +class AnyAll(Any): + _opcode = {False: OP.ANY_ALL, True: OP.ANY_ALL_REV} + _op_name = "ANY_ALL" + +class AnyU(Any): + _opcode = {False: OP.ANY_U, True: OP.ANY_U_REV} + _op_name = "ANY_U" + +class Atomic(RegexBase): + def __init__(self, subpattern): + RegexBase.__init__(self) + self.subpattern = subpattern + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + self.subpattern = self.subpattern.optimise(info, reverse) + + if self.subpattern.is_empty(): + return self.subpattern + return self + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + self.subpattern = self.subpattern.remove_captures() + return self + + def can_be_affix(self): + return self.subpattern.can_be_affix() + + def contains_group(self): + return self.subpattern.contains_group() + + def get_firstset(self, reverse): + return self.subpattern.get_firstset(reverse) + + def has_simple_start(self): + return self.subpattern.has_simple_start() + + def _compile(self, reverse, fuzzy): + return ([(OP.ATOMIC, )] + self.subpattern.compile(reverse, fuzzy) + + [(OP.END, )]) + + def dump(self, indent, reverse): + print("{}ATOMIC".format(INDENT * indent)) + self.subpattern.dump(indent + 1, reverse) + + def is_empty(self): + return self.subpattern.is_empty() + + def __eq__(self, other): + return (type(self) is type(other) and self.subpattern == + other.subpattern) + + def max_width(self): + return self.subpattern.max_width() + + def get_required_string(self, reverse): + return self.subpattern.get_required_string(reverse) + +class Boundary(ZeroWidthBase): + _opcode = OP.BOUNDARY + _op_name = "BOUNDARY" + +class Branch(RegexBase): + def __init__(self, branches): + RegexBase.__init__(self) + self.branches = branches + + def fix_groups(self, pattern, reverse, fuzzy): + for b in self.branches: + b.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + if not self.branches: + return Sequence([]) + + # Flatten branches within branches. + branches = Branch._flatten_branches(info, reverse, self.branches) + + # Move any common prefix or suffix out of the branches. + if reverse: + suffix, branches = Branch._split_common_suffix(info, branches) + prefix = [] + else: + prefix, branches = Branch._split_common_prefix(info, branches) + suffix = [] + + # Try to reduce adjacent single-character branches to sets. 
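+        # e.g. the branches of "a|b|c" collapse into the single set [abc]
+        # here (an illustrative example).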
+ branches = Branch._reduce_to_set(info, reverse, branches) + + if len(branches) > 1: + sequence = [Branch(branches)] + + if not prefix or not suffix: + # We might be able to add a quick precheck before the branches. + firstset = self._add_precheck(info, reverse, branches) + + if firstset: + if reverse: + sequence.append(firstset) + else: + sequence.insert(0, firstset) + else: + sequence = branches + + return make_sequence(prefix + sequence + suffix) + + def _add_precheck(self, info, reverse, branches): + charset = set() + pos = -1 if reverse else 0 + + for branch in branches: + if type(branch) is Literal and branch.case_flags == NOCASE: + charset.add(branch.characters[pos]) + else: + return + + if not charset: + return None + + return _check_firstset(info, reverse, [Character(c) for c in charset]) + + def pack_characters(self, info): + self.branches = [b.pack_characters(info) for b in self.branches] + return self + + def remove_captures(self): + self.branches = [b.remove_captures() for b in self.branches] + return self + + def is_atomic(self): + return all(b.is_atomic() for b in self.branches) + + def can_be_affix(self): + return all(b.can_be_affix() for b in self.branches) + + def contains_group(self): + return any(b.contains_group() for b in self.branches) + + def get_firstset(self, reverse): + fs = set() + for b in self.branches: + fs |= b.get_firstset(reverse) + + return fs or set([None]) + + def _compile(self, reverse, fuzzy): + code = [(OP.BRANCH, )] + for b in self.branches: + code.extend(b.compile(reverse, fuzzy)) + code.append((OP.NEXT, )) + + code[-1] = (OP.END, ) + + return code + + def dump(self, indent, reverse): + print("{}BRANCH".format(INDENT * indent)) + self.branches[0].dump(indent + 1, reverse) + for b in self.branches[1 : ]: + print("{}OR".format(INDENT * indent)) + b.dump(indent + 1, reverse) + + @staticmethod + def _flatten_branches(info, reverse, branches): + # Flatten the branches so that there aren't branches of branches. + new_branches = [] + for b in branches: + b = b.optimise(info, reverse) + if isinstance(b, Branch): + new_branches.extend(b.branches) + else: + new_branches.append(b) + + return new_branches + + @staticmethod + def _split_common_prefix(info, branches): + # Common leading items can be moved out of the branches. + # Get the items in the branches. + alternatives = [] + for b in branches: + if isinstance(b, Sequence): + alternatives.append(b.items) + else: + alternatives.append([b]) + + # What is the maximum possible length of the prefix? + max_count = min(len(a) for a in alternatives) + + # What is the longest common prefix? + prefix = alternatives[0] + pos = 0 + end_pos = max_count + while pos < end_pos and prefix[pos].can_be_affix() and all(a[pos] == + prefix[pos] for a in alternatives): + pos += 1 + count = pos + + if info.flags & UNICODE: + # We need to check that we're not splitting a sequence of + # characters which could form part of full case-folding. + count = pos + while count > 0 and not all(Branch._can_split(a, count) for a in + alternatives): + count -= 1 + + # No common prefix is possible. + if count == 0: + return [], branches + + # Rebuild the branches. + new_branches = [] + for a in alternatives: + new_branches.append(make_sequence(a[count : ])) + + return prefix[ : count], new_branches + + @staticmethod + def _split_common_suffix(info, branches): + # Common trailing items can be moved out of the branches. + # Get the items in the branches. 
+ alternatives = [] + for b in branches: + if isinstance(b, Sequence): + alternatives.append(b.items) + else: + alternatives.append([b]) + + # What is the maximum possible length of the suffix? + max_count = min(len(a) for a in alternatives) + + # What is the longest common suffix? + suffix = alternatives[0] + pos = -1 + end_pos = -1 - max_count + while pos > end_pos and suffix[pos].can_be_affix() and all(a[pos] == + suffix[pos] for a in alternatives): + pos -= 1 + count = -1 - pos + + if info.flags & UNICODE: + # We need to check that we're not splitting a sequence of + # characters which could form part of full case-folding. + while count > 0 and not all(Branch._can_split_rev(a, count) for a + in alternatives): + count -= 1 + + # No common suffix is possible. + if count == 0: + return [], branches + + # Rebuild the branches. + new_branches = [] + for a in alternatives: + new_branches.append(make_sequence(a[ : -count])) + + return suffix[-count : ], new_branches + + @staticmethod + def _can_split(items, count): + # Check the characters either side of the proposed split. + if not Branch._is_full_case(items, count - 1): + return True + + if not Branch._is_full_case(items, count): + return True + + # Check whether a 1-1 split would be OK. + if Branch._is_folded(items[count - 1 : count + 1]): + return False + + # Check whether a 1-2 split would be OK. + if (Branch._is_full_case(items, count + 2) and + Branch._is_folded(items[count - 1 : count + 2])): + return False + + # Check whether a 2-1 split would be OK. + if (Branch._is_full_case(items, count - 2) and + Branch._is_folded(items[count - 2 : count + 1])): + return False + + return True + + @staticmethod + def _can_split_rev(items, count): + end = len(items) + + # Check the characters either side of the proposed split. + if not Branch._is_full_case(items, end - count): + return True + + if not Branch._is_full_case(items, end - count - 1): + return True + + # Check whether a 1-1 split would be OK. + if Branch._is_folded(items[end - count - 1 : end - count + 1]): + return False + + # Check whether a 1-2 split would be OK. + if (Branch._is_full_case(items, end - count + 2) and + Branch._is_folded(items[end - count - 1 : end - count + 2])): + return False + + # Check whether a 2-1 split would be OK. + if (Branch._is_full_case(items, end - count - 2) and + Branch._is_folded(items[end - count - 2 : end - count + 1])): + return False + + return True + + @staticmethod + def _merge_common_prefixes(info, reverse, branches): + # Branches with the same case-sensitive character prefix can be grouped + # together if they are separated only by other branches with a + # character prefix. + prefixed = defaultdict(list) + order = {} + new_branches = [] + for b in branches: + if Branch._is_simple_character(b): + # Branch starts with a simple character. + prefixed[b.value].append([b]) + order.setdefault(b.value, len(order)) + elif (isinstance(b, Sequence) and b.items and + Branch._is_simple_character(b.items[0])): + # Branch starts with a simple character. 
+                prefixed[b.items[0].value].append(b.items)
+                order.setdefault(b.items[0].value, len(order))
+            else:
+                Branch._flush_char_prefix(info, reverse, prefixed, order,
+                  new_branches)
+
+                new_branches.append(b)
+
+        Branch._flush_char_prefix(info, reverse, prefixed, order,
+          new_branches)
+
+        return new_branches
+
+    @staticmethod
+    def _is_simple_character(c):
+        return isinstance(c, Character) and c.positive and not c.case_flags
+
+    @staticmethod
+    def _reduce_to_set(info, reverse, branches):
+        # Can the branches be reduced to a set?
+        new_branches = []
+        items = set()
+        case_flags = NOCASE
+        for b in branches:
+            if isinstance(b, (Character, Property, SetBase)):
+                # Branch starts with a single character.
+                if b.case_flags != case_flags:
+                    # Different case sensitivity, so flush.
+                    Branch._flush_set_members(info, reverse, items, case_flags,
+                      new_branches)
+
+                    case_flags = b.case_flags
+
+                items.add(b.with_flags(case_flags=NOCASE))
+            else:
+                Branch._flush_set_members(info, reverse, items, case_flags,
+                  new_branches)
+
+                new_branches.append(b)
+
+        Branch._flush_set_members(info, reverse, items, case_flags,
+          new_branches)
+
+        return new_branches
+
+    @staticmethod
+    def _flush_char_prefix(info, reverse, prefixed, order, new_branches):
+        # Flush the prefixed branches.
+        if not prefixed:
+            return
+
+        for value, branches in sorted(prefixed.items(), key=lambda pair:
+          order[pair[0]]):
+            if len(branches) == 1:
+                new_branches.append(make_sequence(branches[0]))
+            else:
+                subbranches = []
+                optional = False
+                for b in branches:
+                    if len(b) > 1:
+                        subbranches.append(make_sequence(b[1 : ]))
+                    elif not optional:
+                        subbranches.append(Sequence())
+                        optional = True
+
+                sequence = Sequence([Character(value), Branch(subbranches)])
+                new_branches.append(sequence.optimise(info, reverse))
+
+        prefixed.clear()
+        order.clear()
+
+    @staticmethod
+    def _flush_set_members(info, reverse, items, case_flags, new_branches):
+        # Flush the set members.
+        if not items:
+            return
+
+        if len(items) == 1:
+            item = list(items)[0]
+        else:
+            item = SetUnion(info, list(items)).optimise(info, reverse)
+
+        new_branches.append(item.with_flags(case_flags=case_flags))
+
+        items.clear()
+
+    @staticmethod
+    def _is_full_case(items, i):
+        if not 0 <= i < len(items):
+            return False
+
+        item = items[i]
+        return (isinstance(item, Character) and item.positive and
+          (item.case_flags & FULLIGNORECASE) == FULLIGNORECASE)
+
+    @staticmethod
+    def _is_folded(items):
+        if len(items) < 2:
+            return False
+
+        for i in items:
+            if (not isinstance(i, Character) or not i.positive or not
+              i.case_flags):
+                return False
+
+        folded = "".join(chr(i.value) for i in items)
+        folded = _regex.fold_case(FULL_CASE_FOLDING, folded)
+
+        # Get the characters which expand to multiple codepoints on folding.
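+        # e.g. "ß" (U+00DF) full-case-folds to the two codepoints "ss", so a
+        # split between items that fold together must be rejected (an
+        # illustrative example).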
+ expanding_chars = _regex.get_expand_on_folding() + + for c in expanding_chars: + if folded == _regex.fold_case(FULL_CASE_FOLDING, c): + return True + + return False + + def is_empty(self): + return all(b.is_empty() for b in self.branches) + + def __eq__(self, other): + return type(self) is type(other) and self.branches == other.branches + + def max_width(self): + return max(b.max_width() for b in self.branches) + +class CallGroup(RegexBase): + def __init__(self, info, group, position): + RegexBase.__init__(self) + self.info = info + self.group = group + self.position = position + + self._key = self.__class__, self.group + + def fix_groups(self, pattern, reverse, fuzzy): + try: + self.group = int(self.group) + except ValueError: + try: + self.group = self.info.group_index[self.group] + except KeyError: + raise error("invalid group reference", pattern, self.position) + + if not 0 <= self.group <= self.info.group_count: + raise error("unknown group", pattern, self.position) + + if self.group > 0 and self.info.open_group_count[self.group] > 1: + raise error("ambiguous group reference", pattern, self.position) + + self.info.group_calls.append((self, reverse, fuzzy)) + + self._key = self.__class__, self.group + + def remove_captures(self): + raise error("group reference not allowed", pattern, self.position) + + def _compile(self, reverse, fuzzy): + return [(OP.GROUP_CALL, self.call_ref)] + + def dump(self, indent, reverse): + print("{}GROUP_CALL {}".format(INDENT * indent, self.group)) + + def __eq__(self, other): + return type(self) is type(other) and self.group == other.group + + def max_width(self): + return UNLIMITED + + def __del__(self): + self.info = None + +class CallRef(RegexBase): + def __init__(self, ref, parsed): + self.ref = ref + self.parsed = parsed + + def _compile(self, reverse, fuzzy): + return ([(OP.CALL_REF, self.ref)] + self.parsed._compile(reverse, + fuzzy) + [(OP.END, )]) + +class Character(RegexBase): + _opcode = {(NOCASE, False): OP.CHARACTER, (IGNORECASE, False): + OP.CHARACTER_IGN, (FULLCASE, False): OP.CHARACTER, (FULLIGNORECASE, + False): OP.CHARACTER_IGN, (NOCASE, True): OP.CHARACTER_REV, (IGNORECASE, + True): OP.CHARACTER_IGN_REV, (FULLCASE, True): OP.CHARACTER_REV, + (FULLIGNORECASE, True): OP.CHARACTER_IGN_REV} + + def __init__(self, value, positive=True, case_flags=NOCASE, + zerowidth=False): + RegexBase.__init__(self) + self.value = value + self.positive = bool(positive) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + self.zerowidth = bool(zerowidth) + + if (self.positive and (self.case_flags & FULLIGNORECASE) == + FULLIGNORECASE): + self.folded = _regex.fold_case(FULL_CASE_FOLDING, chr(self.value)) + else: + self.folded = chr(self.value) + + self._key = (self.__class__, self.value, self.positive, + self.case_flags, self.zerowidth) + + def rebuild(self, positive, case_flags, zerowidth): + return Character(self.value, positive, case_flags, zerowidth) + + def optimise(self, info, reverse, in_set=False): + return self + + def get_firstset(self, reverse): + return set([self]) + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + + code = PrecompiledCode([self._opcode[self.case_flags, reverse], flags, + self.value]) + + if len(self.folded) > 1: + # The character expands on full case-folding. 
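+            # e.g. Character(ord("ß")) with FULLIGNORECASE has folded ==
+            # "ss", so the compiled code must also match the two-codepoint
+            # string (an illustrative example).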
+ code = Branch([code, String([ord(c) for c in self.folded], + case_flags=self.case_flags)]) + + return code.compile(reverse, fuzzy) + + def dump(self, indent, reverse): + display = ascii(chr(self.value)).lstrip("bu") + print("{}CHARACTER {} {}{}".format(INDENT * indent, + POS_TEXT[self.positive], display, CASE_TEXT[self.case_flags])) + + def matches(self, ch): + return (ch == self.value) == self.positive + + def max_width(self): + return len(self.folded) + + def get_required_string(self, reverse): + if not self.positive: + return 1, None + + self.folded_characters = tuple(ord(c) for c in self.folded) + + return 0, self + +class Conditional(RegexBase): + def __init__(self, info, group, yes_item, no_item, position): + RegexBase.__init__(self) + self.info = info + self.group = group + self.yes_item = yes_item + self.no_item = no_item + self.position = position + + def fix_groups(self, pattern, reverse, fuzzy): + try: + self.group = int(self.group) + except ValueError: + try: + self.group = self.info.group_index[self.group] + except KeyError: + if self.group == 'DEFINE': + # 'DEFINE' is a special name unless there's a group with + # that name. + self.group = 0 + else: + raise error("unknown group", pattern, self.position) + + if not 0 <= self.group <= self.info.group_count: + raise error("invalid group reference", pattern, self.position) + + self.yes_item.fix_groups(pattern, reverse, fuzzy) + self.no_item.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + yes_item = self.yes_item.optimise(info, reverse) + no_item = self.no_item.optimise(info, reverse) + + return Conditional(info, self.group, yes_item, no_item, self.position) + + def pack_characters(self, info): + self.yes_item = self.yes_item.pack_characters(info) + self.no_item = self.no_item.pack_characters(info) + return self + + def remove_captures(self): + self.yes_item = self.yes_item.remove_captures() + self.no_item = self.no_item.remove_captures() + + def is_atomic(self): + return self.yes_item.is_atomic() and self.no_item.is_atomic() + + def can_be_affix(self): + return self.yes_item.can_be_affix() and self.no_item.can_be_affix() + + def contains_group(self): + return self.yes_item.contains_group() or self.no_item.contains_group() + + def get_firstset(self, reverse): + return (self.yes_item.get_firstset(reverse) | + self.no_item.get_firstset(reverse)) + + def _compile(self, reverse, fuzzy): + code = [(OP.GROUP_EXISTS, self.group)] + code.extend(self.yes_item.compile(reverse, fuzzy)) + add_code = self.no_item.compile(reverse, fuzzy) + if add_code: + code.append((OP.NEXT, )) + code.extend(add_code) + + code.append((OP.END, )) + + return code + + def dump(self, indent, reverse): + print("{}GROUP_EXISTS {}".format(INDENT * indent, self.group)) + self.yes_item.dump(indent + 1, reverse) + if not self.no_item.is_empty(): + print("{}OR".format(INDENT * indent)) + self.no_item.dump(indent + 1, reverse) + + def is_empty(self): + return self.yes_item.is_empty() and self.no_item.is_empty() + + def __eq__(self, other): + return type(self) is type(other) and (self.group, self.yes_item, + self.no_item) == (other.group, other.yes_item, other.no_item) + + def max_width(self): + return max(self.yes_item.max_width(), self.no_item.max_width()) + + def __del__(self): + self.info = None + +class DefaultBoundary(ZeroWidthBase): + _opcode = OP.DEFAULT_BOUNDARY + _op_name = "DEFAULT_BOUNDARY" + +class DefaultEndOfWord(ZeroWidthBase): + _opcode = OP.DEFAULT_END_OF_WORD + _op_name = "DEFAULT_END_OF_WORD" + +class 
DefaultStartOfWord(ZeroWidthBase): + _opcode = OP.DEFAULT_START_OF_WORD + _op_name = "DEFAULT_START_OF_WORD" + +class EndOfLine(ZeroWidthBase): + _opcode = OP.END_OF_LINE + _op_name = "END_OF_LINE" + +class EndOfLineU(EndOfLine): + _opcode = OP.END_OF_LINE_U + _op_name = "END_OF_LINE_U" + +class EndOfString(ZeroWidthBase): + _opcode = OP.END_OF_STRING + _op_name = "END_OF_STRING" + +class EndOfStringLine(ZeroWidthBase): + _opcode = OP.END_OF_STRING_LINE + _op_name = "END_OF_STRING_LINE" + +class EndOfStringLineU(EndOfStringLine): + _opcode = OP.END_OF_STRING_LINE_U + _op_name = "END_OF_STRING_LINE_U" + +class EndOfWord(ZeroWidthBase): + _opcode = OP.END_OF_WORD + _op_name = "END_OF_WORD" + +class Failure(ZeroWidthBase): + _op_name = "FAILURE" + + def _compile(self, reverse, fuzzy): + return [(OP.FAILURE, )] + +class Fuzzy(RegexBase): + def __init__(self, subpattern, constraints=None): + RegexBase.__init__(self) + if constraints is None: + constraints = {} + self.subpattern = subpattern + self.constraints = constraints + + # If an error type is mentioned in the cost equation, then its maximum + # defaults to unlimited. + if "cost" in constraints: + for e in "dis": + if e in constraints["cost"]: + constraints.setdefault(e, (0, None)) + + # If any error type is mentioned, then all the error maxima default to + # 0, otherwise they default to unlimited. + if set(constraints) & set("dis"): + for e in "dis": + constraints.setdefault(e, (0, 0)) + else: + for e in "dis": + constraints.setdefault(e, (0, None)) + + # The maximum of the generic error type defaults to unlimited. + constraints.setdefault("e", (0, None)) + + # The cost equation defaults to equal costs. Also, the cost of any + # error type not mentioned in the cost equation defaults to 0. + if "cost" in constraints: + for e in "dis": + constraints["cost"].setdefault(e, 0) + else: + constraints["cost"] = {"d": 1, "i": 1, "s": 1, "max": + constraints["e"][1]} + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, reverse, True) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + self.subpattern = self.subpattern.remove_captures() + return self + + def is_atomic(self): + return self.subpattern.is_atomic() + + def contains_group(self): + return self.subpattern.contains_group() + + def _compile(self, reverse, fuzzy): + # The individual limits. + arguments = [] + for e in "dise": + v = self.constraints[e] + arguments.append(v[0]) + arguments.append(UNLIMITED if v[1] is None else v[1]) + + # The coeffs of the cost equation. + for e in "dis": + arguments.append(self.constraints["cost"][e]) + + # The maximum of the cost equation. 
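+        # A worked example (illustrative): for "{2i+1d+1s<=4}" the parsed
+        # cost dict is {"i": 2, "d": 1, "s": 1, "max": 4}, so the arguments
+        # gain the coefficients for "d", "i", "s" and then the limit 4.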
+ v = self.constraints["cost"]["max"] + arguments.append(UNLIMITED if v is None else v) + + flags = 0 + if reverse: + flags |= REVERSE_OP + + test = self.constraints.get("test") + + if test: + return ([(OP.FUZZY_EXT, flags) + tuple(arguments)] + + test.compile(reverse, True) + [(OP.NEXT,)] + + self.subpattern.compile(reverse, True) + [(OP.END,)]) + + return ([(OP.FUZZY, flags) + tuple(arguments)] + + self.subpattern.compile(reverse, True) + [(OP.END,)]) + + def dump(self, indent, reverse): + constraints = self._constraints_to_string() + if constraints: + constraints = " " + constraints + print("{}FUZZY{}".format(INDENT * indent, constraints)) + self.subpattern.dump(indent + 1, reverse) + + def is_empty(self): + return self.subpattern.is_empty() + + def __eq__(self, other): + return (type(self) is type(other) and self.subpattern == + other.subpattern and self.constraints == other.constraints) + + def max_width(self): + return UNLIMITED + + def _constraints_to_string(self): + constraints = [] + + for name in "ids": + min, max = self.constraints[name] + if max == 0: + continue + + con = "" + + if min > 0: + con = "{}<=".format(min) + + con += name + + if max is not None: + con += "<={}".format(max) + + constraints.append(con) + + cost = [] + for name in "ids": + coeff = self.constraints["cost"][name] + if coeff > 0: + cost.append("{}{}".format(coeff, name)) + + limit = self.constraints["cost"]["max"] + if limit is not None and limit > 0: + cost = "{}<={}".format("+".join(cost), limit) + constraints.append(cost) + + return ",".join(constraints) + +class Grapheme(RegexBase): + def _compile(self, reverse, fuzzy): + # Match at least 1 character until a grapheme boundary is reached. Note + # that this is the same whether matching forwards or backwards. + grapheme_matcher = Atomic(Sequence([LazyRepeat(AnyAll(), 1, None), + GraphemeBoundary()])) + + return grapheme_matcher.compile(reverse, fuzzy) + + def dump(self, indent, reverse): + print("{}GRAPHEME".format(INDENT * indent)) + + def max_width(self): + return UNLIMITED + +class GraphemeBoundary: + def compile(self, reverse, fuzzy): + return [(OP.GRAPHEME_BOUNDARY, 1)] + +class GreedyRepeat(RegexBase): + _opcode = OP.GREEDY_REPEAT + _op_name = "GREEDY_REPEAT" + + def __init__(self, subpattern, min_count, max_count): + RegexBase.__init__(self) + self.subpattern = subpattern + self.min_count = min_count + self.max_count = max_count + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + subpattern = self.subpattern.optimise(info, reverse) + + return type(self)(subpattern, self.min_count, self.max_count) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + self.subpattern = self.subpattern.remove_captures() + return self + + def is_atomic(self): + return self.min_count == self.max_count and self.subpattern.is_atomic() + + def can_be_affix(self): + return False + + def contains_group(self): + return self.subpattern.contains_group() + + def get_firstset(self, reverse): + fs = self.subpattern.get_firstset(reverse) + if self.min_count == 0: + fs.add(None) + + return fs + + def _compile(self, reverse, fuzzy): + repeat = [self._opcode, self.min_count] + if self.max_count is None: + repeat.append(UNLIMITED) + else: + repeat.append(self.max_count) + + subpattern = self.subpattern.compile(reverse, fuzzy) + if not subpattern: + return [] + + return ([tuple(repeat)] + subpattern + 
[(OP.END, )]) + + def dump(self, indent, reverse): + if self.max_count is None: + limit = "INF" + else: + limit = self.max_count + print("{}{} {} {}".format(INDENT * indent, self._op_name, + self.min_count, limit)) + + self.subpattern.dump(indent + 1, reverse) + + def is_empty(self): + return self.subpattern.is_empty() + + def __eq__(self, other): + return type(self) is type(other) and (self.subpattern, self.min_count, + self.max_count) == (other.subpattern, other.min_count, + other.max_count) + + def max_width(self): + if self.max_count is None: + return UNLIMITED + + return self.subpattern.max_width() * self.max_count + + def get_required_string(self, reverse): + max_count = UNLIMITED if self.max_count is None else self.max_count + if self.min_count == 0: + w = self.subpattern.max_width() * max_count + return min(w, UNLIMITED), None + + ofs, req = self.subpattern.get_required_string(reverse) + if req: + return ofs, req + + w = self.subpattern.max_width() * max_count + return min(w, UNLIMITED), None + +class PossessiveRepeat(GreedyRepeat): + def is_atomic(self): + return True + + def _compile(self, reverse, fuzzy): + subpattern = self.subpattern.compile(reverse, fuzzy) + if not subpattern: + return [] + + repeat = [self._opcode, self.min_count] + if self.max_count is None: + repeat.append(UNLIMITED) + else: + repeat.append(self.max_count) + + return ([(OP.ATOMIC, ), tuple(repeat)] + subpattern + [(OP.END, ), + (OP.END, )]) + + def dump(self, indent, reverse): + print("{}ATOMIC".format(INDENT * indent)) + + if self.max_count is None: + limit = "INF" + else: + limit = self.max_count + print("{}{} {} {}".format(INDENT * (indent + 1), self._op_name, + self.min_count, limit)) + + self.subpattern.dump(indent + 2, reverse) + +class Group(RegexBase): + def __init__(self, info, group, subpattern): + RegexBase.__init__(self) + self.info = info + self.group = group + self.subpattern = subpattern + + self.call_ref = None + + def fix_groups(self, pattern, reverse, fuzzy): + self.info.defined_groups[self.group] = (self, reverse, fuzzy) + self.subpattern.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + subpattern = self.subpattern.optimise(info, reverse) + + return Group(self.info, self.group, subpattern) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + return self.subpattern.remove_captures() + + def is_atomic(self): + return self.subpattern.is_atomic() + + def can_be_affix(self): + return False + + def contains_group(self): + return True + + def get_firstset(self, reverse): + return self.subpattern.get_firstset(reverse) + + def has_simple_start(self): + return self.subpattern.has_simple_start() + + def _compile(self, reverse, fuzzy): + code = [] + + public_group = private_group = self.group + if private_group < 0: + public_group = self.info.private_groups[private_group] + private_group = self.info.group_count - private_group + + key = self.group, reverse, fuzzy + ref = self.info.call_refs.get(key) + if ref is not None: + code += [(OP.CALL_REF, ref)] + + code += [(OP.GROUP, int(not reverse), private_group, public_group)] + code += self.subpattern.compile(reverse, fuzzy) + code += [(OP.END, )] + + if ref is not None: + code += [(OP.END, )] + + return code + + def dump(self, indent, reverse): + group = self.group + if group < 0: + group = private_groups[group] + print("{}GROUP {}".format(INDENT * indent, group)) + self.subpattern.dump(indent + 1, reverse) + + def __eq__(self, other): + 
return (type(self) is type(other) and (self.group, self.subpattern) == + (other.group, other.subpattern)) + + def max_width(self): + return self.subpattern.max_width() + + def get_required_string(self, reverse): + return self.subpattern.get_required_string(reverse) + + def __del__(self): + self.info = None + +class Keep(ZeroWidthBase): + _opcode = OP.KEEP + _op_name = "KEEP" + +class LazyRepeat(GreedyRepeat): + _opcode = OP.LAZY_REPEAT + _op_name = "LAZY_REPEAT" + +class LookAround(RegexBase): + _dir_text = {False: "AHEAD", True: "BEHIND"} + + def __init__(self, behind, positive, subpattern): + RegexBase.__init__(self) + self.behind = bool(behind) + self.positive = bool(positive) + self.subpattern = subpattern + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, self.behind, fuzzy) + + def optimise(self, info, reverse): + subpattern = self.subpattern.optimise(info, self.behind) + if self.positive and subpattern.is_empty(): + return subpattern + + return LookAround(self.behind, self.positive, subpattern) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + return self.subpattern.remove_captures() + + def is_atomic(self): + return self.subpattern.is_atomic() + + def can_be_affix(self): + return self.subpattern.can_be_affix() + + def contains_group(self): + return self.subpattern.contains_group() + + def get_firstset(self, reverse): + if self.positive and self.behind == reverse: + return self.subpattern.get_firstset(reverse) + + return set([None]) + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if fuzzy: + flags |= FUZZY_OP + if reverse: + flags |= REVERSE_OP + + return ([(OP.LOOKAROUND, flags, int(not self.behind))] + + self.subpattern.compile(self.behind) + [(OP.END, )]) + + def dump(self, indent, reverse): + print("{}LOOK{} {}".format(INDENT * indent, + self._dir_text[self.behind], POS_TEXT[self.positive])) + self.subpattern.dump(indent + 1, self.behind) + + def is_empty(self): + return self.positive and self.subpattern.is_empty() + + def __eq__(self, other): + return type(self) is type(other) and (self.behind, self.positive, + self.subpattern) == (other.behind, other.positive, other.subpattern) + + def max_width(self): + return 0 + +class LookAroundConditional(RegexBase): + _dir_text = {False: "AHEAD", True: "BEHIND"} + + def __init__(self, behind, positive, subpattern, yes_item, no_item): + RegexBase.__init__(self) + self.behind = bool(behind) + self.positive = bool(positive) + self.subpattern = subpattern + self.yes_item = yes_item + self.no_item = no_item + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, reverse, fuzzy) + self.yes_item.fix_groups(pattern, reverse, fuzzy) + self.no_item.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + subpattern = self.subpattern.optimise(info, self.behind) + yes_item = self.yes_item.optimise(info, self.behind) + no_item = self.no_item.optimise(info, self.behind) + + return LookAroundConditional(self.behind, self.positive, subpattern, + yes_item, no_item) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + self.yes_item = self.yes_item.pack_characters(info) + self.no_item = self.no_item.pack_characters(info) + return self + + def remove_captures(self): + self.subpattern = self.subpattern.remove_captures() + self.yes_item = self.yes_item.remove_captures() + self.no_item = 
self.no_item.remove_captures() + + def is_atomic(self): + return (self.subpattern.is_atomic() and self.yes_item.is_atomic() and + self.no_item.is_atomic()) + + def can_be_affix(self): + return (self.subpattern.can_be_affix() and self.yes_item.can_be_affix() + and self.no_item.can_be_affix()) + + def contains_group(self): + return (self.subpattern.contains_group() or + self.yes_item.contains_group() or self.no_item.contains_group()) + + def _compile(self, reverse, fuzzy): + code = [(OP.CONDITIONAL, int(self.positive), int(not self.behind))] + code.extend(self.subpattern.compile(self.behind, fuzzy)) + code.append((OP.NEXT, )) + code.extend(self.yes_item.compile(reverse, fuzzy)) + add_code = self.no_item.compile(reverse, fuzzy) + if add_code: + code.append((OP.NEXT, )) + code.extend(add_code) + + code.append((OP.END, )) + + return code + + def dump(self, indent, reverse): + print("{}CONDITIONAL {} {}".format(INDENT * indent, + self._dir_text[self.behind], POS_TEXT[self.positive])) + self.subpattern.dump(indent + 1, self.behind) + print("{}EITHER".format(INDENT * indent)) + self.yes_item.dump(indent + 1, reverse) + if not self.no_item.is_empty(): + print("{}OR".format(INDENT * indent)) + self.no_item.dump(indent + 1, reverse) + + def is_empty(self): + return (self.subpattern.is_empty() and self.yes_item.is_empty() or + self.no_item.is_empty()) + + def __eq__(self, other): + return type(self) is type(other) and (self.subpattern, self.yes_item, + self.no_item) == (other.subpattern, other.yes_item, other.no_item) + + def max_width(self): + return max(self.yes_item.max_width(), self.no_item.max_width()) + + def get_required_string(self, reverse): + return self.max_width(), None + +class PrecompiledCode(RegexBase): + def __init__(self, code): + self.code = code + + def _compile(self, reverse, fuzzy): + return [tuple(self.code)] + +class Property(RegexBase): + _opcode = {(NOCASE, False): OP.PROPERTY, (IGNORECASE, False): + OP.PROPERTY_IGN, (FULLCASE, False): OP.PROPERTY, (FULLIGNORECASE, False): + OP.PROPERTY_IGN, (NOCASE, True): OP.PROPERTY_REV, (IGNORECASE, True): + OP.PROPERTY_IGN_REV, (FULLCASE, True): OP.PROPERTY_REV, (FULLIGNORECASE, + True): OP.PROPERTY_IGN_REV} + + def __init__(self, value, positive=True, case_flags=NOCASE, + zerowidth=False): + RegexBase.__init__(self) + self.value = value + self.positive = bool(positive) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + self.zerowidth = bool(zerowidth) + + self._key = (self.__class__, self.value, self.positive, + self.case_flags, self.zerowidth) + + def rebuild(self, positive, case_flags, zerowidth): + return Property(self.value, positive, case_flags, zerowidth) + + def optimise(self, info, reverse, in_set=False): + return self + + def get_firstset(self, reverse): + return set([self]) + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + return [(self._opcode[self.case_flags, reverse], flags, self.value)] + + def dump(self, indent, reverse): + prop = PROPERTY_NAMES[self.value >> 16] + name, value = prop[0], prop[1][self.value & 0xFFFF] + print("{}PROPERTY {} {}:{}{}".format(INDENT * indent, + POS_TEXT[self.positive], name, value, CASE_TEXT[self.case_flags])) + + def matches(self, ch): + return _regex.has_property_value(self.value, ch) == self.positive + + def max_width(self): + return 1 + +class Prune(ZeroWidthBase): + _op_name = "PRUNE" + + def _compile(self, reverse, 
fuzzy): + return [(OP.PRUNE, )] + +class Range(RegexBase): + _opcode = {(NOCASE, False): OP.RANGE, (IGNORECASE, False): OP.RANGE_IGN, + (FULLCASE, False): OP.RANGE, (FULLIGNORECASE, False): OP.RANGE_IGN, + (NOCASE, True): OP.RANGE_REV, (IGNORECASE, True): OP.RANGE_IGN_REV, + (FULLCASE, True): OP.RANGE_REV, (FULLIGNORECASE, True): OP.RANGE_IGN_REV} + _op_name = "RANGE" + + def __init__(self, lower, upper, positive=True, case_flags=NOCASE, + zerowidth=False): + RegexBase.__init__(self) + self.lower = lower + self.upper = upper + self.positive = bool(positive) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + self.zerowidth = bool(zerowidth) + + self._key = (self.__class__, self.lower, self.upper, self.positive, + self.case_flags, self.zerowidth) + + def rebuild(self, positive, case_flags, zerowidth): + return Range(self.lower, self.upper, positive, case_flags, zerowidth) + + def optimise(self, info, reverse, in_set=False): + # Is the range case-sensitive? + if not self.positive or not (self.case_flags & IGNORECASE) or in_set: + return self + + # Is full case-folding possible? + if (not (info.flags & UNICODE) or (self.case_flags & FULLIGNORECASE) != + FULLIGNORECASE): + return self + + # Get the characters which expand to multiple codepoints on folding. + expanding_chars = _regex.get_expand_on_folding() + + # Get the folded characters in the range. + items = [] + for ch in expanding_chars: + if self.lower <= ord(ch) <= self.upper: + folded = _regex.fold_case(FULL_CASE_FOLDING, ch) + items.append(String([ord(c) for c in folded], + case_flags=self.case_flags)) + + if not items: + # We can fall back to simple case-folding. + return self + + if len(items) < self.upper - self.lower + 1: + # Not all the characters are covered by the full case-folding. 
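+            # An illustrative case (assuming FULLIGNORECASE and UNICODE): a
+            # range that includes U+00DF (ß) gains a String alternative for
+            # "ss", since ß expands to "ss" under full case-folding, while
+            # the Range itself keeps handling single-codepoint matches.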
+ items.insert(0, self) + + return Branch(items) + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + return [(self._opcode[self.case_flags, reverse], flags, self.lower, + self.upper)] + + def dump(self, indent, reverse): + display_lower = ascii(chr(self.lower)).lstrip("bu") + display_upper = ascii(chr(self.upper)).lstrip("bu") + print("{}RANGE {} {} {}{}".format(INDENT * indent, + POS_TEXT[self.positive], display_lower, display_upper, + CASE_TEXT[self.case_flags])) + + def matches(self, ch): + return (self.lower <= ch <= self.upper) == self.positive + + def max_width(self): + return 1 + +class RefGroup(RegexBase): + _opcode = {(NOCASE, False): OP.REF_GROUP, (IGNORECASE, False): + OP.REF_GROUP_IGN, (FULLCASE, False): OP.REF_GROUP, (FULLIGNORECASE, + False): OP.REF_GROUP_FLD, (NOCASE, True): OP.REF_GROUP_REV, (IGNORECASE, + True): OP.REF_GROUP_IGN_REV, (FULLCASE, True): OP.REF_GROUP_REV, + (FULLIGNORECASE, True): OP.REF_GROUP_FLD_REV} + + def __init__(self, info, group, position, case_flags=NOCASE): + RegexBase.__init__(self) + self.info = info + self.group = group + self.position = position + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + + self._key = self.__class__, self.group, self.case_flags + + def fix_groups(self, pattern, reverse, fuzzy): + try: + self.group = int(self.group) + except ValueError: + try: + self.group = self.info.group_index[self.group] + except KeyError: + raise error("unknown group", pattern, self.position) + + if not 1 <= self.group <= self.info.group_count: + raise error("invalid group reference", pattern, self.position) + + self._key = self.__class__, self.group, self.case_flags + + def remove_captures(self): + raise error("group reference not allowed", pattern, self.position) + + def _compile(self, reverse, fuzzy): + flags = 0 + if fuzzy: + flags |= FUZZY_OP + return [(self._opcode[self.case_flags, reverse], flags, self.group)] + + def dump(self, indent, reverse): + print("{}REF_GROUP {}{}".format(INDENT * indent, self.group, + CASE_TEXT[self.case_flags])) + + def max_width(self): + return UNLIMITED + + def __del__(self): + self.info = None + +class SearchAnchor(ZeroWidthBase): + _opcode = OP.SEARCH_ANCHOR + _op_name = "SEARCH_ANCHOR" + +class Sequence(RegexBase): + def __init__(self, items=None): + RegexBase.__init__(self) + if items is None: + items = [] + + self.items = items + + def fix_groups(self, pattern, reverse, fuzzy): + for s in self.items: + s.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + # Flatten the sequences. + items = [] + for s in self.items: + s = s.optimise(info, reverse) + if isinstance(s, Sequence): + items.extend(s.items) + else: + items.append(s) + + return make_sequence(items) + + def pack_characters(self, info): + "Packs sequences of characters into strings." + items = [] + characters = [] + case_flags = NOCASE + for s in self.items: + if type(s) is Character and s.positive and not s.zerowidth: + if s.case_flags != case_flags: + # Different case sensitivity, so flush, unless neither the + # previous nor the new character are cased. 
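+                    # ("Cased" here means having case variants: digits and
+                    # punctuation are case-less, so a change of case flags
+                    # across them needn't split the packed string. As an
+                    # illustration, three adjacent Character nodes 'c', 'a',
+                    # 't' with equal case flags pack into the String "cat".)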
+                    if s.case_flags or is_cased_i(info, s.value):
+                        Sequence._flush_characters(info, characters,
+                          case_flags, items)
+
+                    case_flags = s.case_flags
+
+                characters.append(s.value)
+            elif type(s) is String or type(s) is Literal:
+                if s.case_flags != case_flags:
+                    # Different case sensitivity, so flush, unless neither
+                    # the previous nor the new string are cased.
+                    if s.case_flags or any(is_cased_i(info, c) for c in
+                      characters):
+                        Sequence._flush_characters(info, characters,
+                          case_flags, items)
+
+                    case_flags = s.case_flags
+
+                characters.extend(s.characters)
+            else:
+                Sequence._flush_characters(info, characters, case_flags, items)
+
+                items.append(s.pack_characters(info))
+
+        Sequence._flush_characters(info, characters, case_flags, items)
+
+        return make_sequence(items)
+
+    def remove_captures(self):
+        self.items = [s.remove_captures() for s in self.items]
+        return self
+
+    def is_atomic(self):
+        return all(s.is_atomic() for s in self.items)
+
+    def can_be_affix(self):
+        return False
+
+    def contains_group(self):
+        return any(s.contains_group() for s in self.items)
+
+    def get_firstset(self, reverse):
+        fs = set()
+        items = self.items
+        if reverse:
+            items.reverse()
+        for s in items:
+            fs |= s.get_firstset(reverse)
+            if None not in fs:
+                return fs
+            fs.discard(None)
+
+        return fs | set([None])
+
+    def has_simple_start(self):
+        return bool(self.items) and self.items[0].has_simple_start()
+
+    def _compile(self, reverse, fuzzy):
+        seq = self.items
+        if reverse:
+            seq = seq[::-1]
+
+        code = []
+        for s in seq:
+            code.extend(s.compile(reverse, fuzzy))
+
+        return code
+
+    def dump(self, indent, reverse):
+        for s in self.items:
+            s.dump(indent, reverse)
+
+    @staticmethod
+    def _flush_characters(info, characters, case_flags, items):
+        if not characters:
+            return
+
+        # Disregard case_flags if all of the characters are case-less.
+        if case_flags & IGNORECASE:
+            if not any(is_cased_i(info, c) for c in characters):
+                case_flags = NOCASE
+
+        if (case_flags & FULLIGNORECASE) == FULLIGNORECASE:
+            literals = Sequence._fix_full_casefold(characters)
+
+            for item in literals:
+                chars = item.characters
+
+                if len(chars) == 1:
+                    items.append(Character(chars[0], case_flags=item.case_flags))
+                else:
+                    items.append(String(chars, case_flags=item.case_flags))
+        else:
+            if len(characters) == 1:
+                items.append(Character(characters[0], case_flags=case_flags))
+            else:
+                items.append(String(characters, case_flags=case_flags))
+
+        characters[:] = []
+
+    @staticmethod
+    def _fix_full_casefold(characters):
+        # Split a literal needing full case-folding into chunks that need it
+        # and chunks that can use simple case-folding, which is faster.
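+        # An illustrative split (assuming FULLIGNORECASE): in "straße" only
+        # the chunk around "ß", which case-folds fully to "ss", keeps
+        # FULLIGNORECASE; the surrounding chunks drop back to plain
+        # IGNORECASE.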
+ expanded = [_regex.fold_case(FULL_CASE_FOLDING, c) for c in + _regex.get_expand_on_folding()] + string = _regex.fold_case(FULL_CASE_FOLDING, ''.join(chr(c) + for c in characters)).lower() + chunks = [] + + for e in expanded: + found = string.find(e) + + while found >= 0: + chunks.append((found, found + len(e))) + found = string.find(e, found + 1) + + pos = 0 + literals = [] + + for start, end in Sequence._merge_chunks(chunks): + if pos < start: + literals.append(Literal(characters[pos : start], + case_flags=IGNORECASE)) + + literals.append(Literal(characters[start : end], + case_flags=FULLIGNORECASE)) + pos = end + + if pos < len(characters): + literals.append(Literal(characters[pos : ], case_flags=IGNORECASE)) + + return literals + + @staticmethod + def _merge_chunks(chunks): + if len(chunks) < 2: + return chunks + + chunks.sort() + + start, end = chunks[0] + new_chunks = [] + + for s, e in chunks[1 : ]: + if s <= end: + end = max(end, e) + else: + new_chunks.append((start, end)) + start, end = s, e + + new_chunks.append((start, end)) + + return new_chunks + + def is_empty(self): + return all(i.is_empty() for i in self.items) + + def __eq__(self, other): + return type(self) is type(other) and self.items == other.items + + def max_width(self): + return sum(s.max_width() for s in self.items) + + def get_required_string(self, reverse): + seq = self.items + if reverse: + seq = seq[::-1] + + offset = 0 + + for s in seq: + ofs, req = s.get_required_string(reverse) + offset += ofs + if req: + return offset, req + + return offset, None + +class SetBase(RegexBase): + def __init__(self, info, items, positive=True, case_flags=NOCASE, + zerowidth=False): + RegexBase.__init__(self) + self.info = info + self.items = tuple(items) + self.positive = bool(positive) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + self.zerowidth = bool(zerowidth) + + self.char_width = 1 + + self._key = (self.__class__, self.items, self.positive, + self.case_flags, self.zerowidth) + + def rebuild(self, positive, case_flags, zerowidth): + return type(self)(self.info, self.items, positive, case_flags, + zerowidth).optimise(self.info, False) + + def get_firstset(self, reverse): + return set([self]) + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + code = [(self._opcode[self.case_flags, reverse], flags)] + for m in self.items: + code.extend(m.compile()) + + code.append((OP.END, )) + + return code + + def dump(self, indent, reverse): + print("{}{} {}{}".format(INDENT * indent, self._op_name, + POS_TEXT[self.positive], CASE_TEXT[self.case_flags])) + for i in self.items: + i.dump(indent + 1, reverse) + + def _handle_case_folding(self, info, in_set): + # Is the set case-sensitive? + if not self.positive or not (self.case_flags & IGNORECASE) or in_set: + return self + + # Is full case-folding possible? + if (not (self.info.flags & UNICODE) or (self.case_flags & + FULLIGNORECASE) != FULLIGNORECASE): + return self + + # Get the characters which expand to multiple codepoints on folding. + expanding_chars = _regex.get_expand_on_folding() + + # Get the folded characters in the set. 
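+        # For example (illustrative): a FULLIGNORECASE set that matches
+        # U+FB01 (the "fi" ligature) is rewritten as a Branch of the set
+        # itself plus a String for "fi", because the ligature
+        # full-case-folds to the two-character string "fi".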
+ items = [] + seen = set() + for ch in expanding_chars: + if self.matches(ord(ch)): + folded = _regex.fold_case(FULL_CASE_FOLDING, ch) + if folded not in seen: + items.append(String([ord(c) for c in folded], + case_flags=self.case_flags)) + seen.add(folded) + + if not items: + # We can fall back to simple case-folding. + return self + + return Branch([self] + items) + + def max_width(self): + # Is the set case-sensitive? + if not self.positive or not (self.case_flags & IGNORECASE): + return 1 + + # Is full case-folding possible? + if (not (self.info.flags & UNICODE) or (self.case_flags & + FULLIGNORECASE) != FULLIGNORECASE): + return 1 + + # Get the characters which expand to multiple codepoints on folding. + expanding_chars = _regex.get_expand_on_folding() + + # Get the folded characters in the set. + seen = set() + for ch in expanding_chars: + if self.matches(ord(ch)): + folded = _regex.fold_case(FULL_CASE_FOLDING, ch) + seen.add(folded) + + if not seen: + return 1 + + return max(len(folded) for folded in seen) + + def __del__(self): + self.info = None + +class SetDiff(SetBase): + _opcode = {(NOCASE, False): OP.SET_DIFF, (IGNORECASE, False): + OP.SET_DIFF_IGN, (FULLCASE, False): OP.SET_DIFF, (FULLIGNORECASE, False): + OP.SET_DIFF_IGN, (NOCASE, True): OP.SET_DIFF_REV, (IGNORECASE, True): + OP.SET_DIFF_IGN_REV, (FULLCASE, True): OP.SET_DIFF_REV, (FULLIGNORECASE, + True): OP.SET_DIFF_IGN_REV} + _op_name = "SET_DIFF" + + def optimise(self, info, reverse, in_set=False): + items = self.items + if len(items) > 2: + items = [items[0], SetUnion(info, items[1 : ])] + + if len(items) == 1: + return items[0].with_flags(case_flags=self.case_flags, + zerowidth=self.zerowidth).optimise(info, reverse, in_set) + + self.items = tuple(m.optimise(info, reverse, in_set=True) for m in + items) + + return self._handle_case_folding(info, in_set) + + def matches(self, ch): + m = self.items[0].matches(ch) and not self.items[1].matches(ch) + return m == self.positive + +class SetInter(SetBase): + _opcode = {(NOCASE, False): OP.SET_INTER, (IGNORECASE, False): + OP.SET_INTER_IGN, (FULLCASE, False): OP.SET_INTER, (FULLIGNORECASE, + False): OP.SET_INTER_IGN, (NOCASE, True): OP.SET_INTER_REV, (IGNORECASE, + True): OP.SET_INTER_IGN_REV, (FULLCASE, True): OP.SET_INTER_REV, + (FULLIGNORECASE, True): OP.SET_INTER_IGN_REV} + _op_name = "SET_INTER" + + def optimise(self, info, reverse, in_set=False): + items = [] + for m in self.items: + m = m.optimise(info, reverse, in_set=True) + if isinstance(m, SetInter) and m.positive: + # Intersection in intersection. 
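+                # For example (VERSION1 set operations), in
+                # [[a-z]&&[[b-d]&&[c-e]]] the nested positive intersection
+                # folds into a single three-way SET_INTER node.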
+ items.extend(m.items) + else: + items.append(m) + + if len(items) == 1: + return items[0].with_flags(case_flags=self.case_flags, + zerowidth=self.zerowidth).optimise(info, reverse, in_set) + + self.items = tuple(items) + + return self._handle_case_folding(info, in_set) + + def matches(self, ch): + m = all(i.matches(ch) for i in self.items) + return m == self.positive + +class SetSymDiff(SetBase): + _opcode = {(NOCASE, False): OP.SET_SYM_DIFF, (IGNORECASE, False): + OP.SET_SYM_DIFF_IGN, (FULLCASE, False): OP.SET_SYM_DIFF, (FULLIGNORECASE, + False): OP.SET_SYM_DIFF_IGN, (NOCASE, True): OP.SET_SYM_DIFF_REV, + (IGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV, (FULLCASE, True): + OP.SET_SYM_DIFF_REV, (FULLIGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV} + _op_name = "SET_SYM_DIFF" + + def optimise(self, info, reverse, in_set=False): + items = [] + for m in self.items: + m = m.optimise(info, reverse, in_set=True) + if isinstance(m, SetSymDiff) and m.positive: + # Symmetric difference in symmetric difference. + items.extend(m.items) + else: + items.append(m) + + if len(items) == 1: + return items[0].with_flags(case_flags=self.case_flags, + zerowidth=self.zerowidth).optimise(info, reverse, in_set) + + self.items = tuple(items) + + return self._handle_case_folding(info, in_set) + + def matches(self, ch): + m = False + for i in self.items: + m = m != i.matches(ch) + + return m == self.positive + +class SetUnion(SetBase): + _opcode = {(NOCASE, False): OP.SET_UNION, (IGNORECASE, False): + OP.SET_UNION_IGN, (FULLCASE, False): OP.SET_UNION, (FULLIGNORECASE, + False): OP.SET_UNION_IGN, (NOCASE, True): OP.SET_UNION_REV, (IGNORECASE, + True): OP.SET_UNION_IGN_REV, (FULLCASE, True): OP.SET_UNION_REV, + (FULLIGNORECASE, True): OP.SET_UNION_IGN_REV} + _op_name = "SET_UNION" + + def optimise(self, info, reverse, in_set=False): + items = [] + for m in self.items: + m = m.optimise(info, reverse, in_set=True) + if isinstance(m, SetUnion) and m.positive: + # Union in union. 
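+                # For example, the nested set [ab[cd]] (VERSION1 syntax)
+                # flattens into the single union [abcd].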
+ items.extend(m.items) + else: + items.append(m) + + if len(items) == 1: + i = items[0] + return i.with_flags(positive=i.positive == self.positive, + case_flags=self.case_flags, + zerowidth=self.zerowidth).optimise(info, reverse, in_set) + + self.items = tuple(items) + + return self._handle_case_folding(info, in_set) + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + + characters, others = defaultdict(list), [] + for m in self.items: + if isinstance(m, Character): + characters[m.positive].append(m.value) + else: + others.append(m) + + code = [(self._opcode[self.case_flags, reverse], flags)] + + for positive, values in characters.items(): + flags = 0 + if positive: + flags |= POSITIVE_OP + if len(values) == 1: + code.append((OP.CHARACTER, flags, values[0])) + else: + code.append((OP.STRING, flags, len(values)) + tuple(values)) + + for m in others: + code.extend(m.compile()) + + code.append((OP.END, )) + + return code + + def matches(self, ch): + m = any(i.matches(ch) for i in self.items) + return m == self.positive + +class Skip(ZeroWidthBase): + _op_name = "SKIP" + _opcode = OP.SKIP + +class StartOfLine(ZeroWidthBase): + _opcode = OP.START_OF_LINE + _op_name = "START_OF_LINE" + +class StartOfLineU(StartOfLine): + _opcode = OP.START_OF_LINE_U + _op_name = "START_OF_LINE_U" + +class StartOfString(ZeroWidthBase): + _opcode = OP.START_OF_STRING + _op_name = "START_OF_STRING" + +class StartOfWord(ZeroWidthBase): + _opcode = OP.START_OF_WORD + _op_name = "START_OF_WORD" + +class String(RegexBase): + _opcode = {(NOCASE, False): OP.STRING, (IGNORECASE, False): OP.STRING_IGN, + (FULLCASE, False): OP.STRING, (FULLIGNORECASE, False): OP.STRING_FLD, + (NOCASE, True): OP.STRING_REV, (IGNORECASE, True): OP.STRING_IGN_REV, + (FULLCASE, True): OP.STRING_REV, (FULLIGNORECASE, True): + OP.STRING_FLD_REV} + + def __init__(self, characters, case_flags=NOCASE): + self.characters = tuple(characters) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + + if (self.case_flags & FULLIGNORECASE) == FULLIGNORECASE: + folded_characters = [] + for char in self.characters: + folded = _regex.fold_case(FULL_CASE_FOLDING, chr(char)) + folded_characters.extend(ord(c) for c in folded) + else: + folded_characters = self.characters + + self.folded_characters = tuple(folded_characters) + self.required = False + + self._key = self.__class__, self.characters, self.case_flags + + def get_firstset(self, reverse): + if reverse: + pos = -1 + else: + pos = 0 + return set([Character(self.characters[pos], + case_flags=self.case_flags)]) + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if fuzzy: + flags |= FUZZY_OP + if self.required: + flags |= REQUIRED_OP + return [(self._opcode[self.case_flags, reverse], flags, + len(self.folded_characters)) + self.folded_characters] + + def dump(self, indent, reverse): + display = ascii("".join(chr(c) for c in self.characters)).lstrip("bu") + print("{}STRING {}{}".format(INDENT * indent, display, + CASE_TEXT[self.case_flags])) + + def max_width(self): + return len(self.folded_characters) + + def get_required_string(self, reverse): + return 0, self + +class Literal(String): + def dump(self, indent, reverse): + literal = ''.join(chr(c) for c in self.characters) + display = ascii(literal).lstrip("bu") + print("{}LITERAL MATCH {}{}".format(INDENT * indent, display, + CASE_TEXT[self.case_flags])) + +class StringSet(Branch): + 
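+    # Implements the \L<name> named-list construct; illustratively,
+    # regex.compile(r"\L<words>", words=["cat", "dog"]) builds one branch
+    # per list item, sorted longest-first so that longer strings win.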
def __init__(self, info, name, case_flags=NOCASE): + self.info = info + self.name = name + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + + self._key = self.__class__, self.name, self.case_flags + + self.set_key = (name, self.case_flags) + if self.set_key not in info.named_lists_used: + info.named_lists_used[self.set_key] = len(info.named_lists_used) + + index = self.info.named_lists_used[self.set_key] + items = self.info.kwargs[self.name] + + case_flags = self.case_flags + + encoding = self.info.flags & _ALL_ENCODINGS + fold_flags = encoding | case_flags + + choices = [] + + for string in items: + if isinstance(string, str): + string = [ord(c) for c in string] + + choices.append([Character(c, case_flags=case_flags) for c in + string]) + + # Sort from longest to shortest. + choices.sort(key=len, reverse=True) + + self.branches = [Sequence(choice) for choice in choices] + + def dump(self, indent, reverse): + print("{}STRING_SET {}{}".format(INDENT * indent, self.name, + CASE_TEXT[self.case_flags])) + + def __del__(self): + self.info = None + +class Source: + "Scanner for the regular expression source string." + def __init__(self, string): + if isinstance(string, str): + self.string = string + self.char_type = chr + else: + self.string = string.decode("latin-1") + self.char_type = lambda c: bytes([c]) + + self.pos = 0 + self.ignore_space = False + self.sep = string[ : 0] + + def get(self, override_ignore=False): + string = self.string + pos = self.pos + + try: + if self.ignore_space and not override_ignore: + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + else: + break + + ch = string[pos] + self.pos = pos + 1 + return ch + except IndexError: + # We've reached the end of the string. + self.pos = pos + return string[ : 0] + except ValueError: + # The comment extended to the end of the string. + self.pos = len(string) + return string[ : 0] + + def get_many(self, count=1): + string = self.string + pos = self.pos + + try: + if self.ignore_space: + substring = [] + + while len(substring) < count: + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + else: + break + + substring.append(string[pos]) + pos += 1 + + substring = "".join(substring) + else: + substring = string[pos : pos + count] + pos += len(substring) + + self.pos = pos + return substring + except IndexError: + # We've reached the end of the string. + self.pos = len(string) + return "".join(substring) + except ValueError: + # The comment extended to the end of the string. + self.pos = len(string) + return "".join(substring) + + def get_while(self, test_set, include=True): + string = self.string + pos = self.pos + + if self.ignore_space: + try: + substring = [] + + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + elif (string[pos] in test_set) == include: + substring.append(string[pos]) + pos += 1 + else: + break + + self.pos = pos + except IndexError: + # We've reached the end of the string. + self.pos = len(string) + except ValueError: + # The comment extended to the end of the string. 
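+                # (Illustrative: with the VERBOSE flag, ignore_space is
+                # true, so scanning "a b # rest" skips the blank and
+                # everything from "#" to the end of the line while
+                # collecting characters from test_set.)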
+ self.pos = len(string) + + return "".join(substring) + else: + try: + while (string[pos] in test_set) == include: + pos += 1 + + substring = string[self.pos : pos] + + self.pos = pos + + return substring + except IndexError: + # We've reached the end of the string. + substring = string[self.pos : pos] + + self.pos = pos + + return substring + + def skip_while(self, test_set, include=True): + string = self.string + pos = self.pos + + try: + if self.ignore_space: + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + elif (string[pos] in test_set) == include: + pos += 1 + else: + break + else: + while (string[pos] in test_set) == include: + pos += 1 + + self.pos = pos + except IndexError: + # We've reached the end of the string. + self.pos = len(string) + except ValueError: + # The comment extended to the end of the string. + self.pos = len(string) + + def match(self, substring): + string = self.string + pos = self.pos + + if self.ignore_space: + try: + for c in substring: + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + else: + break + + if string[pos] != c: + return False + + pos += 1 + + self.pos = pos + + return True + except IndexError: + # We've reached the end of the string. + return False + except ValueError: + # The comment extended to the end of the string. + return False + else: + if not string.startswith(substring, pos): + return False + + self.pos = pos + len(substring) + + return True + + def expect(self, substring): + if not self.match(substring): + raise error("missing {}".format(substring), self.string, self.pos) + + def at_end(self): + string = self.string + pos = self.pos + + try: + if self.ignore_space: + while True: + if string[pos].isspace(): + pos += 1 + elif string[pos] == "#": + pos = string.index("\n", pos) + else: + break + + return pos >= len(string) + except IndexError: + # We've reached the end of the string. + return True + except ValueError: + # The comment extended to the end of the string. + return True + +class Info: + "Info about the regular expression." + + def __init__(self, flags=0, char_type=None, kwargs={}): + flags |= DEFAULT_FLAGS[(flags & _ALL_VERSIONS) or DEFAULT_VERSION] + self.flags = flags + self.global_flags = flags + self.inline_locale = False + + self.kwargs = kwargs + + self.group_count = 0 + self.group_index = {} + self.group_name = {} + self.char_type = char_type + self.named_lists_used = {} + self.open_groups = [] + self.open_group_count = {} + self.defined_groups = {} + self.group_calls = [] + self.private_groups = {} + + def open_group(self, name=None): + group = self.group_index.get(name) + if group is None: + while True: + self.group_count += 1 + if name is None or self.group_count not in self.group_name: + break + + group = self.group_count + if name: + self.group_index[name] = group + self.group_name[group] = name + + if group in self.open_groups: + # We have a nested named group. We'll assign it a private group + # number, initially negative until we can assign a proper + # (positive) number. 
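+            # (Illustratively, the first such reopened group gets alias -1,
+            # the second -2, and so on; private_groups later maps each
+            # alias back to its real, public group number.)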
+ group_alias = -(len(self.private_groups) + 1) + self.private_groups[group_alias] = group + group = group_alias + + self.open_groups.append(group) + self.open_group_count[group] = self.open_group_count.get(group, 0) + 1 + + return group + + def close_group(self): + self.open_groups.pop() + + def is_open_group(self, name): + # In version 1, a group reference can refer to an open group. We'll + # just pretend the group isn't open. + version = (self.flags & _ALL_VERSIONS) or DEFAULT_VERSION + if version == VERSION1: + return False + + if name.isdigit(): + group = int(name) + else: + group = self.group_index.get(name) + + return group in self.open_groups + +def _check_group_features(info, parsed): + """Checks whether the reverse and fuzzy features of the group calls match + the groups which they call. + """ + call_refs = {} + additional_groups = [] + for call, reverse, fuzzy in info.group_calls: + # Look up the reference of this group call. + key = (call.group, reverse, fuzzy) + ref = call_refs.get(key) + if ref is None: + # This group doesn't have a reference yet, so look up its features. + if call.group == 0: + # Calling the pattern as a whole. + rev = bool(info.flags & REVERSE) + fuz = isinstance(parsed, Fuzzy) + if (rev, fuz) != (reverse, fuzzy): + # The pattern as a whole doesn't have the features we want, + # so we'll need to make a copy of it with the desired + # features. + additional_groups.append((CallRef(len(call_refs), parsed), + reverse, fuzzy)) + else: + # Calling a capture group. + def_info = info.defined_groups[call.group] + group = def_info[0] + if def_info[1 : ] != (reverse, fuzzy): + # The group doesn't have the features we want, so we'll + # need to make a copy of it with the desired features. + additional_groups.append((group, reverse, fuzzy)) + + ref = len(call_refs) + call_refs[key] = ref + + call.call_ref = ref + + info.call_refs = call_refs + info.additional_groups = additional_groups + +def _get_required_string(parsed, flags): + "Gets the required string and related info of a parsed pattern." + + req_offset, required = parsed.get_required_string(bool(flags & REVERSE)) + if required: + required.required = True + if req_offset >= UNLIMITED: + req_offset = -1 + + req_flags = required.case_flags + if not (flags & UNICODE): + req_flags &= ~UNICODE + + req_chars = required.folded_characters + else: + req_offset = 0 + req_chars = () + req_flags = 0 + + return req_offset, req_chars, req_flags + +class Scanner: + def __init__(self, lexicon, flags=0): + self.lexicon = lexicon + + # Combine phrases into a compound pattern. + patterns = [] + for phrase, action in lexicon: + # Parse the regular expression. + source = Source(phrase) + info = Info(flags, source.char_type) + source.ignore_space = bool(info.flags & VERBOSE) + parsed = _parse_pattern(source, info) + if not source.at_end(): + raise error("unbalanced parenthesis", source.string, + source.pos) + + # We want to forbid capture groups within each phrase. + patterns.append(parsed.remove_captures()) + + # Combine all the subpatterns into one pattern. + info = Info(flags) + patterns = [Group(info, g + 1, p) for g, p in enumerate(patterns)] + parsed = Branch(patterns) + + # Optimise the compound pattern. + reverse = bool(info.flags & REVERSE) + parsed = parsed.optimise(info, reverse) + parsed = parsed.pack_characters(info) + + # Get the required string. + req_offset, req_chars, req_flags = _get_required_string(parsed, + info.flags) + + # Check the features of the groups. 
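+        # (Illustrative use of this class: Scanner([(r"[0-9]+", lambda s,
+        # t: int(t)), (r"\s+", None)]) tokenises digit runs and drops
+        # whitespace; each phrase becomes group 1, 2, ... of the compound
+        # pattern, and scan() dispatches on m.lastindex.)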
+ _check_group_features(info, parsed) + + # Complain if there are any group calls. They are not supported by the + # Scanner class. + if info.call_refs: + raise error("recursive regex not supported by Scanner", + source.string, source.pos) + + reverse = bool(info.flags & REVERSE) + + # Compile the compound pattern. The result is a list of tuples. + code = parsed.compile(reverse) + [(OP.SUCCESS, )] + + # Flatten the code into a list of ints. + code = _flatten_code(code) + + if not parsed.has_simple_start(): + # Get the first set, if possible. + try: + fs_code = _compile_firstset(info, parsed.get_firstset(reverse)) + fs_code = _flatten_code(fs_code) + code = fs_code + code + except _FirstSetError: + pass + + # Check the global flags for conflicts. + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + if version not in (0, VERSION0, VERSION1): + raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible") + + # Create the PatternObject. + # + # Local flags like IGNORECASE affect the code generation, but aren't + # needed by the PatternObject itself. Conversely, global flags like + # LOCALE _don't_ affect the code generation but _are_ needed by the + # PatternObject. + self.scanner = _regex.compile(None, (flags & GLOBAL_FLAGS) | version, + code, {}, {}, {}, [], req_offset, req_chars, req_flags, + len(patterns)) + + def scan(self, string): + result = [] + append = result.append + match = self.scanner.scanner(string).match + i = 0 + while True: + m = match() + if not m: + break + j = m.end() + if i == j: + break + action = self.lexicon[m.lastindex - 1][1] + if hasattr(action, '__call__'): + self.match = m + action = action(self, m.group()) + if action is not None: + append(action) + i = j + + return result, string[i : ] + +# Get the known properties dict. +PROPERTIES = _regex.get_properties() + +# Build the inverse of the properties dict. +PROPERTY_NAMES = {} +for prop_name, (prop_id, values) in PROPERTIES.items(): + name, prop_values = PROPERTY_NAMES.get(prop_id, ("", {})) + name = max(name, prop_name, key=len) + PROPERTY_NAMES[prop_id] = name, prop_values + + for val_name, val_id in values.items(): + prop_values[val_id] = max(prop_values.get(val_id, ""), val_name, + key=len) + +# Character escape sequences. +CHARACTER_ESCAPES = { + "a": "\a", + "b": "\b", + "f": "\f", + "n": "\n", + "r": "\r", + "t": "\t", + "v": "\v", +} + +# Predefined character set escape sequences. +CHARSET_ESCAPES = { + "d": lookup_property(None, "Digit", True), + "D": lookup_property(None, "Digit", False), + "h": lookup_property(None, "Blank", True), + "s": lookup_property(None, "Space", True), + "S": lookup_property(None, "Space", False), + "w": lookup_property(None, "Word", True), + "W": lookup_property(None, "Word", False), +} + +# Positional escape sequences. +POSITION_ESCAPES = { + "A": StartOfString(), + "b": Boundary(), + "B": Boundary(False), + "K": Keep(), + "m": StartOfWord(), + "M": EndOfWord(), + "Z": EndOfString(), +} + +# Positional escape sequences when WORD flag set. +WORD_POSITION_ESCAPES = dict(POSITION_ESCAPES) +WORD_POSITION_ESCAPES.update({ + "b": DefaultBoundary(), + "B": DefaultBoundary(False), + "m": DefaultStartOfWord(), + "M": DefaultEndOfWord(), +}) + +# Regex control verbs. 
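+# For example, the control verbs (*FAIL) (short form (*F)), (*PRUNE) and
+# (*SKIP) presumably resolve through this table to the Failure, Prune and
+# Skip nodes defined above.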
+VERBS = {
+    "FAIL": Failure(),
+    "F": Failure(),
+    "PRUNE": Prune(),
+    "SKIP": Skip(),
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/regex.py b/env-llmeval/lib/python3.10/site-packages/regex/regex.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8cc1776570fc3a0447a9a04c7063738f4ee57f3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/regex/regex.py
@@ -0,0 +1,746 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
+#
+# This version of the SRE library can be redistributed under CNRI's
+# Python 1.6 license. For any other use, please contact Secret Labs
+# AB (info@pythonware.com).
+#
+# Portions of this engine have been developed in cooperation with
+# CNRI. Hewlett-Packard provided funding for 1.6 integration and
+# other compatibility work.
+#
+# 2010-01-16 mrab Python front-end re-written and extended
+
+r"""Support for regular expressions (RE).
+
+This module provides regular expression matching operations similar to those
+found in Perl. It supports both 8-bit and Unicode strings; both the pattern and
+the strings being processed can contain null bytes and characters outside the
+US ASCII range.
+
+Regular expressions can contain both special and ordinary characters. Most
+ordinary characters, like "A", "a", or "0", are the simplest regular
+expressions; they simply match themselves. You can concatenate ordinary
+characters, so last matches the string 'last'.
+
+There are a few differences between the old (legacy) behaviour and the new
+(enhanced) behaviour, which are indicated by VERSION0 or VERSION1.
+
+The special characters are:
+    "."                 Matches any character except a newline.
+    "^"                 Matches the start of the string.
+    "$"                 Matches the end of the string or just before the
+                        newline at the end of the string.
+    "*"                 Matches 0 or more (greedy) repetitions of the preceding
+                        RE. Greedy means that it will match as many repetitions
+                        as possible.
+    "+"                 Matches 1 or more (greedy) repetitions of the preceding
+                        RE.
+    "?"                 Matches 0 or 1 (greedy) of the preceding RE.
+    *?,+?,??            Non-greedy versions of the previous three special
+                        characters.
+    *+,++,?+            Possessive versions of the previous three special
+                        characters.
+    {m,n}               Matches from m to n repetitions of the preceding RE.
+    {m,n}?              Non-greedy version of the above.
+    {m,n}+              Possessive version of the above.
+    {...}               Fuzzy matching constraints.
+    "\\"                Either escapes special characters or signals a special
+                        sequence.
+    [...]               Indicates a set of characters. A "^" as the first
+                        character indicates a complementing set.
+    "|"                 A|B, creates an RE that will match either A or B.
+    (...)               Matches the RE inside the parentheses. The contents are
+                        captured and can be retrieved or matched later in the
+                        string.
+    (?flags-flags)      VERSION1: Sets/clears the flags for the remainder of
+                        the group or pattern; VERSION0: Sets the flags for the
+                        entire pattern.
+    (?:...)             Non-capturing version of regular parentheses.
+    (?>...)             Atomic non-capturing version of regular parentheses.
+    (?flags-flags:...)  Non-capturing version of regular parentheses with local
+                        flags.
+    (?P<name>...)       The substring matched by the group is accessible by
+                        name.
+    (?<name>...)        The substring matched by the group is accessible by
+                        name.
+    (?P=name)           Matches the text matched earlier by the group named
+                        name.
+    (?#...)             A comment; ignored.
+    (?=...)             Matches if ... matches next, but doesn't consume the
+                        string.
+    (?!...)             Matches if ... doesn't match next.
+    (?<=...)            Matches if preceded by ....
+    (?<!...)            Matches if not preceded by ....
+    (?(id/name)yes|no)  Matches yes pattern if the group with id/name matched,
+                        the (optional) no pattern otherwise.
+
+The special sequences consist of "\\" and a character from the list below.
+    \number        Matches the contents of the group of the same number.
+    \A             Matches only at the start of the string.
+    \b             Matches the empty string, but only at the start or end of a
+                   word.
+    \B             Matches the empty string, but not at the start or end of a
+                   word.
+    \d             Matches any decimal digit; equivalent to the set [0-9] when
+                   matching a bytestring or a Unicode string with the ASCII
+                   flag, or the whole range of Unicode digits when matching a
+                   Unicode string.
+    \D             Matches any non-digit character; equivalent to [^\d].
+    \f             Matches the formfeed character.
+    \g<name>       Matches the text matched by the group named name.
+    \G             Matches the empty string, but only at the position where
+                   the search started.
+    \h             Matches horizontal whitespace.
+    \K             Keeps only what follows for the entire match.
+    \L<name>       Named list. The list is provided as a keyword argument.
+    \m             Matches the empty string, but only at the start of a word.
+    \M             Matches the empty string, but only at the end of a word.
+    \n             Matches the newline character.
+    \N{name}       Matches the named character.
+    \p{name=value} Matches the character if its property has the specified
+                   value.
+    \P{name=value} Matches the character if its property hasn't the specified
+                   value.
+    \r             Matches the carriage-return character.
+    \s             Matches any whitespace character; equivalent to
+                   [ \t\n\r\f\v].
+    \S             Matches any non-whitespace character; equivalent to [^\s].
+    \t             Matches the tab character.
+    \uXXXX         Matches the Unicode codepoint with 4-digit hex code XXXX.
+    \UXXXXXXXX     Matches the Unicode codepoint with 8-digit hex code
+                   XXXXXXXX.
+    \v             Matches the vertical tab character.
+    \w             Matches any alphanumeric character; equivalent to
+                   [a-zA-Z0-9_] when matching a bytestring or a Unicode string
+                   with the ASCII flag, or the whole range of Unicode
+                   alphanumeric characters (letters plus digits plus
+                   underscore) when matching a Unicode string. With LOCALE, it
+                   will match the set [0-9_] plus characters defined as
+                   letters for the current locale.
+    \W             Matches the complement of \w; equivalent to [^\w].
+    \xXX           Matches the character with 2-digit hex code XX.
+    \X             Matches a grapheme.
+    \Z             Matches only at the end of the string.
+    \\             Matches a literal backslash.
+
+This module exports the following functions:
+    match      Match a regular expression pattern at the beginning of a string.
+    fullmatch  Match a regular expression pattern against all of a string.
+    search     Search a string for the presence of a pattern.
+    sub        Substitute occurrences of a pattern found in a string using a
+               template string.
+    subf       Substitute occurrences of a pattern found in a string using a
+               format string.
+    subn       Same as sub, but also return the number of substitutions made.
+    subfn      Same as subf, but also return the number of substitutions made.
+    split      Split a string by the occurrences of a pattern. VERSION1: will
+               split at zero-width match; VERSION0: won't split at zero-width
+               match.
+    splititer  Return an iterator yielding the parts of a split string.
+    findall    Find all occurrences of a pattern in a string.
+    finditer   Return an iterator yielding a match object for each match.
+    compile    Compile a pattern into a Pattern object.
+    purge      Clear the regular expression cache.
+    escape     Backslash all non-alphanumerics or special characters in a
+               string.
+
+Most of the functions support a concurrent parameter: if True, the GIL will be
+released during matching, allowing other Python threads to run concurrently. If
+the string changes during matching, the behaviour is undefined. This parameter
+is not needed when working on the builtin (immutable) string classes.
+
+Some of the functions in this module take flags as optional parameters. Most of
+these flags can also be set within an RE:
+    A   a   ASCII         Make \w, \W, \b, \B, \d, and \D match the
+                          corresponding ASCII character categories. Default
+                          when matching a bytestring.
+    B   b   BESTMATCH     Find the best fuzzy match (default is first).
+    D       DEBUG         Print the parsed pattern.
+    E   e   ENHANCEMATCH  Attempt to improve the fit after finding the first
+                          fuzzy match.
+ F f FULLCASE Use full case-folding when performing + case-insensitive matching in Unicode. + I i IGNORECASE Perform case-insensitive matching. + L L LOCALE Make \w, \W, \b, \B, \d, and \D dependent on the + current locale. (One byte per character only.) + M m MULTILINE "^" matches the beginning of lines (after a newline) + as well as the string. "$" matches the end of lines + (before a newline) as well as the end of the string. + P p POSIX Perform POSIX-standard matching (leftmost longest). + R r REVERSE Searches backwards. + S s DOTALL "." matches any character at all, including the + newline. + U u UNICODE Make \w, \W, \b, \B, \d, and \D dependent on the + Unicode locale. Default when matching a Unicode + string. + V0 V0 VERSION0 Turn on the old legacy behaviour. + V1 V1 VERSION1 Turn on the new enhanced behaviour. This flag + includes the FULLCASE flag. + W w WORD Make \b and \B work with default Unicode word breaks + and make ".", "^" and "$" work with Unicode line + breaks. + X x VERBOSE Ignore whitespace and comments for nicer looking REs. + +This module also defines an exception 'error'. + +""" + +# Public symbols. +__all__ = ["cache_all", "compile", "DEFAULT_VERSION", "escape", "findall", + "finditer", "fullmatch", "match", "purge", "search", "split", "splititer", + "sub", "subf", "subfn", "subn", "template", "Scanner", "A", "ASCII", "B", + "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH", "S", "DOTALL", "F", + "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "P", "POSIX", + "R", "REVERSE", "T", "TEMPLATE", "U", "UNICODE", "V0", "VERSION0", "V1", + "VERSION1", "X", "VERBOSE", "W", "WORD", "error", "Regex", "__version__", + "__doc__", "RegexFlag"] + +__version__ = "2.5.140" + +# -------------------------------------------------------------------- +# Public interface. + +def match(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Try to apply the pattern at the start of the string, returning a match + object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.match(string, pos, endpos, concurrent, partial, timeout) + +def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Try to apply the pattern against all of the string, returning a match + object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.fullmatch(string, pos, endpos, concurrent, partial, timeout) + +def search(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Search through string looking for a match to the pattern, returning a + match object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.search(string, pos, endpos, concurrent, partial, timeout) + +def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return the string obtained by replacing the leftmost (or rightmost with a + reverse pattern) non-overlapping occurrences of the pattern in string by the + replacement repl. 
repl can be either a string or a callable; if a string, + backslash escapes in it are processed; if a callable, it's passed the match + object and must return a replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.sub(repl, string, count, pos, endpos, concurrent, timeout) + +def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return the string obtained by replacing the leftmost (or rightmost with a + reverse pattern) non-overlapping occurrences of the pattern in string by the + replacement format. format can be either a string or a callable; if a string, + it's treated as a format string; if a callable, it's passed the match object + and must return a replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subf(format, string, count, pos, endpos, concurrent, timeout) + +def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return a 2-tuple containing (new_string, number). new_string is the string + obtained by replacing the leftmost (or rightmost with a reverse pattern) + non-overlapping occurrences of the pattern in the source string by the + replacement repl. number is the number of substitutions that were made. repl + can be either a string or a callable; if a string, backslash escapes in it + are processed; if a callable, it's passed the match object and must return a + replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subn(repl, string, count, pos, endpos, concurrent, timeout) + +def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return a 2-tuple containing (new_string, number). new_string is the string + obtained by replacing the leftmost (or rightmost with a reverse pattern) + non-overlapping occurrences of the pattern in the source string by the + replacement format. number is the number of substitutions that were made. format + can be either a string or a callable; if a string, it's treated as a format + string; if a callable, it's passed the match object and must return a + replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subfn(format, string, count, pos, endpos, concurrent, timeout) + +def split(pattern, string, maxsplit=0, flags=0, concurrent=None, timeout=None, + ignore_unused=False, **kwargs): + """Split the source string by the occurrences of the pattern, returning a + list containing the resulting substrings. If capturing parentheses are used + in pattern, then the text of all groups in the pattern are also returned as + part of the resulting list. If maxsplit is nonzero, at most maxsplit splits + occur, and the remainder of the string is returned as the final element of + the list.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.split(string, maxsplit, concurrent, timeout) + +def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None, + timeout=None, ignore_unused=False, **kwargs): + "Return an iterator yielding the parts of a split string." 
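+    # An illustrative sketch:
+    #     list(splititer(r"\s+", "a b  c")) == ["a", "b", "c"]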
+    pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+    return pat.splititer(string, maxsplit, concurrent, timeout)
+
+def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
+  concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+    """Return a list of all matches in the string. The matches may be
+    overlapped if overlapped is True. If one or more groups are present in the
+    pattern, return a list of groups; this will be a list of tuples if the
+    pattern has more than one group. Empty matches are included in the
+    result."""
+    pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+    return pat.findall(string, pos, endpos, overlapped, concurrent, timeout)
+
+def finditer(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
+  partial=False, concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+    """Return an iterator over all matches in the string. The matches may be
+    overlapped if overlapped is True. For each match, the iterator returns a
+    match object. Empty matches are included in the result."""
+    pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+    return pat.finditer(string, pos, endpos, overlapped, concurrent, partial,
+      timeout)
+
+def compile(pattern, flags=0, ignore_unused=False, cache_pattern=None, **kwargs):
+    "Compile a regular expression pattern, returning a pattern object."
+    if cache_pattern is None:
+        cache_pattern = _cache_all
+    return _compile(pattern, flags, ignore_unused, kwargs, cache_pattern)
+
+def purge():
+    "Clear the regular expression cache"
+    _cache.clear()
+    _locale_sensitive.clear()
+
+# Whether to cache all patterns.
+_cache_all = True
+
+def cache_all(value=True):
+    """Sets whether to cache all patterns, even those that are compiled
+    explicitly. Passing None has no effect, but returns the current
+    setting."""
+    global _cache_all
+
+    if value is None:
+        return _cache_all
+
+    _cache_all = value
+
+def template(pattern, flags=0):
+    "Compile a template pattern, returning a pattern object."
+    return _compile(pattern, flags | TEMPLATE, False, {}, False)
+
+def escape(pattern, special_only=True, literal_spaces=False):
+    """Escape a string for use as a literal in a pattern. If special_only is
+    True, escape only special characters, else escape all non-alphanumeric
+    characters. If literal_spaces is True, don't escape spaces."""
+    # Convert it to Unicode.
+    if isinstance(pattern, bytes):
+        p = pattern.decode("latin-1")
+    else:
+        p = pattern
+
+    s = []
+    if special_only:
+        for c in p:
+            if c == " " and literal_spaces:
+                s.append(c)
+            elif c in _METACHARS or c.isspace():
+                s.append("\\")
+                s.append(c)
+            else:
+                s.append(c)
+    else:
+        for c in p:
+            if c == " " and literal_spaces:
+                s.append(c)
+            elif c in _ALNUM:
+                s.append(c)
+            else:
+                s.append("\\")
+                s.append(c)
+
+    r = "".join(s)
+    # Convert it back to bytes if necessary.
+    if isinstance(pattern, bytes):
+        r = r.encode("latin-1")
+
+    return r
+
+# --------------------------------------------------------------------
+# Internals.
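+# A note on caching (see _compile below): the cache lookup is keyed on
+#     (pattern, type(pattern), flags, args_supplied, DEFAULT_VERSION,
+#      pattern_locale)
+# so the same pattern text can coexist in the cache as str and bytes, and
+# under different flags, versions and locales.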
+ +import regex._regex_core as _regex_core +import regex._regex as _regex +from threading import RLock as _RLock +from locale import getpreferredencoding as _getpreferredencoding +from regex._regex_core import * +from regex._regex_core import (_ALL_VERSIONS, _ALL_ENCODINGS, _FirstSetError, + _UnscopedFlagSet, _check_group_features, _compile_firstset, + _compile_replacement, _flatten_code, _fold_case, _get_required_string, + _parse_pattern, _shrink_cache) +from regex._regex_core import (ALNUM as _ALNUM, Info as _Info, OP as _OP, Source + as _Source, Fuzzy as _Fuzzy) + +# Version 0 is the old behaviour, compatible with the original 're' module. +# Version 1 is the new behaviour, which differs slightly. + +DEFAULT_VERSION = VERSION0 + +_METACHARS = frozenset("()[]{}?*+|^$\\.-#&~") + +_regex_core.DEFAULT_VERSION = DEFAULT_VERSION + +# Caches for the patterns and replacements. +_cache = {} +_cache_lock = _RLock() +_named_args = {} +_replacement_cache = {} +_locale_sensitive = {} + +# Maximum size of the cache. +_MAXCACHE = 500 +_MAXREPCACHE = 500 + +def _compile(pattern, flags, ignore_unused, kwargs, cache_it): + "Compiles a regular expression to a PatternObject." + + global DEFAULT_VERSION + try: + from regex import DEFAULT_VERSION + except ImportError: + pass + + # We won't bother to cache the pattern if we're debugging. + if (flags & DEBUG) != 0: + cache_it = False + + # What locale is this pattern using? + locale_key = (type(pattern), pattern) + if _locale_sensitive.get(locale_key, True) or (flags & LOCALE) != 0: + # This pattern is, or might be, locale-sensitive. + pattern_locale = _getpreferredencoding() + else: + # This pattern is definitely not locale-sensitive. + pattern_locale = None + + def complain_unused_args(): + if ignore_unused: + return + + # Complain about any unused keyword arguments, possibly resulting from a typo. + unused_kwargs = set(kwargs) - {k for k, v in args_needed} + if unused_kwargs: + any_one = next(iter(unused_kwargs)) + raise ValueError('unused keyword argument {!a}'.format(any_one)) + + if cache_it: + try: + # Do we know what keyword arguments are needed? + args_key = pattern, type(pattern), flags + args_needed = _named_args[args_key] + + # Are we being provided with its required keyword arguments? + args_supplied = set() + if args_needed: + for k, v in args_needed: + try: + args_supplied.add((k, frozenset(kwargs[k]))) + except KeyError: + raise error("missing named list: {!r}".format(k)) + + complain_unused_args() + + args_supplied = frozenset(args_supplied) + + # Have we already seen this regular expression and named list? + pattern_key = (pattern, type(pattern), flags, args_supplied, + DEFAULT_VERSION, pattern_locale) + return _cache[pattern_key] + except KeyError: + # It's a new pattern, or new named list for a known pattern. + pass + + # Guess the encoding from the class of the pattern string. + if isinstance(pattern, str): + guess_encoding = UNICODE + elif isinstance(pattern, bytes): + guess_encoding = ASCII + elif isinstance(pattern, Pattern): + if flags: + raise ValueError("cannot process flags argument with a compiled pattern") + + return pattern + else: + raise TypeError("first argument must be a string or compiled pattern") + + # Set the default version in the core code in case it has been changed. 
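The keyword-argument plumbing in _compile above is what implements named lists. A sketch of the observable behaviour, with the colors/colours names invented for the example:

    import regex

    # \L<name> matches any literal in the list passed as a keyword argument;
    # the supplied lists become part of the cache key computed in _compile().
    m = regex.search(r"\L<colors>", "a reddish hue", colors=["red", "green"])
    print(m.span())  # (2, 5)

    # complain_unused_args() rejects stray keyword arguments (likely typos)
    # unless ignore_unused=True is passed.
    try:
        regex.search(r"red", "red", colours=["red"])
    except ValueError as e:
        print(e)  # unused keyword argument 'colours'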
+ _regex_core.DEFAULT_VERSION = DEFAULT_VERSION + + global_flags = flags + + while True: + caught_exception = None + try: + source = _Source(pattern) + info = _Info(global_flags, source.char_type, kwargs) + info.guess_encoding = guess_encoding + source.ignore_space = bool(info.flags & VERBOSE) + parsed = _parse_pattern(source, info) + break + except _UnscopedFlagSet: + # Remember the global flags for the next attempt. + global_flags = info.global_flags + except error as e: + caught_exception = e + + if caught_exception: + raise error(caught_exception.msg, caught_exception.pattern, + caught_exception.pos) + + if not source.at_end(): + raise error("unbalanced parenthesis", pattern, source.pos) + + # Check the global flags for conflicts. + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + if version not in (0, VERSION0, VERSION1): + raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible") + + if (info.flags & _ALL_ENCODINGS) not in (0, ASCII, LOCALE, UNICODE): + raise ValueError("ASCII, LOCALE and UNICODE flags are mutually incompatible") + + if isinstance(pattern, bytes) and (info.flags & UNICODE): + raise ValueError("cannot use UNICODE flag with a bytes pattern") + + if not (info.flags & _ALL_ENCODINGS): + if isinstance(pattern, str): + info.flags |= UNICODE + else: + info.flags |= ASCII + + reverse = bool(info.flags & REVERSE) + fuzzy = isinstance(parsed, _Fuzzy) + + # Remember whether this pattern as an inline locale flag. + _locale_sensitive[locale_key] = info.inline_locale + + # Fix the group references. + caught_exception = None + try: + parsed.fix_groups(pattern, reverse, False) + except error as e: + caught_exception = e + + if caught_exception: + raise error(caught_exception.msg, caught_exception.pattern, + caught_exception.pos) + + # Should we print the parsed pattern? + if flags & DEBUG: + parsed.dump(indent=0, reverse=reverse) + + # Optimise the parsed pattern. + parsed = parsed.optimise(info, reverse) + parsed = parsed.pack_characters(info) + + # Get the required string. + req_offset, req_chars, req_flags = _get_required_string(parsed, info.flags) + + # Build the named lists. + named_lists = {} + named_list_indexes = [None] * len(info.named_lists_used) + args_needed = set() + for key, index in info.named_lists_used.items(): + name, case_flags = key + values = frozenset(kwargs[name]) + if case_flags: + items = frozenset(_fold_case(info, v) for v in values) + else: + items = values + named_lists[name] = values + named_list_indexes[index] = items + args_needed.add((name, values)) + + complain_unused_args() + + # Check the features of the groups. + _check_group_features(info, parsed) + + # Compile the parsed pattern. The result is a list of tuples. + code = parsed.compile(reverse) + + # Is there a group call to the pattern as a whole? + key = (0, reverse, fuzzy) + ref = info.call_refs.get(key) + if ref is not None: + code = [(_OP.CALL_REF, ref)] + code + [(_OP.END, )] + + # Add the final 'success' opcode. + code += [(_OP.SUCCESS, )] + + # Compile the additional copies of the groups that we need. + for group, rev, fuz in info.additional_groups: + code += group.compile(rev, fuz) + + # Flatten the code into a list of ints. + code = _flatten_code(code) + + if not parsed.has_simple_start(): + # Get the first set, if possible. + try: + fs_code = _compile_firstset(info, parsed.get_firstset(reverse)) + fs_code = _flatten_code(fs_code) + code = fs_code + code + except _FirstSetError: + pass + + # The named capture groups. 
+ index_group = dict((v, n) for n, v in info.group_index.items()) + + # Create the PatternObject. + # + # Local flags like IGNORECASE affect the code generation, but aren't needed + # by the PatternObject itself. Conversely, global flags like LOCALE _don't_ + # affect the code generation but _are_ needed by the PatternObject. + compiled_pattern = _regex.compile(pattern, info.flags | version, code, + info.group_index, index_group, named_lists, named_list_indexes, + req_offset, req_chars, req_flags, info.group_count) + + # Do we need to reduce the size of the cache? + if len(_cache) >= _MAXCACHE: + with _cache_lock: + _shrink_cache(_cache, _named_args, _locale_sensitive, _MAXCACHE) + + if cache_it: + if (info.flags & LOCALE) == 0: + pattern_locale = None + + args_needed = frozenset(args_needed) + + # Store this regular expression and named list. + pattern_key = (pattern, type(pattern), flags, args_needed, + DEFAULT_VERSION, pattern_locale) + _cache[pattern_key] = compiled_pattern + + # Store what keyword arguments are needed. + _named_args[args_key] = args_needed + + return compiled_pattern + +def _compile_replacement_helper(pattern, template): + "Compiles a replacement template." + # This function is called by the _regex module. + + # Have we seen this before? + key = pattern.pattern, pattern.flags, template + compiled = _replacement_cache.get(key) + if compiled is not None: + return compiled + + if len(_replacement_cache) >= _MAXREPCACHE: + _replacement_cache.clear() + + is_unicode = isinstance(template, str) + source = _Source(template) + if is_unicode: + def make_string(char_codes): + return "".join(chr(c) for c in char_codes) + else: + def make_string(char_codes): + return bytes(char_codes) + + compiled = [] + literal = [] + while True: + ch = source.get() + if not ch: + break + if ch == "\\": + # '_compile_replacement' will return either an int group reference + # or a string literal. It returns items (plural) in order to handle + # a 2-character literal (an invalid escape sequence). + is_group, items = _compile_replacement(source, pattern, is_unicode) + if is_group: + # It's a group, so first flush the literal. + if literal: + compiled.append(make_string(literal)) + literal = [] + compiled.extend(items) + else: + literal.extend(items) + else: + literal.append(ord(ch)) + + # Flush the literal. + if literal: + compiled.append(make_string(literal)) + + _replacement_cache[key] = compiled + + return compiled + +# We define Pattern here after all the support objects have been defined. +_pat = _compile('', 0, False, {}, False) +Pattern = type(_pat) +Match = type(_pat.match('')) +del _pat + +# Make Pattern public for typing annotations. +__all__.append("Pattern") +__all__.append("Match") + +# We'll define an alias for the 'compile' function so that the repr of a +# pattern object is eval-able. +Regex = compile + +# Register myself for pickling. +import copyreg as _copy_reg + +def _pickle(pattern): + return _regex.compile, pattern._pickled_data + +_copy_reg.pickle(Pattern, _pickle) diff --git a/env-llmeval/lib/python3.10/site-packages/regex/test_regex.py b/env-llmeval/lib/python3.10/site-packages/regex/test_regex.py new file mode 100644 index 0000000000000000000000000000000000000000..21cdb8a06d727055fc1b93c2c264639ddda0cc69 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/regex/test_regex.py @@ -0,0 +1,4458 @@ +from weakref import proxy +import copy +import pickle +import regex +import string +import sys +import unittest + +# String subclasses for issue 18468. 
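Since the module registers Pattern with copyreg above, compiled patterns survive a pickle round-trip. A quick check (my example, not part of the test suite that follows):

    import pickle
    import regex

    pat = regex.compile(r"\d+")
    clone = pickle.loads(pickle.dumps(pat))
    print(clone.findall("a1b22c333"))  # ['1', '22', '333']

    # Regex is an alias for compile(), so a pattern's repr is eval()-able.
    print(regex.Regex is regex.compile)  # True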
+class StrSubclass(str):
+    def __getitem__(self, index):
+        return StrSubclass(super().__getitem__(index))
+
+class BytesSubclass(bytes):
+    def __getitem__(self, index):
+        return BytesSubclass(super().__getitem__(index))
+
+class RegexTests(unittest.TestCase):
+    PATTERN_CLASS = "<class '_regex.Pattern'>"
+    FLAGS_WITH_COMPILED_PAT = "cannot process flags argument with a compiled pattern"
+    INVALID_GROUP_REF = "invalid group reference"
+    MISSING_GT = "missing >"
+    BAD_GROUP_NAME = "bad character in group name"
+    MISSING_GROUP_NAME = "missing group name"
+    MISSING_LT = "missing <"
+    UNKNOWN_GROUP_I = "unknown group"
+    UNKNOWN_GROUP = "unknown group"
+    BAD_ESCAPE = r"bad escape \(end of pattern\)"
+    BAD_OCTAL_ESCAPE = r"bad escape \\"
+    BAD_SET = "unterminated character set"
+    STR_PAT_ON_BYTES = "cannot use a string pattern on a bytes-like object"
+    BYTES_PAT_ON_STR = "cannot use a bytes pattern on a string-like object"
+    STR_PAT_BYTES_TEMPL = "expected str instance, bytes found"
+    BYTES_PAT_STR_TEMPL = "expected a bytes-like object, str found"
+    BYTES_PAT_UNI_FLAG = "cannot use UNICODE flag with a bytes pattern"
+    MIXED_FLAGS = "ASCII, LOCALE and UNICODE flags are mutually incompatible"
+    MISSING_RPAREN = "missing \\)"
+    TRAILING_CHARS = "unbalanced parenthesis"
+    BAD_CHAR_RANGE = "bad character range"
+    NOTHING_TO_REPEAT = "nothing to repeat"
+    MULTIPLE_REPEAT = "multiple repeat"
+    OPEN_GROUP = "cannot refer to an open group"
+    DUPLICATE_GROUP = "duplicate group"
+    CANT_TURN_OFF = "bad inline flags: cannot turn flags off"
+    UNDEF_CHAR_NAME = "undefined character name"
+
+    def assertTypedEqual(self, actual, expect, msg=None):
+        self.assertEqual(actual, expect, msg)
+
+        def recurse(actual, expect):
+            if isinstance(expect, (tuple, list)):
+                for x, y in zip(actual, expect):
+                    recurse(x, y)
+            else:
+                self.assertIs(type(actual), type(expect), msg)
+
+        recurse(actual, expect)
+
+    def test_weakref(self):
+        s = 'QabbbcR'
+        x = regex.compile('ab+c')
+        y = proxy(x)
+        if x.findall('QabbbcR') != y.findall('QabbbcR'):
+            self.fail()
+
+    def test_search_star_plus(self):
+        self.assertEqual(regex.search('a*', 'xxx').span(0), (0, 0))
+        self.assertEqual(regex.search('x*', 'axx').span(), (0, 0))
+        self.assertEqual(regex.search('x+', 'axx').span(0), (1, 3))
+        self.assertEqual(regex.search('x+', 'axx').span(), (1, 3))
+        self.assertEqual(regex.search('x', 'aaa'), None)
+        self.assertEqual(regex.match('a*', 'xxx').span(0), (0, 0))
+        self.assertEqual(regex.match('a*', 'xxx').span(), (0, 0))
+        self.assertEqual(regex.match('x*', 'xxxa').span(0), (0, 3))
+        self.assertEqual(regex.match('x*', 'xxxa').span(), (0, 3))
+        self.assertEqual(regex.match('a+', 'xxx'), None)
+
+    def bump_num(self, matchobj):
+        int_value = int(matchobj[0])
+        return str(int_value + 1)
+
+    def test_basic_regex_sub(self):
+        self.assertEqual(regex.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
+        self.assertEqual(regex.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
+            '9.3 -3 24x100y')
+        self.assertEqual(regex.sub(r'\d+', self.bump_num, '08.2 -2 23x99y',
+            3), '9.3 -3 23x99y')
+
+        self.assertEqual(regex.sub('.', lambda m: r"\n", 'x'), "\\n")
+        self.assertEqual(regex.sub('.', r"\n", 'x'), "\n")
+
+        self.assertEqual(regex.sub('(?P<a>x)', r'\g<a>\g<a>', 'xx'), 'xxxx')
+        self.assertEqual(regex.sub('(?P<a>x)', r'\g<a>\g<1>', 'xx'), 'xxxx')
+        self.assertEqual(regex.sub('(?P<unk>x)', r'\g<unk>\g<unk>', 'xx'),
+            'xxxx')
+        self.assertEqual(regex.sub('(?P<unk>x)', r'\g<1>\g<1>', 'xx'), 'xxxx')
+
+        self.assertEqual(regex.sub('a', r'\t\n\v\r\f\a\b', 'a'),
+            "\t\n\v\r\f\a\b")
+        self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'),
+            "\t\n\v\r\f\a")
+        self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'), chr(9) +
+            chr(10) + chr(11) + chr(13) + chr(12) + chr(7))
+
+        self.assertEqual(regex.sub(r'^\s*', 'X', 'test'), 'Xtest')
+
+        self.assertEqual(regex.sub(r"x", r"\x0A", "x"), "\n")
+        self.assertEqual(regex.sub(r"x", r"\u000A", "x"), "\n")
+        self.assertEqual(regex.sub(r"x", r"\U0000000A", "x"), "\n")
+        self.assertEqual(regex.sub(r"x", r"\N{LATIN CAPITAL LETTER A}",
+            "x"), "A")
+
+        self.assertEqual(regex.sub(br"x", br"\x0A", b"x"), b"\n")
+
+    def test_bug_449964(self):
+        # Fails for group followed by other escape.
+        self.assertEqual(regex.sub(r'(?P<unk>x)', r'\g<1>\g<1>\b', 'xx'),
+            "xx\bxx\b")
+
+    def test_bug_449000(self):
+        # Test for sub() on escaped characters.
+        self.assertEqual(regex.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
+            "abc\ndef\n")
+        self.assertEqual(regex.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
+            "abc\ndef\n")
+        self.assertEqual(regex.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
+            "abc\ndef\n")
+        self.assertEqual(regex.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
+            "abc\ndef\n")
+
+    def test_bug_1661(self):
+        # Verify that flags do not get silently ignored with compiled patterns
+        pattern = regex.compile('.')
+        self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT,
+            lambda: regex.match(pattern, 'A', regex.I))
+        self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT,
+            lambda: regex.search(pattern, 'A', regex.I))
+        self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT,
+            lambda: regex.findall(pattern, 'A', regex.I))
+        self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT,
+            lambda: regex.compile(pattern, regex.I))
+
+    def test_bug_3629(self):
+        # A regex that triggered a bug in the sre-code validator
+        self.assertEqual(repr(type(regex.compile("(?P<quote>)(?(quote))"))),
+            self.PATTERN_CLASS)
+
+    def test_sub_template_numeric_escape(self):
+        # Bug 776311 and friends.
+ self.assertEqual(regex.sub('x', r'\0', 'x'), "\0") + self.assertEqual(regex.sub('x', r'\000', 'x'), "\000") + self.assertEqual(regex.sub('x', r'\001', 'x'), "\001") + self.assertEqual(regex.sub('x', r'\008', 'x'), "\0" + "8") + self.assertEqual(regex.sub('x', r'\009', 'x'), "\0" + "9") + self.assertEqual(regex.sub('x', r'\111', 'x'), "\111") + self.assertEqual(regex.sub('x', r'\117', 'x'), "\117") + + self.assertEqual(regex.sub('x', r'\1111', 'x'), "\1111") + self.assertEqual(regex.sub('x', r'\1111', 'x'), "\111" + "1") + + self.assertEqual(regex.sub('x', r'\00', 'x'), '\x00') + self.assertEqual(regex.sub('x', r'\07', 'x'), '\x07') + self.assertEqual(regex.sub('x', r'\08', 'x'), "\0" + "8") + self.assertEqual(regex.sub('x', r'\09', 'x'), "\0" + "9") + self.assertEqual(regex.sub('x', r'\0a', 'x'), "\0" + "a") + + self.assertEqual(regex.sub('x', r'\400', 'x'), "\u0100") + self.assertEqual(regex.sub('x', r'\777', 'x'), "\u01FF") + self.assertEqual(regex.sub(b'x', br'\400', b'x'), b"\x00") + self.assertEqual(regex.sub(b'x', br'\777', b'x'), b"\xFF") + + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\1', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\8', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\9', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\11', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\18', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\1a', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\90', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\99', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\118', 'x')) # r'\11' + '8' + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\11a', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\181', 'x')) # r'\18' + '1' + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\800', 'x')) # r'\80' + '0' + + # In Python 2.3 (etc), these loop endlessly in sre_parser.py. + self.assertEqual(regex.sub('(((((((((((x)))))))))))', r'\11', 'x'), + 'x') + self.assertEqual(regex.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'), + 'xz8') + self.assertEqual(regex.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'), + 'xza') + + def test_qualified_re_sub(self): + self.assertEqual(regex.sub('a', 'b', 'aaaaa'), 'bbbbb') + self.assertEqual(regex.sub('a', 'b', 'aaaaa', 1), 'baaaa') + + def test_bug_114660(self): + self.assertEqual(regex.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'), + 'hello there') + + def test_bug_462270(self): + # Test for empty sub() behaviour, see SF bug #462270 + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0)x*', '-', 'abxd'), '-a-b--d-') + else: + self.assertEqual(regex.sub('(?V0)x*', '-', 'abxd'), '-a-b-d-') + self.assertEqual(regex.sub('(?V1)x*', '-', 'abxd'), '-a-b--d-') + self.assertEqual(regex.sub('x+', '-', 'abxd'), 'ab-d') + + def test_bug_14462(self): + # chr(255) is a valid identifier in Python 3. 
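A condensed, runnable sketch of the numeric-escape rules that test_sub_template_numeric_escape exercises above (the sample patterns are mine):

    import regex

    # Octal escapes in a str template can exceed \377 and become Unicode;
    # in a bytes template they are truncated to 8 bits.
    print(regex.sub('x', r'\101', 'x'))     # A  (octal 101 == 65)
    print(regex.sub(b'x', br'\777', b'x'))  # b'\xff'

    # A backreference to a nonexistent group is an error, not a literal.
    try:
        regex.sub('x', r'\1', 'x')
    except regex.error as e:
        print(e)  # invalid group reference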
+        group_name = '\xFF'
+        self.assertEqual(regex.search(r'(?P<' + group_name + '>a)',
+            'abc').group(group_name), 'a')
+
+    def test_symbolic_refs(self):
+        self.assertRaisesRegex(regex.error, self.MISSING_GT, lambda:
+            regex.sub('(?P<a>x)', r'\g<a', 'xx'))
+        self.assertRaisesRegex(regex.error, self.MISSING_GROUP_NAME, lambda:
+            regex.sub('(?P<a>x)', r'\g<', 'xx'))
+        self.assertRaisesRegex(regex.error, self.MISSING_LT, lambda:
+            regex.sub('(?P<a>x)', r'\g', 'xx'))
+        self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+            regex.sub('(?P<a>x)', r'\g<a a>', 'xx'))
+        self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+            regex.sub('(?P<a>x)', r'\g<1a1>', 'xx'))
+        self.assertRaisesRegex(IndexError, self.UNKNOWN_GROUP_I, lambda:
+            regex.sub('(?P<a>x)', r'\g<ab>', 'xx'))
+
+        # The new behaviour of unmatched but valid groups is to treat them like
+        # empty matches in the replacement template, like in Perl.
+        self.assertEqual(regex.sub('(?P<a>x)|(?P<b>y)', r'\g<b>', 'xx'), '')
+        self.assertEqual(regex.sub('(?P<a>x)|(?P<b>y)', r'\2', 'xx'), '')
+
+        # The old behaviour was to raise it as an IndexError.
+        self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+            regex.sub('(?P<a>x)', r'\g<-1>', 'xx'))
+
+    def test_re_subn(self):
+        self.assertEqual(regex.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
+        self.assertEqual(regex.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
+        self.assertEqual(regex.subn("b+", "x", "xyz"), ('xyz', 0))
+        self.assertEqual(regex.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
+        self.assertEqual(regex.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
+
+    def test_re_split(self):
+        self.assertEqual(regex.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
+        if sys.version_info >= (3, 7, 0):
+            self.assertEqual(regex.split(":*", ":a:b::c"), ['', '', 'a', '',
+                'b', '', 'c', ''])
+            self.assertEqual(regex.split("(:*)", ":a:b::c"), ['', ':', '', '',
+                'a', ':', '', '', 'b', '::', '', '', 'c', '', ''])
+            self.assertEqual(regex.split("(?::*)", ":a:b::c"), ['', '', 'a',
+                '', 'b', '', 'c', ''])
+            self.assertEqual(regex.split("(:)*", ":a:b::c"), ['', ':', '',
+                None, 'a', ':', '', None, 'b', ':', '', None, 'c', None, ''])
+        else:
+            self.assertEqual(regex.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
+            self.assertEqual(regex.split("(:*)", ":a:b::c"), ['', ':', 'a',
+                ':', 'b', '::', 'c'])
+            self.assertEqual(regex.split("(?::*)", ":a:b::c"), ['', 'a', 'b',
+                'c'])
+            self.assertEqual(regex.split("(:)*", ":a:b::c"), ['', ':', 'a',
+                ':', 'b', ':', 'c'])
+        self.assertEqual(regex.split("([b:]+)", ":a:b::c"), ['', ':', 'a',
+            ':b::', 'c'])
+        self.assertEqual(regex.split("(b)|(:+)", ":a:b::c"), ['', None, ':',
+            'a', None, ':', '', 'b', None, '', None, '::', 'c'])
+        self.assertEqual(regex.split("(?:b)|(?::+)", ":a:b::c"), ['', 'a', '',
+            '', 'c'])
+
+        self.assertEqual(regex.split("x", "xaxbxc"), ['', 'a', 'b', 'c'])
+        self.assertEqual([m for m in regex.splititer("x", "xaxbxc")], ['', 'a',
+            'b', 'c'])
+
+        self.assertEqual(regex.split("(?r)x", "xaxbxc"), ['c', 'b', 'a', ''])
+        self.assertEqual([m for m in regex.splititer("(?r)x", "xaxbxc")], ['c',
+            'b', 'a', ''])
+
+        self.assertEqual(regex.split("(x)|(y)", "xaxbxc"), ['', 'x', None, 'a',
+            'x', None, 'b', 'x', None, 'c'])
+        self.assertEqual([m for m in regex.splititer("(x)|(y)", "xaxbxc")],
+            ['', 'x', None, 'a', 'x', None, 'b', 'x', None, 'c'])
+
+        self.assertEqual(regex.split("(?r)(x)|(y)", "xaxbxc"), ['c', 'x', None,
+            'b', 'x', None, 'a', 'x', None, ''])
+        self.assertEqual([m for m in regex.splititer("(?r)(x)|(y)", "xaxbxc")],
+            ['c', 'x', None, 'b', 'x', None, 'a', 'x', None, ''])
+
+        self.assertEqual(regex.split(r"(?V1)\b", "a b c"), ['', 'a', ' ', 'b',
+            ' ', 'c', ''])
+        self.assertEqual(regex.split(r"(?V1)\m", "a b c"), ['', 'a ', 'b ',
+            'c'])
+        self.assertEqual(regex.split(r"(?V1)\M", "a b c"), ['a', ' b', ' c',
+            ''])
+
+    def test_qualified_re_split(self):
+        self.assertEqual(regex.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
+        self.assertEqual(regex.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
+        self.assertEqual(regex.split("(:)", ":a:b::c", 2), ['', ':', 'a', ':',
+            'b::c'])
+
+        if sys.version_info >= (3, 7, 0):
+            self.assertEqual(regex.split("(:*)", ":a:b::c", 2), ['', ':', '',
+                '', 'a:b::c'])
+        else:
+            self.assertEqual(regex.split("(:*)", ":a:b::c", 2), ['', ':', 'a',
+                ':', 'b::c'])
+
+    def test_re_findall(self):
+        self.assertEqual(regex.findall(":+", "abc"), [])
+        self.assertEqual(regex.findall(":+", "a:b::c:::d"), [':', '::', ':::'])
+        self.assertEqual(regex.findall("(:+)", "a:b::c:::d"), [':', '::',
+            ':::'])
+        self.assertEqual(regex.findall("(:)(:*)", "a:b::c:::d"), [(':', ''),
+            (':', ':'), (':', '::')])
+
+        self.assertEqual(regex.findall(r"\((?P<test>.{0,5}?TEST)\)",
+            "(MY TEST)"), ["MY TEST"])
+        self.assertEqual(regex.findall(r"\((?P<test>.{0,3}?TEST)\)",
+            "(MY TEST)"), ["MY TEST"])
+        self.assertEqual(regex.findall(r"\((?P<test>.{0,3}?T)\)", "(MY T)"),
+            ["MY T"])
+
+        self.assertEqual(regex.findall(r"[^a]{2}[A-Z]", "\n  S"), ['  S'])
+        self.assertEqual(regex.findall(r"[^a]{2,3}[A-Z]", "\n  S"), ['\n  S'])
+        self.assertEqual(regex.findall(r"[^a]{2,3}[A-Z]", "\n   S"), ['   S'])
+
+        self.assertEqual(regex.findall(r"X(Y[^Y]+?){1,2}( |Q)+DEF",
+            "XYABCYPPQ\nQ DEF"), [('YPPQ\n', ' ')])
+
+        self.assertEqual(regex.findall(r"(\nTest(\n+.+?){0,2}?)?\n+End",
+            "\nTest\nxyz\nxyz\nEnd"), [('\nTest\nxyz\nxyz', '\nxyz')])
+
+    def test_bug_117612(self):
+        self.assertEqual(regex.findall(r"(a|(b))", "aba"), [('a', ''), ('b',
+            'b'), ('a', '')])
+
+    def test_re_match(self):
+        self.assertEqual(regex.match('a', 'a')[:], ('a',))
+        self.assertEqual(regex.match('(a)', 'a')[:], ('a', 'a'))
+        self.assertEqual(regex.match(r'(a)', 'a')[0], 'a')
+        self.assertEqual(regex.match(r'(a)', 'a')[1], 'a')
+        self.assertEqual(regex.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
+
+        pat = regex.compile('((a)|(b))(c)?')
+        self.assertEqual(pat.match('a')[:], ('a', 'a', 'a', None, None))
+        self.assertEqual(pat.match('b')[:], ('b', 'b', None, 'b', None))
+        self.assertEqual(pat.match('ac')[:], ('ac', 'a', 'a', None, 'c'))
+        self.assertEqual(pat.match('bc')[:], ('bc', 'b', None, 'b', 'c'))
+        self.assertEqual(pat.match('bc')[:], ('bc', 'b', None, 'b', 'c'))
+
+        # A single group.
+        m = regex.match('(a)', 'a')
+        self.assertEqual(m.group(), 'a')
+        self.assertEqual(m.group(0), 'a')
+        self.assertEqual(m.group(1), 'a')
+        self.assertEqual(m.group(1, 1), ('a', 'a'))
+
+        pat = regex.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
+        self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
+        self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'), (None, 'b',
+            None))
+        self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
+
+    def test_re_groupref_exists(self):
+        self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', '(a)')[:],
+            ('(a)', '(', 'a'))
+        self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', 'a')[:], ('a',
+            None, 'a'))
+        self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', 'a)'), None)
+        self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', '(a'), None)
+        self.assertEqual(regex.match('^(?:(a)|c)((?(1)b|d))$', 'ab')[:], ('ab',
+            'a', 'b'))
+        self.assertEqual(regex.match('^(?:(a)|c)((?(1)b|d))$', 'cd')[:], ('cd',
+            None, 'd'))
+        self.assertEqual(regex.match('^(?:(a)|c)((?(1)|d))$', 'cd')[:], ('cd',
+            None, 'd'))
+        self.assertEqual(regex.match('^(?:(a)|c)((?(1)|d))$', 'a')[:], ('a',
+            'a', ''))
+
+        # Tests for bug #1177831: exercise groups other than the first group.
+        p = regex.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
+        self.assertEqual(p.match('abc')[:], ('abc', 'a', 'b', 'c'))
+        self.assertEqual(p.match('ad')[:], ('ad', 'a', None, 'd'))
+        self.assertEqual(p.match('abd'), None)
+        self.assertEqual(p.match('ac'), None)
+
+    def test_re_groupref(self):
+        self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', '|a|')[:], ('|a|',
+            '|', 'a'))
+        self.assertEqual(regex.match(r'^(\|)?([^()]+)\1?$', 'a')[:], ('a',
+            None, 'a'))
+        self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
+        self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', '|a'), None)
+        self.assertEqual(regex.match(r'^(?:(a)|c)(\1)$', 'aa')[:], ('aa', 'a',
+            'a'))
+        self.assertEqual(regex.match(r'^(?:(a)|c)(\1)?$', 'c')[:], ('c', None,
+            None))
+
+        self.assertEqual(regex.findall(r"(?i)(.{1,40}?),(.{1,40}?)(?:;)+(.{1,80}).{1,40}?\3(\ |;)+(.{1,80}?)\1",
+            "TEST, BEST; LEST ; Lest 123 Test, Best"), [('TEST', ' BEST',
+            ' LEST', ' ', '123 ')])
+
+    def test_groupdict(self):
+        self.assertEqual(regex.match('(?P<first>first) (?P<second>second)',
+            'first second').groupdict(), {'first': 'first', 'second':
+            'second'})
+
+    def test_expand(self):
+        self.assertEqual(regex.match("(?P<first>first) (?P<second>second)",
+            "first second").expand(r"\2 \1 \g<second> \g<first>"),
+            'second first second first')
+
+    def test_repeat_minmax(self):
+        self.assertEqual(regex.match(r"^(\w){1}$", "abc"), None)
+        self.assertEqual(regex.match(r"^(\w){1}?$", "abc"), None)
+        self.assertEqual(regex.match(r"^(\w){1,2}$", "abc"), None)
+        self.assertEqual(regex.match(r"^(\w){1,2}?$", "abc"), None)
+
+        self.assertEqual(regex.match(r"^(\w){3}$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){1,3}$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){1,4}$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){3,4}?$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){3}?$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){1,3}?$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){1,4}?$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){3,4}?$", "abc")[1], 'c')
+
+        self.assertEqual(regex.match("^x{1}$", "xxx"), None)
+        self.assertEqual(regex.match("^x{1}?$", "xxx"), None)
+        self.assertEqual(regex.match("^x{1,2}$", "xxx"), None)
+        self.assertEqual(regex.match("^x{1,2}?$", "xxx"), None)
+
+        self.assertEqual(regex.match("^x{1}", "xxx")[0],
'x') + self.assertEqual(regex.match("^x{1}?", "xxx")[0], 'x') + self.assertEqual(regex.match("^x{0,1}", "xxx")[0], 'x') + self.assertEqual(regex.match("^x{0,1}?", "xxx")[0], '') + + self.assertEqual(bool(regex.match("^x{3}$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{1,3}$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{1,4}$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{3,4}?$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{3}?$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{1,3}?$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{1,4}?$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{3,4}?$", "xxx")), True) + + self.assertEqual(regex.match("^x{}$", "xxx"), None) + self.assertEqual(bool(regex.match("^x{}$", "x{}")), True) + + def test_getattr(self): + self.assertEqual(regex.compile("(?i)(a)(b)").pattern, '(?i)(a)(b)') + self.assertEqual(regex.compile("(?i)(a)(b)").flags, regex.I | regex.U | + regex.DEFAULT_VERSION) + self.assertEqual(regex.compile(b"(?i)(a)(b)").flags, regex.A | regex.I + | regex.DEFAULT_VERSION) + self.assertEqual(regex.compile("(?i)(a)(b)").groups, 2) + self.assertEqual(regex.compile("(?i)(a)(b)").groupindex, {}) + + self.assertEqual(regex.compile("(?i)(?Pa)(?Pb)").groupindex, + {'first': 1, 'other': 2}) + + self.assertEqual(regex.match("(a)", "a").pos, 0) + self.assertEqual(regex.match("(a)", "a").endpos, 1) + + self.assertEqual(regex.search("b(c)", "abcdef").pos, 0) + self.assertEqual(regex.search("b(c)", "abcdef").endpos, 6) + self.assertEqual(regex.search("b(c)", "abcdef").span(), (1, 3)) + self.assertEqual(regex.search("b(c)", "abcdef").span(1), (2, 3)) + + self.assertEqual(regex.match("(a)", "a").string, 'a') + self.assertEqual(regex.match("(a)", "a").regs, ((0, 1), (0, 1))) + self.assertEqual(repr(type(regex.match("(a)", "a").re)), + self.PATTERN_CLASS) + + # Issue 14260. + p = regex.compile(r'abc(?Pdef)') + p.groupindex["n"] = 0 + self.assertEqual(p.groupindex["n"], 1) + + def test_special_escapes(self): + self.assertEqual(regex.search(r"\b(b.)\b", "abcd abc bcd bx")[1], 'bx') + self.assertEqual(regex.search(r"\B(b.)\B", "abc bcd bc abxd")[1], 'bx') + self.assertEqual(regex.search(br"\b(b.)\b", b"abcd abc bcd bx", + regex.LOCALE)[1], b'bx') + self.assertEqual(regex.search(br"\B(b.)\B", b"abc bcd bc abxd", + regex.LOCALE)[1], b'bx') + self.assertEqual(regex.search(r"\b(b.)\b", "abcd abc bcd bx", + regex.UNICODE)[1], 'bx') + self.assertEqual(regex.search(r"\B(b.)\B", "abc bcd bc abxd", + regex.UNICODE)[1], 'bx') + + self.assertEqual(regex.search(r"^abc$", "\nabc\n", regex.M)[0], 'abc') + self.assertEqual(regex.search(r"^\Aabc\Z$", "abc", regex.M)[0], 'abc') + self.assertEqual(regex.search(r"^\Aabc\Z$", "\nabc\n", regex.M), None) + + self.assertEqual(regex.search(br"\b(b.)\b", b"abcd abc bcd bx")[1], + b'bx') + self.assertEqual(regex.search(br"\B(b.)\B", b"abc bcd bc abxd")[1], + b'bx') + self.assertEqual(regex.search(br"^abc$", b"\nabc\n", regex.M)[0], + b'abc') + self.assertEqual(regex.search(br"^\Aabc\Z$", b"abc", regex.M)[0], + b'abc') + self.assertEqual(regex.search(br"^\Aabc\Z$", b"\nabc\n", regex.M), + None) + + self.assertEqual(regex.search(r"\d\D\w\W\s\S", "1aa! a")[0], '1aa! a') + self.assertEqual(regex.search(br"\d\D\w\W\s\S", b"1aa! a", + regex.LOCALE)[0], b'1aa! a') + self.assertEqual(regex.search(r"\d\D\w\W\s\S", "1aa! a", + regex.UNICODE)[0], '1aa! 
a') + + def test_bigcharset(self): + self.assertEqual(regex.match(r"([\u2222\u2223])", "\u2222")[1], + '\u2222') + self.assertEqual(regex.match(r"([\u2222\u2223])", "\u2222", + regex.UNICODE)[1], '\u2222') + self.assertEqual("".join(regex.findall(".", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + self.assertEqual("".join(regex.findall(r"[e\xe8\xe9\xea\xeb\u0113\u011b\u0117]", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + self.assertEqual("".join(regex.findall(r"e|\xe8|\xe9|\xea|\xeb|\u0113|\u011b|\u0117", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + + def test_anyall(self): + self.assertEqual(regex.match("a.b", "a\nb", regex.DOTALL)[0], "a\nb") + self.assertEqual(regex.match("a.*b", "a\n\nb", regex.DOTALL)[0], + "a\n\nb") + + def test_non_consuming(self): + self.assertEqual(regex.match(r"(a(?=\s[^a]))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[^a]*))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[abc]))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[abc]*))", "a bc")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s\1)", "a a")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s\1*)", "a aa")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s(abc|a))", "a a")[1], 'a') + + self.assertEqual(regex.match(r"(a(?!\s[^a]))", "a a")[1], 'a') + self.assertEqual(regex.match(r"(a(?!\s[abc]))", "a d")[1], 'a') + self.assertEqual(regex.match(r"(a)(?!\s\1)", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a)(?!\s(abc|a))", "a b")[1], 'a') + + def test_ignore_case(self): + self.assertEqual(regex.match("abc", "ABC", regex.I)[0], 'ABC') + self.assertEqual(regex.match(b"abc", b"ABC", regex.I)[0], b'ABC') + + self.assertEqual(regex.match(r"(a\s[^a]*)", "a bb", regex.I)[1], + 'a bb') + self.assertEqual(regex.match(r"(a\s[abc])", "a b", regex.I)[1], 'a b') + self.assertEqual(regex.match(r"(a\s[abc]*)", "a bb", regex.I)[1], + 'a bb') + self.assertEqual(regex.match(r"((a)\s\2)", "a a", regex.I)[1], 'a a') + self.assertEqual(regex.match(r"((a)\s\2*)", "a aa", regex.I)[1], + 'a aa') + self.assertEqual(regex.match(r"((a)\s(abc|a))", "a a", regex.I)[1], + 'a a') + self.assertEqual(regex.match(r"((a)\s(abc|a)*)", "a aa", regex.I)[1], + 'a aa') + + # Issue 3511. 
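A condensed sketch of the zero-width assertions checked in test_non_consuming above (my example values):

    import regex

    # A lookahead constrains what follows without consuming it, so the
    # match itself stays one character wide.
    m = regex.match(r"(a)(?=\s\1)", "a a")
    print(m.span())  # (0, 1)

    # DOTALL lets '.' match newlines as well.
    print(regex.match(r"a.b", "a\nb", regex.DOTALL).span())  # (0, 3)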
+ self.assertEqual(regex.match(r"[Z-a]", "_").span(), (0, 1)) + self.assertEqual(regex.match(r"(?i)[Z-a]", "_").span(), (0, 1)) + + self.assertEqual(bool(regex.match(r"(?i)nao", "nAo")), True) + self.assertEqual(bool(regex.match(r"(?i)n\xE3o", "n\xC3o")), True) + self.assertEqual(bool(regex.match(r"(?i)n\xE3o", "N\xC3O")), True) + self.assertEqual(bool(regex.match(r"(?i)s", "\u017F")), True) + + def test_case_folding(self): + self.assertEqual(regex.search(r"(?fi)ss", "SS").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)SS", "ss").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)SS", + "\N{LATIN SMALL LETTER SHARP S}").span(), (0, 1)) + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LETTER SHARP S}", + "SS").span(), (0, 2)) + + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE ST}", + "ST").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)ST", + "\N{LATIN SMALL LIGATURE ST}").span(), (0, 1)) + self.assertEqual(regex.search(r"(?fi)ST", + "\N{LATIN SMALL LIGATURE LONG S T}").span(), (0, 1)) + + self.assertEqual(regex.search(r"(?fi)SST", + "\N{LATIN SMALL LETTER SHARP S}t").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)SST", + "s\N{LATIN SMALL LIGATURE LONG S T}").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)SST", + "s\N{LATIN SMALL LIGATURE ST}").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE ST}", + "SST").span(), (1, 3)) + self.assertEqual(regex.search(r"(?fi)SST", + "s\N{LATIN SMALL LIGATURE ST}").span(), (0, 2)) + + self.assertEqual(regex.search(r"(?fi)FFI", + "\N{LATIN SMALL LIGATURE FFI}").span(), (0, 1)) + self.assertEqual(regex.search(r"(?fi)FFI", + "\N{LATIN SMALL LIGATURE FF}i").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)FFI", + "f\N{LATIN SMALL LIGATURE FI}").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE FFI}", + "FFI").span(), (0, 3)) + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE FF}i", + "FFI").span(), (0, 3)) + self.assertEqual(regex.search(r"(?fi)f\N{LATIN SMALL LIGATURE FI}", + "FFI").span(), (0, 3)) + + sigma = "\u03A3\u03C3\u03C2" + for ch1 in sigma: + for ch2 in sigma: + if not regex.match(r"(?fi)" + ch1, ch2): + self.fail() + + self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB01\uFB00")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB01\uFB00")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fffi", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)f\uFB03", + "\uFB00\uFB01")), True) + self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fffi", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)f\uFB03", + "\uFB00\uFB01")), True) + self.assertEqual(bool(regex.search(r"(?iV1)f\uFB01", "\uFB00i")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)f\uFB01", "\uFB00i")), + True) + + self.assertEqual(regex.findall(r"(?iV0)\m(?:word){e<=3}\M(?ne", "affine", + options=["\N{LATIN SMALL LIGATURE FFI}"]).span(), (0, 6)) + self.assertEqual(regex.search(r"(?fi)a\Lne", + "a\N{LATIN SMALL LIGATURE FFI}ne", options=["ffi"]).span(), (0, 4)) + + def test_category(self): + self.assertEqual(regex.match(r"(\s)", " ")[1], ' ') + + def test_not_literal(self): + 
self.assertEqual(regex.search(r"\s([^a])", " b")[1], 'b') + self.assertEqual(regex.search(r"\s([^a]*)", " bb")[1], 'bb') + + def test_search_coverage(self): + self.assertEqual(regex.search(r"\s(b)", " b")[1], 'b') + self.assertEqual(regex.search(r"a\s", "a ")[0], 'a ') + + def test_re_escape(self): + p = "" + self.assertEqual(regex.escape(p), p) + for i in range(0, 256): + p += chr(i) + self.assertEqual(bool(regex.match(regex.escape(chr(i)), chr(i))), + True) + self.assertEqual(regex.match(regex.escape(chr(i)), chr(i)).span(), + (0, 1)) + + pat = regex.compile(regex.escape(p)) + self.assertEqual(pat.match(p).span(), (0, 256)) + + def test_re_escape_byte(self): + p = b"" + self.assertEqual(regex.escape(p), p) + for i in range(0, 256): + b = bytes([i]) + p += b + self.assertEqual(bool(regex.match(regex.escape(b), b)), True) + self.assertEqual(regex.match(regex.escape(b), b).span(), (0, 1)) + + pat = regex.compile(regex.escape(p)) + self.assertEqual(pat.match(p).span(), (0, 256)) + + def test_constants(self): + if regex.I != regex.IGNORECASE: + self.fail() + if regex.L != regex.LOCALE: + self.fail() + if regex.M != regex.MULTILINE: + self.fail() + if regex.S != regex.DOTALL: + self.fail() + if regex.X != regex.VERBOSE: + self.fail() + + def test_flags(self): + for flag in [regex.I, regex.M, regex.X, regex.S, regex.L]: + self.assertEqual(repr(type(regex.compile('^pattern$', flag))), + self.PATTERN_CLASS) + + def test_sre_character_literals(self): + for i in [0, 8, 16, 32, 64, 127, 128, 255]: + self.assertEqual(bool(regex.match(r"\%03o" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"\%03o0" % i, chr(i) + "0")), + True) + self.assertEqual(bool(regex.match(r"\%03o8" % i, chr(i) + "8")), + True) + self.assertEqual(bool(regex.match(r"\x%02x" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"\x%02x0" % i, chr(i) + "0")), + True) + self.assertEqual(bool(regex.match(r"\x%02xz" % i, chr(i) + "z")), + True) + + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.match(r"\911", "")) + + def test_sre_character_class_literals(self): + for i in [0, 8, 16, 32, 64, 127, 128, 255]: + self.assertEqual(bool(regex.match(r"[\%03o]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\%03o0]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\%03o8]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\x%02x]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\x%02x0]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\x%02xz]" % i, chr(i))), True) + + self.assertRaisesRegex(regex.error, self.BAD_OCTAL_ESCAPE, lambda: + regex.match(r"[\911]", "")) + + def test_bug_113254(self): + self.assertEqual(regex.match(r'(a)|(b)', 'b').start(1), -1) + self.assertEqual(regex.match(r'(a)|(b)', 'b').end(1), -1) + self.assertEqual(regex.match(r'(a)|(b)', 'b').span(1), (-1, -1)) + + def test_bug_527371(self): + # Bug described in patches 527371/672491. + self.assertEqual(regex.match(r'(a)?a','a').lastindex, None) + self.assertEqual(regex.match(r'(a)(b)?b','ab').lastindex, 1) + self.assertEqual(regex.match(r'(?Pa)(?Pb)?b','ab').lastgroup, + 'a') + self.assertEqual(regex.match("(?Pa(b))", "ab").lastgroup, 'a') + self.assertEqual(regex.match("((a))", "a").lastindex, 1) + + def test_bug_545855(self): + # Bug 545855 -- This pattern failed to cause a compile error as it + # should, instead provoking a TypeError. 
+ self.assertRaisesRegex(regex.error, self.BAD_SET, lambda: + regex.compile('foo[a-')) + + def test_bug_418626(self): + # Bugs 418626 at al. -- Testing Greg Chapman's addition of op code + # SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of + # pattern '*?' on a long string. + self.assertEqual(regex.match('.*?c', 10000 * 'ab' + 'cd').end(0), + 20001) + self.assertEqual(regex.match('.*?cd', 5000 * 'ab' + 'c' + 5000 * 'ab' + + 'cde').end(0), 20003) + self.assertEqual(regex.match('.*?cd', 20000 * 'abc' + 'de').end(0), + 60001) + # Non-simple '*?' still used to hit the recursion limit, before the + # non-recursive scheme was implemented. + self.assertEqual(regex.search('(a|b)*?c', 10000 * 'ab' + 'cd').end(0), + 20001) + + def test_bug_612074(self): + pat = "[" + regex.escape("\u2039") + "]" + self.assertEqual(regex.compile(pat) and 1, 1) + + def test_stack_overflow(self): + # Nasty cases that used to overflow the straightforward recursive + # implementation of repeated groups. + self.assertEqual(regex.match('(x)*', 50000 * 'x')[1], 'x') + self.assertEqual(regex.match('(x)*y', 50000 * 'x' + 'y')[1], 'x') + self.assertEqual(regex.match('(x)*?y', 50000 * 'x' + 'y')[1], 'x') + + def test_scanner(self): + def s_ident(scanner, token): return token + def s_operator(scanner, token): return "op%s" % token + def s_float(scanner, token): return float(token) + def s_int(scanner, token): return int(token) + + scanner = regex.Scanner([(r"[a-zA-Z_]\w*", s_ident), (r"\d+\.\d*", + s_float), (r"\d+", s_int), (r"=|\+|-|\*|/", s_operator), (r"\s+", + None), ]) + + self.assertEqual(repr(type(scanner.scanner.scanner("").pattern)), + self.PATTERN_CLASS) + + self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum', + 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar'], '')) + + def test_bug_448951(self): + # Bug 448951 (similar to 429357, but with single char match). + # (Also test greedy matches.) + for op in '', '?', '*': + self.assertEqual(regex.match(r'((.%s):)?z' % op, 'z')[:], ('z', + None, None)) + self.assertEqual(regex.match(r'((.%s):)?z' % op, 'a:z')[:], ('a:z', + 'a:', 'a')) + + def test_bug_725106(self): + # Capturing groups in alternatives in repeats. + self.assertEqual(regex.match('^((a)|b)*', 'abc')[:], ('ab', 'b', 'a')) + self.assertEqual(regex.match('^(([ab])|c)*', 'abc')[:], ('abc', 'c', + 'b')) + self.assertEqual(regex.match('^((d)|[ab])*', 'abc')[:], ('ab', 'b', + None)) + self.assertEqual(regex.match('^((a)c|[ab])*', 'abc')[:], ('ab', 'b', + None)) + self.assertEqual(regex.match('^((a)|b)*?c', 'abc')[:], ('abc', 'b', + 'a')) + self.assertEqual(regex.match('^(([ab])|c)*?d', 'abcd')[:], ('abcd', + 'c', 'b')) + self.assertEqual(regex.match('^((d)|[ab])*?c', 'abc')[:], ('abc', 'b', + None)) + self.assertEqual(regex.match('^((a)c|[ab])*?c', 'abc')[:], ('abc', 'b', + None)) + + def test_bug_725149(self): + # Mark_stack_base restoring before restoring marks. + self.assertEqual(regex.match('(a)(?:(?=(b)*)c)*', 'abb')[:], ('a', 'a', + None)) + self.assertEqual(regex.match('(a)((?!(b)*))*', 'abb')[:], ('a', 'a', + None, None)) + + def test_bug_764548(self): + # Bug 764548, regex.compile() barfs on str/unicode subclasses. 
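regex.Scanner, exercised in test_scanner above, pairs token patterns with callbacks; a sketch mirroring that test:

    import regex

    # Each (pattern, action) pair either produces a token or, with None,
    # silently skips the matched text.
    scanner = regex.Scanner([
        (r"[a-zA-Z_]\w*", lambda s, tok: tok),         # identifiers
        (r"\d+\.\d*",     lambda s, tok: float(tok)),  # floats
        (r"\d+",          lambda s, tok: int(tok)),    # ints
        (r"=|\+|-|\*|/",  lambda s, tok: "op" + tok),  # operators
        (r"\s+",          None),                       # skip whitespace
    ])
    tokens, remainder = scanner.scan("sum = 3*foo + 312.50 + bar")
    print(tokens)     # ['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar']
    print(remainder)  # '' (everything was consumed)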
+ class my_unicode(str): pass + pat = regex.compile(my_unicode("abc")) + self.assertEqual(pat.match("xyz"), None) + + def test_finditer(self): + it = regex.finditer(r":+", "a:b::c:::d") + self.assertEqual([item[0] for item in it], [':', '::', ':::']) + + def test_bug_926075(self): + if regex.compile('bug_926075') is regex.compile(b'bug_926075'): + self.fail() + + def test_bug_931848(self): + pattern = "[\u002E\u3002\uFF0E\uFF61]" + self.assertEqual(regex.compile(pattern).split("a.b.c"), ['a', 'b', + 'c']) + + def test_bug_581080(self): + it = regex.finditer(r"\s", "a b") + self.assertEqual(next(it).span(), (1, 2)) + self.assertRaises(StopIteration, lambda: next(it)) + + scanner = regex.compile(r"\s").scanner("a b") + self.assertEqual(scanner.search().span(), (1, 2)) + self.assertEqual(scanner.search(), None) + + def test_bug_817234(self): + it = regex.finditer(r".*", "asdf") + self.assertEqual(next(it).span(), (0, 4)) + self.assertEqual(next(it).span(), (4, 4)) + self.assertRaises(StopIteration, lambda: next(it)) + + def test_empty_array(self): + # SF buf 1647541. + import array + for typecode in 'bBuhHiIlLfd': + a = array.array(typecode) + self.assertEqual(regex.compile(b"bla").match(a), None) + self.assertEqual(regex.compile(b"").match(a)[1 : ], ()) + + def test_inline_flags(self): + # Bug #1700. + upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Below + lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Below + + p = regex.compile(upper_char, regex.I | regex.U) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile(lower_char, regex.I | regex.U) + self.assertEqual(bool(p.match(upper_char)), True) + + p = regex.compile('(?i)' + upper_char, regex.U) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile('(?i)' + lower_char, regex.U) + self.assertEqual(bool(p.match(upper_char)), True) + + p = regex.compile('(?iu)' + upper_char) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile('(?iu)' + lower_char) + self.assertEqual(bool(p.match(upper_char)), True) + + # Changed to positional flags in regex 2023.12.23. + self.assertEqual(bool(regex.match(r"(?i)a", "A")), True) + self.assertEqual(regex.match(r"a(?i)", "A"), None) + + def test_dollar_matches_twice(self): + # $ matches the end of string, and just before the terminating \n. + pattern = regex.compile('$') + self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#') + self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#') + self.assertEqual(pattern.sub('#', '\n'), '#\n#') + + pattern = regex.compile('$', regex.MULTILINE) + self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#') + self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#') + self.assertEqual(pattern.sub('#', '\n'), '#\n#') + + def test_bytes_str_mixing(self): + # Mixing str and bytes is disallowed. 
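The two behaviours of $ checked in test_dollar_matches_twice above, shown directly (a sketch):

    import regex

    # Without MULTILINE, $ matches at the end of the string and also just
    # before a trailing newline, so this substitution fires twice.
    print(regex.compile('$').sub('#', 'a\nb\n'))  # 'a\nb#\n#'

    # With MULTILINE, $ additionally matches before every newline.
    print(regex.compile('$', regex.MULTILINE).sub('#', 'a\nb\n'))  # 'a#\nb#\n#'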
+ pat = regex.compile('.') + bpat = regex.compile(b'.') + self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda: + pat.match(b'b')) + self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda: + bpat.match('b')) + self.assertRaisesRegex(TypeError, self.STR_PAT_BYTES_TEMPL, lambda: + pat.sub(b'b', 'c')) + self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda: + pat.sub('b', b'c')) + self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda: + pat.sub(b'b', b'c')) + self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda: + bpat.sub(b'b', 'c')) + self.assertRaisesRegex(TypeError, self.BYTES_PAT_STR_TEMPL, lambda: + bpat.sub('b', b'c')) + self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda: + bpat.sub('b', 'c')) + + self.assertRaisesRegex(ValueError, self.BYTES_PAT_UNI_FLAG, lambda: + regex.compile(br'\w', regex.UNICODE)) + self.assertRaisesRegex(ValueError, self.BYTES_PAT_UNI_FLAG, lambda: + regex.compile(br'(?u)\w')) + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'\w', regex.UNICODE | regex.ASCII)) + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'(?u)\w', regex.ASCII)) + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'(?a)\w', regex.UNICODE)) + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'(?au)\w')) + + def test_ascii_and_unicode_flag(self): + # String patterns. + for flags in (0, regex.UNICODE): + pat = regex.compile('\xc0', flags | regex.IGNORECASE) + self.assertEqual(bool(pat.match('\xe0')), True) + pat = regex.compile(r'\w', flags) + self.assertEqual(bool(pat.match('\xe0')), True) + + pat = regex.compile('\xc0', regex.ASCII | regex.IGNORECASE) + self.assertEqual(pat.match('\xe0'), None) + pat = regex.compile('(?a)\xc0', regex.IGNORECASE) + self.assertEqual(pat.match('\xe0'), None) + pat = regex.compile(r'\w', regex.ASCII) + self.assertEqual(pat.match('\xe0'), None) + pat = regex.compile(r'(?a)\w') + self.assertEqual(pat.match('\xe0'), None) + + # Bytes patterns. + for flags in (0, regex.ASCII): + pat = regex.compile(b'\xc0', flags | regex.IGNORECASE) + self.assertEqual(pat.match(b'\xe0'), None) + pat = regex.compile(br'\w') + self.assertEqual(pat.match(b'\xe0'), None) + + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'(?au)\w')) + + def test_subscripting_match(self): + m = regex.match(r'(?\w)', 'xy') + if not m: + self.fail("Failed: expected match but returned None") + elif not m or m[0] != m.group(0) or m[1] != m.group(1): + self.fail("Failed") + if not m: + self.fail("Failed: expected match but returned None") + elif m[:] != ('x', 'x'): + self.fail("Failed: expected \"('x', 'x')\" but got {} instead".format(ascii(m[:]))) + + def test_new_named_groups(self): + m0 = regex.match(r'(?P\w)', 'x') + m1 = regex.match(r'(?\w)', 'x') + if not (m0 and m1 and m0[:] == m1[:]): + self.fail("Failed") + + def test_properties(self): + self.assertEqual(regex.match(b'(?ai)\xC0', b'\xE0'), None) + self.assertEqual(regex.match(br'(?ai)\xC0', b'\xE0'), None) + self.assertEqual(regex.match(br'(?a)\w', b'\xE0'), None) + self.assertEqual(bool(regex.match(r'\w', '\xE0')), True) + + # Dropped the following test. It's not possible to determine what the + # correct result should be in the general case. 
+# self.assertEqual(bool(regex.match(br'(?L)\w', b'\xE0')), +# b'\xE0'.isalnum()) + + self.assertEqual(bool(regex.match(br'(?L)\d', b'0')), True) + self.assertEqual(bool(regex.match(br'(?L)\s', b' ')), True) + self.assertEqual(bool(regex.match(br'(?L)\w', b'a')), True) + self.assertEqual(regex.match(br'(?L)\d', b'?'), None) + self.assertEqual(regex.match(br'(?L)\s', b'?'), None) + self.assertEqual(regex.match(br'(?L)\w', b'?'), None) + + self.assertEqual(regex.match(br'(?L)\D', b'0'), None) + self.assertEqual(regex.match(br'(?L)\S', b' '), None) + self.assertEqual(regex.match(br'(?L)\W', b'a'), None) + self.assertEqual(bool(regex.match(br'(?L)\D', b'?')), True) + self.assertEqual(bool(regex.match(br'(?L)\S', b'?')), True) + self.assertEqual(bool(regex.match(br'(?L)\W', b'?')), True) + + self.assertEqual(bool(regex.match(r'\p{Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'(?i)\p{Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{IsCyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{Script=Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{InCyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{Block=Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:IsCyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Script=Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:InCyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Block=Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + + self.assertEqual(bool(regex.match(r'\P{Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{IsCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{Script=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{InCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{Block=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^IsCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Script=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^InCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Block=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^IsCyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Script=Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^InCyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Block=Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + + self.assertEqual(bool(regex.match(r'\d', '0')), True) + self.assertEqual(bool(regex.match(r'\s', ' ')), True) + self.assertEqual(bool(regex.match(r'\w', 'A')), True) + 
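A few of the Unicode property forms from test_properties, as a standalone sketch (the sample characters are mine):

    import regex

    # \p{...} selects by Unicode property; \P{...} negates it.
    print(bool(regex.match(r"\p{Cyrillic}", "\u0410")))  # True (CYRILLIC CAPITAL LETTER A)
    print(bool(regex.match(r"\P{Cyrillic}", "A")))       # True
    print(bool(regex.match(r"\p{Lu}", "A")))             # True (uppercase letter)

    # \X matches a whole grapheme cluster, not a single code point.
    print(regex.findall(r"\X", "e\u0301a"))  # ['e\u0301', 'a']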
self.assertEqual(regex.match(r"\d", "?"), None) + self.assertEqual(regex.match(r"\s", "?"), None) + self.assertEqual(regex.match(r"\w", "?"), None) + self.assertEqual(regex.match(r"\D", "0"), None) + self.assertEqual(regex.match(r"\S", " "), None) + self.assertEqual(regex.match(r"\W", "A"), None) + self.assertEqual(bool(regex.match(r'\D', '?')), True) + self.assertEqual(bool(regex.match(r'\S', '?')), True) + self.assertEqual(bool(regex.match(r'\W', '?')), True) + + self.assertEqual(bool(regex.match(r'\p{L}', 'A')), True) + self.assertEqual(bool(regex.match(r'\p{L}', 'a')), True) + self.assertEqual(bool(regex.match(r'\p{Lu}', 'A')), True) + self.assertEqual(bool(regex.match(r'\p{Ll}', 'a')), True) + + self.assertEqual(bool(regex.match(r'(?i)a', 'a')), True) + self.assertEqual(bool(regex.match(r'(?i)a', 'A')), True) + + self.assertEqual(bool(regex.match(r'\w', '0')), True) + self.assertEqual(bool(regex.match(r'\w', 'a')), True) + self.assertEqual(bool(regex.match(r'\w', '_')), True) + + self.assertEqual(regex.match(r"\X", "\xE0").span(), (0, 1)) + self.assertEqual(regex.match(r"\X", "a\u0300").span(), (0, 2)) + self.assertEqual(regex.findall(r"\X", + "a\xE0a\u0300e\xE9e\u0301"), ['a', '\xe0', 'a\u0300', 'e', + '\xe9', 'e\u0301']) + self.assertEqual(regex.findall(r"\X{3}", + "a\xE0a\u0300e\xE9e\u0301"), ['a\xe0a\u0300', 'e\xe9e\u0301']) + self.assertEqual(regex.findall(r"\X", "\r\r\n\u0301A\u0301"), + ['\r', '\r\n', '\u0301', 'A\u0301']) + + self.assertEqual(bool(regex.match(r'\p{Ll}', 'a')), True) + + chars_u = "-09AZaz_\u0393\u03b3" + chars_b = b"-09AZaz_" + word_set = set("Ll Lm Lo Lt Lu Mc Me Mn Nd Nl No Pc".split()) + + tests = [ + (r"\w", chars_u, "09AZaz_\u0393\u03b3"), + (r"[[:word:]]", chars_u, "09AZaz_\u0393\u03b3"), + (r"\W", chars_u, "-"), + (r"[[:^word:]]", chars_u, "-"), + (r"\d", chars_u, "09"), + (r"[[:digit:]]", chars_u, "09"), + (r"\D", chars_u, "-AZaz_\u0393\u03b3"), + (r"[[:^digit:]]", chars_u, "-AZaz_\u0393\u03b3"), + (r"[[:alpha:]]", chars_u, "AZaz\u0393\u03b3"), + (r"[[:^alpha:]]", chars_u, "-09_"), + (r"[[:alnum:]]", chars_u, "09AZaz\u0393\u03b3"), + (r"[[:^alnum:]]", chars_u, "-_"), + (r"[[:xdigit:]]", chars_u, "09Aa"), + (r"[[:^xdigit:]]", chars_u, "-Zz_\u0393\u03b3"), + (r"\p{InBasicLatin}", "a\xE1", "a"), + (r"\P{InBasicLatin}", "a\xE1", "\xE1"), + (r"(?i)\p{InBasicLatin}", "a\xE1", "a"), + (r"(?i)\P{InBasicLatin}", "a\xE1", "\xE1"), + + (br"(?L)\w", chars_b, b"09AZaz_"), + (br"(?L)[[:word:]]", chars_b, b"09AZaz_"), + (br"(?L)\W", chars_b, b"-"), + (br"(?L)[[:^word:]]", chars_b, b"-"), + (br"(?L)\d", chars_b, b"09"), + (br"(?L)[[:digit:]]", chars_b, b"09"), + (br"(?L)\D", chars_b, b"-AZaz_"), + (br"(?L)[[:^digit:]]", chars_b, b"-AZaz_"), + (br"(?L)[[:alpha:]]", chars_b, b"AZaz"), + (br"(?L)[[:^alpha:]]", chars_b, b"-09_"), + (br"(?L)[[:alnum:]]", chars_b, b"09AZaz"), + (br"(?L)[[:^alnum:]]", chars_b, b"-_"), + (br"(?L)[[:xdigit:]]", chars_b, b"09Aa"), + (br"(?L)[[:^xdigit:]]", chars_b, b"-Zz_"), + + (br"(?a)\w", chars_b, b"09AZaz_"), + (br"(?a)[[:word:]]", chars_b, b"09AZaz_"), + (br"(?a)\W", chars_b, b"-"), + (br"(?a)[[:^word:]]", chars_b, b"-"), + (br"(?a)\d", chars_b, b"09"), + (br"(?a)[[:digit:]]", chars_b, b"09"), + (br"(?a)\D", chars_b, b"-AZaz_"), + (br"(?a)[[:^digit:]]", chars_b, b"-AZaz_"), + (br"(?a)[[:alpha:]]", chars_b, b"AZaz"), + (br"(?a)[[:^alpha:]]", chars_b, b"-09_"), + (br"(?a)[[:alnum:]]", chars_b, b"09AZaz"), + (br"(?a)[[:^alnum:]]", chars_b, b"-_"), + (br"(?a)[[:xdigit:]]", chars_b, b"09Aa"), + (br"(?a)[[:^xdigit:]]", chars_b, b"-Zz_"), + ] + 
for pattern, chars, expected in tests: + try: + if chars[ : 0].join(regex.findall(pattern, chars)) != expected: + self.fail("Failed: {}".format(pattern)) + except Exception as e: + self.fail("Failed: {} raised {}".format(pattern, ascii(e))) + + self.assertEqual(bool(regex.match(r"\p{NumericValue=0}", "0")), + True) + self.assertEqual(bool(regex.match(r"\p{NumericValue=1/2}", + "\N{VULGAR FRACTION ONE HALF}")), True) + self.assertEqual(bool(regex.match(r"\p{NumericValue=0.5}", + "\N{VULGAR FRACTION ONE HALF}")), True) + + def test_word_class(self): + self.assertEqual(regex.findall(r"\w+", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), + ['\u0939\u093f\u0928\u094d\u0926\u0940']) + self.assertEqual(regex.findall(r"\W+", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), [' ', ',']) + self.assertEqual(regex.split(r"(?V1)\b", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), [' ', + '\u0939\u093f\u0928\u094d\u0926\u0940', ',']) + self.assertEqual(regex.split(r"(?V1)\B", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), ['', ' \u0939', + '\u093f', '\u0928', '\u094d', '\u0926', '\u0940,', '']) + + def test_search_anchor(self): + self.assertEqual(regex.findall(r"\G\w{2}", "abcd ef"), ['ab', 'cd']) + + def test_search_reverse(self): + self.assertEqual(regex.findall(r"(?r).", "abc"), ['c', 'b', 'a']) + self.assertEqual(regex.findall(r"(?r).", "abc", overlapped=True), ['c', + 'b', 'a']) + self.assertEqual(regex.findall(r"(?r)..", "abcde"), ['de', 'bc']) + self.assertEqual(regex.findall(r"(?r)..", "abcde", overlapped=True), + ['de', 'cd', 'bc', 'ab']) + self.assertEqual(regex.findall(r"(?r)(.)(-)(.)", "a-b-c", + overlapped=True), [("b", "-", "c"), ("a", "-", "b")]) + + self.assertEqual([m[0] for m in regex.finditer(r"(?r).", "abc")], ['c', + 'b', 'a']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r).", "abc")], ['c', + 'b', 'a']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + + self.assertEqual(regex.findall(r"^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual(regex.findall(r"(?V1)^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual(regex.findall(r"(?r)^|\w+", "foo bar"), ['bar', 'foo', + '']) + self.assertEqual(regex.findall(r"(?rV1)^|\w+", "foo bar"), ['bar', + 'foo', '']) + + self.assertEqual([m[0] for m in regex.finditer(r"^|\w+", "foo bar")], + ['', 'foo', 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?V1)^|\w+", + "foo bar")], ['', 'foo', 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)^|\w+", + "foo bar")], ['bar', 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?rV1)^|\w+", + "foo bar")], ['bar', 'foo', '']) + + self.assertEqual(regex.findall(r"\G\w{2}", "abcd ef"), ['ab', 'cd']) + self.assertEqual(regex.findall(r".{2}(?<=\G.*)", "abcd"), ['ab', 'cd']) + self.assertEqual(regex.findall(r"(?r)\G\w{2}", "abcd ef"), []) + self.assertEqual(regex.findall(r"(?r)\w{2}\G", "abcd ef"), ['ef']) + + self.assertEqual(regex.findall(r"q*", "qqwe"), ['qq', '', '', '']) + self.assertEqual(regex.findall(r"(?V1)q*", "qqwe"), ['qq', '', '', '']) + self.assertEqual(regex.findall(r"(?r)q*", "qqwe"), ['', '', 'qq', '']) + self.assertEqual(regex.findall(r"(?rV1)q*", "qqwe"), ['', '', 'qq', + '']) + + self.assertEqual(regex.findall(".", "abcd", pos=1, endpos=3), ['b', + 'c']) + self.assertEqual(regex.findall(".", "abcd", pos=1, endpos=-1), ['b', + 'c']) + self.assertEqual([m[0] for 
m in regex.finditer(".", "abcd", pos=1, + endpos=3)], ['b', 'c']) + self.assertEqual([m[0] for m in regex.finditer(".", "abcd", pos=1, + endpos=-1)], ['b', 'c']) + + self.assertEqual([m[0] for m in regex.finditer("(?r).", "abcd", pos=1, + endpos=3)], ['c', 'b']) + self.assertEqual([m[0] for m in regex.finditer("(?r).", "abcd", pos=1, + endpos=-1)], ['c', 'b']) + self.assertEqual(regex.findall("(?r).", "abcd", pos=1, endpos=3), ['c', + 'b']) + self.assertEqual(regex.findall("(?r).", "abcd", pos=1, endpos=-1), + ['c', 'b']) + + self.assertEqual(regex.findall(r"[ab]", "aB", regex.I), ['a', 'B']) + self.assertEqual(regex.findall(r"(?r)[ab]", "aB", regex.I), ['B', 'a']) + + self.assertEqual(regex.findall(r"(?r).{2}", "abc"), ['bc']) + self.assertEqual(regex.findall(r"(?r).{2}", "abc", overlapped=True), + ['bc', 'ab']) + self.assertEqual(regex.findall(r"(\w+) (\w+)", + "first second third fourth fifth"), [('first', 'second'), ('third', + 'fourth')]) + self.assertEqual(regex.findall(r"(?r)(\w+) (\w+)", + "first second third fourth fifth"), [('fourth', 'fifth'), ('second', + 'third')]) + + self.assertEqual([m[0] for m in regex.finditer(r"(?r).{2}", "abc")], + ['bc']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r).{2}", "abc", + overlapped=True)], ['bc', 'ab']) + self.assertEqual([m[0] for m in regex.finditer(r"(\w+) (\w+)", + "first second third fourth fifth")], ['first second', + 'third fourth']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)(\w+) (\w+)", + "first second third fourth fifth")], ['fourth fifth', + 'second third']) + + self.assertEqual(regex.search("abcdef", "abcdef").span(), (0, 6)) + self.assertEqual(regex.search("(?r)abcdef", "abcdef").span(), (0, 6)) + self.assertEqual(regex.search("(?i)abcdef", "ABCDEF").span(), (0, 6)) + self.assertEqual(regex.search("(?ir)abcdef", "ABCDEF").span(), (0, 6)) + + self.assertEqual(regex.sub(r"(.)", r"\1", "abc"), 'abc') + self.assertEqual(regex.sub(r"(?r)(.)", r"\1", "abc"), 'abc') + + def test_atomic(self): + # Issue 433030. + self.assertEqual(regex.search(r"(?>a*)a", "aa"), None) + + def test_possessive(self): + # Single-character non-possessive. + self.assertEqual(regex.search(r"a?a", "a").span(), (0, 1)) + self.assertEqual(regex.search(r"a*a", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"a+a", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"a{1,3}a", "aaa").span(), (0, 3)) + + # Multiple-character non-possessive. + self.assertEqual(regex.search(r"(?:ab)?ab", "ab").span(), (0, 2)) + self.assertEqual(regex.search(r"(?:ab)*ab", "ababab").span(), (0, 6)) + self.assertEqual(regex.search(r"(?:ab)+ab", "ababab").span(), (0, 6)) + self.assertEqual(regex.search(r"(?:ab){1,3}ab", "ababab").span(), (0, + 6)) + + # Single-character possessive. + self.assertEqual(regex.search(r"a?+a", "a"), None) + self.assertEqual(regex.search(r"a*+a", "aaa"), None) + self.assertEqual(regex.search(r"a++a", "aaa"), None) + self.assertEqual(regex.search(r"a{1,3}+a", "aaa"), None) + + # Multiple-character possessive. + self.assertEqual(regex.search(r"(?:ab)?+ab", "ab"), None) + self.assertEqual(regex.search(r"(?:ab)*+ab", "ababab"), None) + self.assertEqual(regex.search(r"(?:ab)++ab", "ababab"), None) + self.assertEqual(regex.search(r"(?:ab){1,3}+ab", "ababab"), None) + + def test_zerowidth(self): + # Issue 3262. 
+ if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split(r"\b", "a b"), ['', 'a', ' ', 'b', + '']) + else: + self.assertEqual(regex.split(r"\b", "a b"), ['a b']) + self.assertEqual(regex.split(r"(?V1)\b", "a b"), ['', 'a', ' ', 'b', + '']) + + # Issue 1647489. + self.assertEqual(regex.findall(r"^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"^|\w+", "foo bar")], + ['', 'foo', 'bar']) + self.assertEqual(regex.findall(r"(?r)^|\w+", "foo bar"), ['bar', + 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)^|\w+", + "foo bar")], ['bar', 'foo', '']) + self.assertEqual(regex.findall(r"(?V1)^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?V1)^|\w+", + "foo bar")], ['', 'foo', 'bar']) + self.assertEqual(regex.findall(r"(?rV1)^|\w+", "foo bar"), ['bar', + 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?rV1)^|\w+", + "foo bar")], ['bar', 'foo', '']) + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split("", "xaxbxc"), ['', 'x', 'a', 'x', + 'b', 'x', 'c', '']) + self.assertEqual([m for m in regex.splititer("", "xaxbxc")], ['', + 'x', 'a', 'x', 'b', 'x', 'c', '']) + else: + self.assertEqual(regex.split("", "xaxbxc"), ['xaxbxc']) + self.assertEqual([m for m in regex.splititer("", "xaxbxc")], + ['xaxbxc']) + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split("(?r)", "xaxbxc"), ['', 'c', 'x', 'b', + 'x', 'a', 'x', '']) + self.assertEqual([m for m in regex.splititer("(?r)", "xaxbxc")], + ['', 'c', 'x', 'b', 'x', 'a', 'x', '']) + else: + self.assertEqual(regex.split("(?r)", "xaxbxc"), ['xaxbxc']) + self.assertEqual([m for m in regex.splititer("(?r)", "xaxbxc")], + ['xaxbxc']) + + self.assertEqual(regex.split("(?V1)", "xaxbxc"), ['', 'x', 'a', 'x', + 'b', 'x', 'c', '']) + self.assertEqual([m for m in regex.splititer("(?V1)", "xaxbxc")], ['', + 'x', 'a', 'x', 'b', 'x', 'c', '']) + + self.assertEqual(regex.split("(?rV1)", "xaxbxc"), ['', 'c', 'x', 'b', + 'x', 'a', 'x', '']) + self.assertEqual([m for m in regex.splititer("(?rV1)", "xaxbxc")], ['', + 'c', 'x', 'b', 'x', 'a', 'x', '']) + + def test_scoped_and_inline_flags(self): + # Issues 433028, 433024, 433027. + self.assertEqual(regex.search(r"(?i)Ab", "ab").span(), (0, 2)) + self.assertEqual(regex.search(r"(?i:A)b", "ab").span(), (0, 2)) + # Changed to positional flags in regex 2023.12.23. + self.assertEqual(regex.search(r"A(?i)b", "ab"), None) + + self.assertEqual(regex.search(r"(?V0)Ab", "ab"), None) + self.assertEqual(regex.search(r"(?V1)Ab", "ab"), None) + self.assertEqual(regex.search(r"(?-i)Ab", "ab", flags=regex.I), None) + self.assertEqual(regex.search(r"(?-i:A)b", "ab", flags=regex.I), None) + self.assertEqual(regex.search(r"A(?-i)b", "ab", flags=regex.I).span(), + (0, 2)) + + def test_repeated_repeats(self): + # Issue 2537. + self.assertEqual(regex.search(r"(?:a+)+", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"(?:(?:ab)+c)+", "abcabc").span(), (0, + 6)) + + # Hg issue 286. 
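+        # A counted repeat of a repeated group must not be optimised away:
+        # (?:a+){2,} still has to consume the whole of "aaa" below.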
+ self.assertEqual(regex.search(r"(?:a+){2,}", "aaa").span(), (0, 3)) + + def test_lookbehind(self): + self.assertEqual(regex.search(r"123(?<=a\d+)", "a123").span(), (1, 4)) + self.assertEqual(regex.search(r"123(?<=a\d+)", "b123"), None) + self.assertEqual(regex.search(r"123(?= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "xy"), + 'y-x-') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "xy"), + 'y-x') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "xy"), 'y-x-') + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "x"), '-x-') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "x"), '-x') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "x"), '-x-') + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "y"), 'y--') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "y"), 'y-') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "y"), 'y--') + + def test_bug_10328 (self): + # Issue 10328. + pat = regex.compile(r'(?mV0)(?P[ \t]+\r*$)|(?P(?<=[^\n])\Z)') + if sys.version_info >= (3, 7, 0): + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 2)) + else: + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 1)) + self.assertEqual([m.group() for m in pat.finditer('foobar ')], [' ', + '']) + pat = regex.compile(r'(?mV1)(?P[ \t]+\r*$)|(?P(?<=[^\n])\Z)') + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 2)) + self.assertEqual([m.group() for m in pat.finditer('foobar ')], [' ', + '']) + + def test_overlapped(self): + self.assertEqual(regex.findall(r"..", "abcde"), ['ab', 'cd']) + self.assertEqual(regex.findall(r"..", "abcde", overlapped=True), ['ab', + 'bc', 'cd', 'de']) + self.assertEqual(regex.findall(r"(?r)..", "abcde"), ['de', 'bc']) + self.assertEqual(regex.findall(r"(?r)..", "abcde", overlapped=True), + ['de', 'cd', 'bc', 'ab']) + self.assertEqual(regex.findall(r"(.)(-)(.)", "a-b-c", overlapped=True), + [("a", "-", "b"), ("b", "-", "c")]) + + self.assertEqual([m[0] for m in regex.finditer(r"..", "abcde")], ['ab', + 'cd']) + self.assertEqual([m[0] for m in regex.finditer(r"..", "abcde", + overlapped=True)], ['ab', 'bc', 'cd', 'de']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde")], + ['de', 'bc']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + + self.assertEqual([m.groups() for m in regex.finditer(r"(.)(-)(.)", + "a-b-c", overlapped=True)], [("a", "-", "b"), ("b", "-", "c")]) + self.assertEqual([m.groups() for m in regex.finditer(r"(?r)(.)(-)(.)", + "a-b-c", overlapped=True)], [("b", "-", "c"), ("a", "-", "b")]) + + def test_splititer(self): + self.assertEqual(regex.split(r",", "a,b,,c,"), ['a', 'b', '', 'c', '']) + self.assertEqual([m for m in regex.splititer(r",", "a,b,,c,")], ['a', + 'b', '', 'c', '']) + + def test_grapheme(self): + self.assertEqual(regex.match(r"\X", "\xE0").span(), (0, 1)) + self.assertEqual(regex.match(r"\X", "a\u0300").span(), (0, 2)) + + self.assertEqual(regex.findall(r"\X", + "a\xE0a\u0300e\xE9e\u0301"), ['a', '\xe0', 'a\u0300', 'e', + '\xe9', 'e\u0301']) + self.assertEqual(regex.findall(r"\X{3}", + "a\xE0a\u0300e\xE9e\u0301"), ['a\xe0a\u0300', 'e\xe9e\u0301']) + self.assertEqual(regex.findall(r"\X", "\r\r\n\u0301A\u0301"), + ['\r', '\r\n', '\u0301', 'A\u0301']) + + def test_word_boundary(self): + text = 
'The quick ("brown") fox can\'t jump 32.3 feet, right?' + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'The', ' ', + 'quick', ' ("', 'brown', '") ', 'fox', ' ', 'can', "'", 't', + ' ', 'jump', ' ', '32', '.', '3', ' ', 'feet', ', ', + 'right', '?']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', 'The', ' ', + 'quick', ' ', '(', '"', 'brown', '"', ')', ' ', 'fox', ' ', + "can't", ' ', 'jump', ' ', '32.3', ' ', 'feet', ',', ' ', + 'right', '?', '']) + + text = "The fox" + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'The', ' ', + 'fox', '']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', 'The', ' ', + 'fox', '']) + + text = "can't aujourd'hui l'objectif" + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'can', "'", + 't', ' ', 'aujourd', "'", 'hui', ' ', 'l', "'", 'objectif', + '']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', "can't", ' ', + "aujourd'hui", ' ', "l'objectif", '']) + + def test_line_boundary(self): + self.assertEqual(regex.findall(r".+", "Line 1\nLine 2\n"), ["Line 1", + "Line 2"]) + self.assertEqual(regex.findall(r".+", "Line 1\rLine 2\r"), + ["Line 1\rLine 2\r"]) + self.assertEqual(regex.findall(r".+", "Line 1\r\nLine 2\r\n"), + ["Line 1\r", "Line 2\r"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\nLine 2\n"), + ["Line 1", "Line 2"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\rLine 2\r"), + ["Line 1", "Line 2"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\r\nLine 2\r\n"), + ["Line 1", "Line 2"]) + + self.assertEqual(regex.search(r"^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"^abc", "\nabc"), None) + self.assertEqual(regex.search(r"^abc", "\rabc"), None) + self.assertEqual(regex.search(r"(?w)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?w)^abc", "\nabc"), None) + self.assertEqual(regex.search(r"(?w)^abc", "\rabc"), None) + + self.assertEqual(regex.search(r"abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"abc$", "abc\r"), None) + self.assertEqual(regex.search(r"(?w)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?w)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?w)abc$", "abc\r").start(), 0) + + self.assertEqual(regex.search(r"(?m)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?m)^abc", "\nabc").start(), 1) + self.assertEqual(regex.search(r"(?m)^abc", "\rabc"), None) + self.assertEqual(regex.search(r"(?mw)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?mw)^abc", "\nabc").start(), 1) + self.assertEqual(regex.search(r"(?mw)^abc", "\rabc").start(), 1) + + self.assertEqual(regex.search(r"(?m)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?m)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?m)abc$", "abc\r"), None) + self.assertEqual(regex.search(r"(?mw)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?mw)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?mw)abc$", "abc\r").start(), 0) + + def test_branch_reset(self): + self.assertEqual(regex.match(r"(?:(a)|(b))(c)", "ac").groups(), ('a', + None, 'c')) + self.assertEqual(regex.match(r"(?:(a)|(b))(c)", "bc").groups(), (None, + 'b', 'c')) + self.assertEqual(regex.match(r"(?:(?a)|(?b))(?c)", + "ac").groups(), ('a', None, 'c')) + self.assertEqual(regex.match(r"(?:(?a)|(?b))(?c)", + "bc").groups(), (None, 'b', 'c')) + + self.assertEqual(regex.match(r"(?a)(?:(?b)|(?c))(?d)", + "abd").groups(), ('a', 'b', None, 'd')) + 
self.assertEqual(regex.match(r"(?a)(?:(?b)|(?c))(?d)", + "acd").groups(), ('a', None, 'c', 'd')) + self.assertEqual(regex.match(r"(a)(?:(b)|(c))(d)", "abd").groups(), + ('a', 'b', None, 'd')) + + self.assertEqual(regex.match(r"(a)(?:(b)|(c))(d)", "acd").groups(), + ('a', None, 'c', 'd')) + self.assertEqual(regex.match(r"(a)(?|(b)|(b))(d)", "abd").groups(), + ('a', 'b', 'd')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "ac").groups(), + ('a', None, 'c')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "bc").groups(), + (None, 'b', 'c')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "ac").groups(), + ('a', 'c')) + + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "bc").groups(), + ('b', 'c')) + + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(?d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(?d))(e)", + "cde").groups(), ('d', 'c', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(d))(e)", + "cde").groups(), ('d', 'c', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(d))(e)", + "cde").groups(), ('c', 'd', 'e')) + + # Hg issue 87: Allow duplicate names of groups + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "abe").groups(), ("a", "b", "e")) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "abe").capturesdict(), {"a": ["a"], "b": ["b"]}) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "cde").groups(), ("d", None, "e")) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "cde").capturesdict(), {"a": ["c", "d"], "b": []}) + + def test_set(self): + self.assertEqual(regex.match(r"[a]", "a").span(), (0, 1)) + self.assertEqual(regex.match(r"(?i)[a]", "A").span(), (0, 1)) + self.assertEqual(regex.match(r"[a-b]", r"a").span(), (0, 1)) + self.assertEqual(regex.match(r"(?i)[a-b]", r"A").span(), (0, 1)) + + self.assertEqual(regex.sub(r"(?V0)([][])", r"-", "a[b]c"), "a-b-c") + + self.assertEqual(regex.findall(r"[\p{Alpha}]", "a0"), ["a"]) + self.assertEqual(regex.findall(r"(?i)[\p{Alpha}]", "A0"), ["A"]) + + self.assertEqual(regex.findall(r"[a\p{Alpha}]", "ab0"), ["a", "b"]) + self.assertEqual(regex.findall(r"[a\P{Alpha}]", "ab0"), ["a", "0"]) + self.assertEqual(regex.findall(r"(?i)[a\p{Alpha}]", "ab0"), ["a", + "b"]) + self.assertEqual(regex.findall(r"(?i)[a\P{Alpha}]", "ab0"), ["a", + "0"]) + + self.assertEqual(regex.findall(r"[a-b\p{Alpha}]", "abC0"), ["a", + "b", "C"]) + self.assertEqual(regex.findall(r"(?i)[a-b\p{Alpha}]", "AbC0"), ["A", + "b", "C"]) + + self.assertEqual(regex.findall(r"[\p{Alpha}]", "a0"), ["a"]) + self.assertEqual(regex.findall(r"[\P{Alpha}]", "a0"), ["0"]) + self.assertEqual(regex.findall(r"[^\p{Alpha}]", "a0"), ["0"]) + self.assertEqual(regex.findall(r"[^\P{Alpha}]", "a0"), ["a"]) + + self.assertEqual("".join(regex.findall(r"[^\d-h]", "a^b12c-h")), + 'a^bc') + self.assertEqual("".join(regex.findall(r"[^\dh]", "a^b12c-h")), + 'a^bc-') + self.assertEqual("".join(regex.findall(r"[^h\s\db]", "a^b 12c-h")), + 'a^c-') + self.assertEqual("".join(regex.findall(r"[^b\w]", "a b")), ' ') + self.assertEqual("".join(regex.findall(r"[^b\S]", "a b")), ' ') + self.assertEqual("".join(regex.findall(r"[^8\d]", "a 1b2")), 'a b') + + all_chars = "".join(chr(c) for c in range(0x100)) + self.assertEqual(len(regex.findall(r"\p{ASCII}", all_chars)), 128) + 
self.assertEqual(len(regex.findall(r"\p{Letter}", all_chars)), + 117) + self.assertEqual(len(regex.findall(r"\p{Digit}", all_chars)), 10) + + # Set operators + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Letter}]", + all_chars)), 52) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Alnum}&&\p{Letter}]", + all_chars)), 52) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Alnum}&&\p{Digit}]", + all_chars)), 10) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Cc}]", + all_chars)), 33) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Graph}]", + all_chars)), 94) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}--\p{Cc}]", + all_chars)), 95) + self.assertEqual(len(regex.findall(r"[\p{Letter}\p{Digit}]", + all_chars)), 127) + self.assertEqual(len(regex.findall(r"(?V1)[\p{Letter}||\p{Digit}]", + all_chars)), 127) + self.assertEqual(len(regex.findall(r"\p{HexDigit}", all_chars)), + 22) + self.assertEqual(len(regex.findall(r"(?V1)[\p{HexDigit}~~\p{Digit}]", + all_chars)), 12) + self.assertEqual(len(regex.findall(r"(?V1)[\p{Digit}~~\p{HexDigit}]", + all_chars)), 12) + + self.assertEqual(repr(type(regex.compile(r"(?V0)([][-])"))), + self.PATTERN_CLASS) + self.assertEqual(regex.findall(r"(?V1)[[a-z]--[aei]]", "abc"), ["b", + "c"]) + self.assertEqual(regex.findall(r"(?iV1)[[a-z]--[aei]]", "abc"), ["b", + "c"]) + self.assertEqual(regex.findall(r"(?V1)[\w--a]","abc"), ["b", "c"]) + self.assertEqual(regex.findall(r"(?iV1)[\w--a]","abc"), ["b", "c"]) + + def test_various(self): + tests = [ + # Test ?P< and ?P= extensions. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with a digit. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with an illegal char. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with an illegal char. + + # Same tests, for the ?P= form. + ('(?Pa)(?P=foo_123', 'aa', '', regex.error, + self.MISSING_RPAREN), + ('(?Pa)(?P=1)', 'aa', '1', ascii('a')), + ('(?Pa)(?P=0)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=-1)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=!)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=foo_124)', 'aa', '', regex.error, + self.UNKNOWN_GROUP), # Backref to undefined group. + + ('(?Pa)', 'a', '1', ascii('a')), + ('(?Pa)(?P=foo_123)', 'aa', '1', ascii('a')), + + # Mal-formed \g in pattern treated as literal for compatibility. + (r'(?a)\ga)\g<1>', 'aa', '1', ascii('a')), + (r'(?a)\g', 'aa', '', ascii(None)), + (r'(?a)\g', 'aa', '', regex.error, + self.UNKNOWN_GROUP), # Backref to undefined group. + + ('(?a)', 'a', '1', ascii('a')), + (r'(?a)\g', 'aa', '1', ascii('a')), + + # Test octal escapes. + ('\\1', 'a', '', regex.error, self.INVALID_GROUP_REF), # Backreference. + ('[\\1]', '\1', '0', "'\\x01'"), # Character. + ('\\09', chr(0) + '9', '0', ascii(chr(0) + '9')), + ('\\141', 'a', '0', ascii('a')), + ('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', + '0,11', ascii(('abcdefghijklk9', 'k'))), + + # Test \0 is handled everywhere. + (r'\0', '\0', '0', ascii('\0')), + (r'[\0a]', '\0', '0', ascii('\0')), + (r'[a\0]', '\0', '0', ascii('\0')), + (r'[^a\0]', '\0', '', ascii(None)), + + # Test various letter escapes. + (r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', '0', + ascii('\a\b\f\n\r\t\v')), + (r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', '0', + ascii('\a\b\f\n\r\t\v')), + (r'\xff', '\377', '0', ascii(chr(255))), + + # New \x semantics. 
+ (r'\x00ffffffffffffff', '\377', '', ascii(None)), + (r'\x00f', '\017', '', ascii(None)), + (r'\x00fe', '\376', '', ascii(None)), + + (r'\x00ff', '\377', '', ascii(None)), + (r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', '0', ascii('\t\n\v\r\f\ag')), + ('\t\n\v\r\f\a\\g', '\t\n\v\r\f\ag', '0', ascii('\t\n\v\r\f\ag')), + (r'\t\n\v\r\f\a', '\t\n\v\r\f\a', '0', ascii(chr(9) + chr(10) + + chr(11) + chr(13) + chr(12) + chr(7))), + (r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', '0', + ascii('\t\n\v\r\f\b')), + + (r"^\w+=(\\[\000-\277]|[^\n\\])*", + "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c", '0', + ascii("SRC=eval.c g.c blah blah blah \\\\")), + + # Test that . only matches \n in DOTALL mode. + ('a.b', 'acb', '0', ascii('acb')), + ('a.b', 'a\nb', '', ascii(None)), + ('a.*b', 'acc\nccb', '', ascii(None)), + ('a.{4,5}b', 'acc\nccb', '', ascii(None)), + ('a.b', 'a\rb', '0', ascii('a\rb')), + # Changed to positional flags in regex 2023.12.23. + ('a.b(?s)', 'a\nb', '', ascii(None)), + ('(?s)a.b', 'a\nb', '0', ascii('a\nb')), + ('a.*(?s)b', 'acc\nccb', '', ascii(None)), + ('(?s)a.*b', 'acc\nccb', '0', ascii('acc\nccb')), + ('(?s)a.{4,5}b', 'acc\nccb', '0', ascii('acc\nccb')), + + (')', '', '', regex.error, self.TRAILING_CHARS), # Unmatched right bracket. + ('', '', '0', "''"), # Empty pattern. + ('abc', 'abc', '0', ascii('abc')), + ('abc', 'xbc', '', ascii(None)), + ('abc', 'axc', '', ascii(None)), + ('abc', 'abx', '', ascii(None)), + ('abc', 'xabcy', '0', ascii('abc')), + ('abc', 'ababc', '0', ascii('abc')), + ('ab*c', 'abc', '0', ascii('abc')), + ('ab*bc', 'abc', '0', ascii('abc')), + + ('ab*bc', 'abbc', '0', ascii('abbc')), + ('ab*bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab+bc', 'abbc', '0', ascii('abbc')), + ('ab+bc', 'abc', '', ascii(None)), + ('ab+bc', 'abq', '', ascii(None)), + ('ab+bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab?bc', 'abbc', '0', ascii('abbc')), + ('ab?bc', 'abc', '0', ascii('abc')), + ('ab?bc', 'abbbbc', '', ascii(None)), + ('ab?c', 'abc', '0', ascii('abc')), + + ('^abc$', 'abc', '0', ascii('abc')), + ('^abc$', 'abcc', '', ascii(None)), + ('^abc', 'abcc', '0', ascii('abc')), + ('^abc$', 'aabc', '', ascii(None)), + ('abc$', 'aabc', '0', ascii('abc')), + ('^', 'abc', '0', ascii('')), + ('$', 'abc', '0', ascii('')), + ('a.c', 'abc', '0', ascii('abc')), + ('a.c', 'axc', '0', ascii('axc')), + ('a.*c', 'axyzc', '0', ascii('axyzc')), + + ('a.*c', 'axyzd', '', ascii(None)), + ('a[bc]d', 'abc', '', ascii(None)), + ('a[bc]d', 'abd', '0', ascii('abd')), + ('a[b-d]e', 'abd', '', ascii(None)), + ('a[b-d]e', 'ace', '0', ascii('ace')), + ('a[b-d]', 'aac', '0', ascii('ac')), + ('a[-b]', 'a-', '0', ascii('a-')), + ('a[\\-b]', 'a-', '0', ascii('a-')), + ('a[b-]', 'a-', '0', ascii('a-')), + ('a[]b', '-', '', regex.error, self.BAD_SET), + + ('a[', '-', '', regex.error, self.BAD_SET), + ('a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('a]', 'a]', '0', ascii('a]')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[^bc]d', 'aed', '0', ascii('aed')), + ('a[^bc]d', 'abd', '', ascii(None)), + ('a[^-b]c', 'adc', '0', ascii('adc')), + + ('a[^-b]c', 'a-c', '', ascii(None)), + ('a[^]b]c', 'a]c', '', ascii(None)), + ('a[^]b]c', 'adc', '0', ascii('adc')), + ('\\ba\\b', 'a-', '0', ascii('a')), + ('\\ba\\b', '-a', '0', ascii('a')), + ('\\ba\\b', '-a-', '0', ascii('a')), + ('\\by\\b', 'xy', '', ascii(None)), + ('\\by\\b', 'yz', '', ascii(None)), + ('\\by\\b', 'xyz', '', 
ascii(None)), + ('x\\b', 'xyz', '', ascii(None)), + + ('x\\B', 'xyz', '0', ascii('x')), + ('\\Bz', 'xyz', '0', ascii('z')), + ('z\\B', 'xyz', '', ascii(None)), + ('\\Bx', 'xyz', '', ascii(None)), + ('\\Ba\\B', 'a-', '', ascii(None)), + ('\\Ba\\B', '-a', '', ascii(None)), + ('\\Ba\\B', '-a-', '', ascii(None)), + ('\\By\\B', 'xy', '', ascii(None)), + ('\\By\\B', 'yz', '', ascii(None)), + ('\\By\\b', 'xy', '0', ascii('y')), + + ('\\by\\B', 'yz', '0', ascii('y')), + ('\\By\\B', 'xyz', '0', ascii('y')), + ('ab|cd', 'abc', '0', ascii('ab')), + ('ab|cd', 'abcd', '0', ascii('ab')), + ('()ef', 'def', '0,1', ascii(('ef', ''))), + ('$b', 'b', '', ascii(None)), + ('a\\(b', 'a(b', '', ascii(('a(b',))), + ('a\\(*b', 'ab', '0', ascii('ab')), + ('a\\(*b', 'a((b', '0', ascii('a((b')), + ('a\\\\b', 'a\\b', '0', ascii('a\\b')), + + ('((a))', 'abc', '0,1,2', ascii(('a', 'a', 'a'))), + ('(a)b(c)', 'abc', '0,1,2', ascii(('abc', 'a', 'c'))), + ('a+b+c', 'aabbabc', '0', ascii('abc')), + ('(a+|b)*', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)+', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)?', 'ab', '0,1', ascii(('a', 'a'))), + (')(', '-', '', regex.error, self.TRAILING_CHARS), + ('[^ab]*', 'cde', '0', ascii('cde')), + ('abc', '', '', ascii(None)), + ('a*', '', '0', ascii('')), + + ('a|b|c|d|e', 'e', '0', ascii('e')), + ('(a|b|c|d|e)f', 'ef', '0,1', ascii(('ef', 'e'))), + ('abcd*efg', 'abcdefg', '0', ascii('abcdefg')), + ('ab*', 'xabyabbbz', '0', ascii('ab')), + ('ab*', 'xayabbbz', '0', ascii('a')), + ('(ab|cd)e', 'abcde', '0,1', ascii(('cde', 'cd'))), + ('[abhgefdc]ij', 'hij', '0', ascii('hij')), + ('^(ab|cd)e', 'abcde', '', ascii(None)), + ('(abc|)ef', 'abcdef', '0,1', ascii(('ef', ''))), + ('(a|b)c*d', 'abcd', '0,1', ascii(('bcd', 'b'))), + + ('(ab|ab*)bc', 'abc', '0,1', ascii(('abc', 'a'))), + ('a([bc]*)c*', 'abc', '0,1', ascii(('abc', 'bc'))), + ('a([bc]*)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]+)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]*)(c+d)', 'abcd', '0,1,2', ascii(('abcd', 'b', 'cd'))), + ('a[bcd]*dcdcde', 'adcdcde', '0', ascii('adcdcde')), + ('a[bcd]+dcdcde', 'adcdcde', '', ascii(None)), + ('(ab|a)b*c', 'abc', '0,1', ascii(('abc', 'ab'))), + ('((a)(b)c)(d)', 'abcd', '1,2,3,4', ascii(('abc', 'a', 'b', 'd'))), + ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', '0', ascii('alpha')), + + ('^a(bc+|b[eh])g|.h$', 'abh', '0,1', ascii(('bh', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'ij', '0,1,2', ascii(('ij', 'ij', + 'j'))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effg', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(((((((((a)))))))))', 'a', '0', ascii('a')), + ('multiple words of text', 'uh-uh', '', ascii(None)), + ('multiple words', 'multiple words, yeah', '0', + ascii('multiple words')), + ('(.*)c(.*)', 'abcde', '0,1,2', ascii(('abcde', 'ab', 'de'))), + + ('\\((.*), (.*)\\)', '(a, b)', '2,1', ascii(('b', 'a'))), + ('[k]', 'ab', '', ascii(None)), + ('a[-]?c', 'ac', '0', ascii('ac')), + ('(abc)\\1', 'abcabc', '1', ascii('abc')), + ('([a-c]*)\\1', 'abcabc', '1', ascii('abc')), + ('^(.+)?B', 'AB', '1', ascii('A')), + ('(a+).\\1$', 'aaaaa', '0,1', ascii(('aaaaa', 'aa'))), + ('^(a+).\\1$', 'aaaa', '', ascii(None)), + ('(abc)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))), + ('([a-c]+)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))), + + ('(a)\\1', 'aa', '0,1', ascii(('aa', 'a'))), + ('(a+)\\1', 
'aa', '0,1', ascii(('aa', 'a'))),
+            ('(a+)+\\1', 'aa', '0,1', ascii(('aa', 'a'))),
+            ('(a).+\\1', 'aba', '0,1', ascii(('aba', 'a'))),
+            ('(a)ba*\\1', 'aba', '0,1', ascii(('aba', 'a'))),
+            ('(aa|a)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))),
+            ('(a|aa)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))),
+            ('(a+)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))),
+            ('([abc]*)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))),
+            ('(a)(b)c|ab', 'ab', '0,1,2', ascii(('ab', None, None))),
+
+            ('(a)+x', 'aaax', '0,1', ascii(('aaax', 'a'))),
+            ('([ac])+x', 'aacx', '0,1', ascii(('aacx', 'c'))),
+            ('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', '0,1',
+              ascii(('d:msgs/tdir/sub1/', 'tdir/'))),
+            ('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah',
+              '0,1,2,3', ascii(('track1.title:TBlah blah blah', 'track1',
+              'title', 'Blah blah blah'))),
+            ('([^N]*N)+', 'abNNxyzN', '0,1', ascii(('abNNxyzN', 'xyzN'))),
+            ('([^N]*N)+', 'abNNxyz', '0,1', ascii(('abNN', 'N'))),
+            ('([abc]*)x', 'abcx', '0,1', ascii(('abcx', 'abc'))),
+            ('([abc]*)x', 'abc', '', ascii(None)),
+            ('([xyz]*)x', 'abcx', '0,1', ascii(('x', ''))),
+            ('(a)+b|aac', 'aac', '0,1', ascii(('aac', None))),
+
+            # Test symbolic groups.
+            ('(?P<i d>aaa)a', 'aaaa', '', regex.error, self.BAD_GROUP_NAME),
+            ('(?P<id>aaa)a', 'aaaa', '0,id', ascii(('aaaa', 'aaa'))),
+            ('(?P<id>aa)(?P=id)', 'aaaa', '0,id', ascii(('aaaa', 'aa'))),
+            ('(?P<id>aa)(?P=xd)', 'aaaa', '', regex.error, self.UNKNOWN_GROUP),
+
+            # Character properties.
+            (r"\g", "g", '0', ascii('g')),
+            (r"\g<1>", "g", '', regex.error, self.INVALID_GROUP_REF),
+            (r"(.)\g<1>", "gg", '0', ascii('gg')),
+            (r"(.)\g<1>", "gg", '', ascii(('gg', 'g'))),
+            (r"\N", "N", '0', ascii('N')),
+            (r"\N{LATIN SMALL LETTER A}", "a", '0', ascii('a')),
+            (r"\p", "p", '0', ascii('p')),
+            (r"\p{Ll}", "a", '0', ascii('a')),
+            (r"\P", "P", '0', ascii('P')),
+            (r"\P{Lu}", "p", '0', ascii('p')),
+
+            # All tests from Perl.
+ ('abc', 'abc', '0', ascii('abc')), + ('abc', 'xbc', '', ascii(None)), + ('abc', 'axc', '', ascii(None)), + ('abc', 'abx', '', ascii(None)), + ('abc', 'xabcy', '0', ascii('abc')), + ('abc', 'ababc', '0', ascii('abc')), + + ('ab*c', 'abc', '0', ascii('abc')), + ('ab*bc', 'abc', '0', ascii('abc')), + ('ab*bc', 'abbc', '0', ascii('abbc')), + ('ab*bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{0,}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab+bc', 'abbc', '0', ascii('abbc')), + ('ab+bc', 'abc', '', ascii(None)), + ('ab+bc', 'abq', '', ascii(None)), + ('ab{1,}bc', 'abq', '', ascii(None)), + ('ab+bc', 'abbbbc', '0', ascii('abbbbc')), + + ('ab{1,}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{1,3}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{3,4}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{4,5}bc', 'abbbbc', '', ascii(None)), + ('ab?bc', 'abbc', '0', ascii('abbc')), + ('ab?bc', 'abc', '0', ascii('abc')), + ('ab{0,1}bc', 'abc', '0', ascii('abc')), + ('ab?bc', 'abbbbc', '', ascii(None)), + ('ab?c', 'abc', '0', ascii('abc')), + ('ab{0,1}c', 'abc', '0', ascii('abc')), + + ('^abc$', 'abc', '0', ascii('abc')), + ('^abc$', 'abcc', '', ascii(None)), + ('^abc', 'abcc', '0', ascii('abc')), + ('^abc$', 'aabc', '', ascii(None)), + ('abc$', 'aabc', '0', ascii('abc')), + ('^', 'abc', '0', ascii('')), + ('$', 'abc', '0', ascii('')), + ('a.c', 'abc', '0', ascii('abc')), + ('a.c', 'axc', '0', ascii('axc')), + ('a.*c', 'axyzc', '0', ascii('axyzc')), + + ('a.*c', 'axyzd', '', ascii(None)), + ('a[bc]d', 'abc', '', ascii(None)), + ('a[bc]d', 'abd', '0', ascii('abd')), + ('a[b-d]e', 'abd', '', ascii(None)), + ('a[b-d]e', 'ace', '0', ascii('ace')), + ('a[b-d]', 'aac', '0', ascii('ac')), + ('a[-b]', 'a-', '0', ascii('a-')), + ('a[b-]', 'a-', '0', ascii('a-')), + ('a[b-a]', '-', '', regex.error, self.BAD_CHAR_RANGE), + ('a[]b', '-', '', regex.error, self.BAD_SET), + + ('a[', '-', '', regex.error, self.BAD_SET), + ('a]', 'a]', '0', ascii('a]')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[^bc]d', 'aed', '0', ascii('aed')), + ('a[^bc]d', 'abd', '', ascii(None)), + ('a[^-b]c', 'adc', '0', ascii('adc')), + ('a[^-b]c', 'a-c', '', ascii(None)), + ('a[^]b]c', 'a]c', '', ascii(None)), + ('a[^]b]c', 'adc', '0', ascii('adc')), + ('ab|cd', 'abc', '0', ascii('ab')), + + ('ab|cd', 'abcd', '0', ascii('ab')), + ('()ef', 'def', '0,1', ascii(('ef', ''))), + ('*a', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(*)b', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('$b', 'b', '', ascii(None)), + ('a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('a\\(b', 'a(b', '', ascii(('a(b',))), + ('a\\(*b', 'ab', '0', ascii('ab')), + ('a\\(*b', 'a((b', '0', ascii('a((b')), + ('a\\\\b', 'a\\b', '0', ascii('a\\b')), + + ('abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('((a))', 'abc', '0,1,2', ascii(('a', 'a', 'a'))), + ('(a)b(c)', 'abc', '0,1,2', ascii(('abc', 'a', 'c'))), + ('a+b+c', 'aabbabc', '0', ascii('abc')), + ('a{1,}b{1,}c', 'aabbabc', '0', ascii('abc')), + ('a**', '-', '', regex.error, self.MULTIPLE_REPEAT), + ('a.+?c', 'abcabc', '0', ascii('abc')), + ('(a+|b)*', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b){0,}', 'ab', '0,1', ascii(('ab', 'b'))), + + ('(a+|b)+', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b){1,}', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)?', 'ab', '0,1', ascii(('a', 'a'))), + ('(a+|b){0,1}', 'ab', '0,1', ascii(('a', 'a'))), + (')(', '-', '', regex.error, self.TRAILING_CHARS), + ('[^ab]*', 'cde', '0', ascii('cde')), + ('abc', '', '', ascii(None)), + ('a*', '', '0', 
ascii('')), + ('([abc])*d', 'abbbcd', '0,1', ascii(('abbbcd', 'c'))), + ('([abc])*bcd', 'abcd', '0,1', ascii(('abcd', 'a'))), + + ('a|b|c|d|e', 'e', '0', ascii('e')), + ('(a|b|c|d|e)f', 'ef', '0,1', ascii(('ef', 'e'))), + ('abcd*efg', 'abcdefg', '0', ascii('abcdefg')), + ('ab*', 'xabyabbbz', '0', ascii('ab')), + ('ab*', 'xayabbbz', '0', ascii('a')), + ('(ab|cd)e', 'abcde', '0,1', ascii(('cde', 'cd'))), + ('[abhgefdc]ij', 'hij', '0', ascii('hij')), + ('^(ab|cd)e', 'abcde', '', ascii(None)), + ('(abc|)ef', 'abcdef', '0,1', ascii(('ef', ''))), + ('(a|b)c*d', 'abcd', '0,1', ascii(('bcd', 'b'))), + + ('(ab|ab*)bc', 'abc', '0,1', ascii(('abc', 'a'))), + ('a([bc]*)c*', 'abc', '0,1', ascii(('abc', 'bc'))), + ('a([bc]*)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]+)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]*)(c+d)', 'abcd', '0,1,2', ascii(('abcd', 'b', 'cd'))), + ('a[bcd]*dcdcde', 'adcdcde', '0', ascii('adcdcde')), + ('a[bcd]+dcdcde', 'adcdcde', '', ascii(None)), + ('(ab|a)b*c', 'abc', '0,1', ascii(('abc', 'ab'))), + ('((a)(b)c)(d)', 'abcd', '1,2,3,4', ascii(('abc', 'a', 'b', 'd'))), + ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', '0', ascii('alpha')), + + ('^a(bc+|b[eh])g|.h$', 'abh', '0,1', ascii(('bh', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'ij', '0,1,2', ascii(('ij', 'ij', + 'j'))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effg', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('((((((((((a))))))))))', 'a', '10', ascii('a')), + ('((((((((((a))))))))))\\10', 'aa', '0', ascii('aa')), + + # Python does not have the same rules for \\41 so this is a syntax error + # ('((((((((((a))))))))))\\41', 'aa', '', ascii(None)), + # ('((((((((((a))))))))))\\41', 'a!', '0', ascii('a!')), + ('((((((((((a))))))))))\\41', '', '', regex.error, + self.INVALID_GROUP_REF), + ('(?i)((((((((((a))))))))))\\41', '', '', regex.error, + self.INVALID_GROUP_REF), + + ('(((((((((a)))))))))', 'a', '0', ascii('a')), + ('multiple words of text', 'uh-uh', '', ascii(None)), + ('multiple words', 'multiple words, yeah', '0', + ascii('multiple words')), + ('(.*)c(.*)', 'abcde', '0,1,2', ascii(('abcde', 'ab', 'de'))), + ('\\((.*), (.*)\\)', '(a, b)', '2,1', ascii(('b', 'a'))), + ('[k]', 'ab', '', ascii(None)), + ('a[-]?c', 'ac', '0', ascii('ac')), + ('(abc)\\1', 'abcabc', '1', ascii('abc')), + ('([a-c]*)\\1', 'abcabc', '1', ascii('abc')), + ('(?i)abc', 'ABC', '0', ascii('ABC')), + + ('(?i)abc', 'XBC', '', ascii(None)), + ('(?i)abc', 'AXC', '', ascii(None)), + ('(?i)abc', 'ABX', '', ascii(None)), + ('(?i)abc', 'XABCY', '0', ascii('ABC')), + ('(?i)abc', 'ABABC', '0', ascii('ABC')), + ('(?i)ab*c', 'ABC', '0', ascii('ABC')), + ('(?i)ab*bc', 'ABC', '0', ascii('ABC')), + ('(?i)ab*bc', 'ABBC', '0', ascii('ABBC')), + ('(?i)ab*?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{0,}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + + ('(?i)ab+?bc', 'ABBC', '0', ascii('ABBC')), + ('(?i)ab+bc', 'ABC', '', ascii(None)), + ('(?i)ab+bc', 'ABQ', '', ascii(None)), + ('(?i)ab{1,}bc', 'ABQ', '', ascii(None)), + ('(?i)ab+bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{1,}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{1,3}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{3,4}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{4,5}?bc', 'ABBBBC', '', ascii(None)), + ('(?i)ab??bc', 'ABBC', '0', ascii('ABBC')), + + ('(?i)ab??bc', 'ABC', '0', ascii('ABC')), + 
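+            # Note on the lazy forms above: a lazy quantifier consumes as
+            # little as the rest of the pattern allows; a hypothetical extra
+            # entry in the same format (kept commented out) would be:
+            # ('(?i)ab+?', 'ABBB', '0', ascii('AB')),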
('(?i)ab{0,1}?bc', 'ABC', '0', ascii('ABC')), + ('(?i)ab??bc', 'ABBBBC', '', ascii(None)), + ('(?i)ab??c', 'ABC', '0', ascii('ABC')), + ('(?i)ab{0,1}?c', 'ABC', '0', ascii('ABC')), + ('(?i)^abc$', 'ABC', '0', ascii('ABC')), + ('(?i)^abc$', 'ABCC', '', ascii(None)), + ('(?i)^abc', 'ABCC', '0', ascii('ABC')), + ('(?i)^abc$', 'AABC', '', ascii(None)), + ('(?i)abc$', 'AABC', '0', ascii('ABC')), + + ('(?i)^', 'ABC', '0', ascii('')), + ('(?i)$', 'ABC', '0', ascii('')), + ('(?i)a.c', 'ABC', '0', ascii('ABC')), + ('(?i)a.c', 'AXC', '0', ascii('AXC')), + ('(?i)a.*?c', 'AXYZC', '0', ascii('AXYZC')), + ('(?i)a.*c', 'AXYZD', '', ascii(None)), + ('(?i)a[bc]d', 'ABC', '', ascii(None)), + ('(?i)a[bc]d', 'ABD', '0', ascii('ABD')), + ('(?i)a[b-d]e', 'ABD', '', ascii(None)), + ('(?i)a[b-d]e', 'ACE', '0', ascii('ACE')), + + ('(?i)a[b-d]', 'AAC', '0', ascii('AC')), + ('(?i)a[-b]', 'A-', '0', ascii('A-')), + ('(?i)a[b-]', 'A-', '0', ascii('A-')), + ('(?i)a[b-a]', '-', '', regex.error, self.BAD_CHAR_RANGE), + ('(?i)a[]b', '-', '', regex.error, self.BAD_SET), + ('(?i)a[', '-', '', regex.error, self.BAD_SET), + ('(?i)a]', 'A]', '0', ascii('A]')), + ('(?i)a[]]b', 'A]B', '0', ascii('A]B')), + ('(?i)a[^bc]d', 'AED', '0', ascii('AED')), + ('(?i)a[^bc]d', 'ABD', '', ascii(None)), + + ('(?i)a[^-b]c', 'ADC', '0', ascii('ADC')), + ('(?i)a[^-b]c', 'A-C', '', ascii(None)), + ('(?i)a[^]b]c', 'A]C', '', ascii(None)), + ('(?i)a[^]b]c', 'ADC', '0', ascii('ADC')), + ('(?i)ab|cd', 'ABC', '0', ascii('AB')), + ('(?i)ab|cd', 'ABCD', '0', ascii('AB')), + ('(?i)()ef', 'DEF', '0,1', ascii(('EF', ''))), + ('(?i)*a', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(?i)(*)b', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(?i)$b', 'B', '', ascii(None)), + + ('(?i)a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('(?i)a\\(b', 'A(B', '', ascii(('A(B',))), + ('(?i)a\\(*b', 'AB', '0', ascii('AB')), + ('(?i)a\\(*b', 'A((B', '0', ascii('A((B')), + ('(?i)a\\\\b', 'A\\B', '0', ascii('A\\B')), + ('(?i)abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(?i)(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('(?i)((a))', 'ABC', '0,1,2', ascii(('A', 'A', 'A'))), + ('(?i)(a)b(c)', 'ABC', '0,1,2', ascii(('ABC', 'A', 'C'))), + ('(?i)a+b+c', 'AABBABC', '0', ascii('ABC')), + + ('(?i)a{1,}b{1,}c', 'AABBABC', '0', ascii('ABC')), + ('(?i)a**', '-', '', regex.error, self.MULTIPLE_REPEAT), + ('(?i)a.+?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)a.*?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)a.{0,5}?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)(a+|b)*', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b){0,}', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b)+', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b){1,}', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b)?', 'AB', '0,1', ascii(('A', 'A'))), + + ('(?i)(a+|b){0,1}', 'AB', '0,1', ascii(('A', 'A'))), + ('(?i)(a+|b){0,1}?', 'AB', '0,1', ascii(('', None))), + ('(?i))(', '-', '', regex.error, self.TRAILING_CHARS), + ('(?i)[^ab]*', 'CDE', '0', ascii('CDE')), + ('(?i)abc', '', '', ascii(None)), + ('(?i)a*', '', '0', ascii('')), + ('(?i)([abc])*d', 'ABBBCD', '0,1', ascii(('ABBBCD', 'C'))), + ('(?i)([abc])*bcd', 'ABCD', '0,1', ascii(('ABCD', 'A'))), + ('(?i)a|b|c|d|e', 'E', '0', ascii('E')), + ('(?i)(a|b|c|d|e)f', 'EF', '0,1', ascii(('EF', 'E'))), + + ('(?i)abcd*efg', 'ABCDEFG', '0', ascii('ABCDEFG')), + ('(?i)ab*', 'XABYABBBZ', '0', ascii('AB')), + ('(?i)ab*', 'XAYABBBZ', '0', ascii('A')), + ('(?i)(ab|cd)e', 'ABCDE', '0,1', ascii(('CDE', 'CD'))), + ('(?i)[abhgefdc]ij', 'HIJ', '0', ascii('HIJ')), + 
+            ('(?i)^(ab|cd)e', 'ABCDE', '', ascii(None)),
+            ('(?i)(abc|)ef', 'ABCDEF', '0,1', ascii(('EF', ''))),
+            ('(?i)(a|b)c*d', 'ABCD', '0,1', ascii(('BCD', 'B'))),
+            ('(?i)(ab|ab*)bc', 'ABC', '0,1', ascii(('ABC', 'A'))),
+            ('(?i)a([bc]*)c*', 'ABC', '0,1', ascii(('ABC', 'BC'))),
+
+            ('(?i)a([bc]*)(c*d)', 'ABCD', '0,1,2', ascii(('ABCD', 'BC', 'D'))),
+            ('(?i)a([bc]+)(c*d)', 'ABCD', '0,1,2', ascii(('ABCD', 'BC', 'D'))),
+            ('(?i)a([bc]*)(c+d)', 'ABCD', '0,1,2', ascii(('ABCD', 'B', 'CD'))),
+            ('(?i)a[bcd]*dcdcde', 'ADCDCDE', '0', ascii('ADCDCDE')),
+            ('(?i)a[bcd]+dcdcde', 'ADCDCDE', '', ascii(None)),
+            ('(?i)(ab|a)b*c', 'ABC', '0,1', ascii(('ABC', 'AB'))),
+            ('(?i)((a)(b)c)(d)', 'ABCD', '1,2,3,4', ascii(('ABC', 'A', 'B',
+              'D'))),
+            ('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', '0', ascii('ALPHA')),
+            ('(?i)^a(bc+|b[eh])g|.h$', 'ABH', '0,1', ascii(('BH', None))),
+            ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', '0,1,2', ascii(('EFFGZ',
+              'EFFGZ', None))),
+
+            ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', '0,1,2', ascii(('IJ', 'IJ',
+              'J'))),
+            ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', '', ascii(None)),
+            ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', '', ascii(None)),
+            ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', '0,1,2', ascii(('EFFGZ',
+              'EFFGZ', None))),
+            ('(?i)((((((((((a))))))))))', 'A', '10', ascii('A')),
+            ('(?i)((((((((((a))))))))))\\10', 'AA', '0', ascii('AA')),
+            #('(?i)((((((((((a))))))))))\\41', 'AA', '', ascii(None)),
+            #('(?i)((((((((((a))))))))))\\41', 'A!', '0', ascii('A!')),
+            ('(?i)(((((((((a)))))))))', 'A', '0', ascii('A')),
+            ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', '1',
+              ascii('A')),
+            ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', '1',
+              ascii('C')),
+            ('(?i)multiple words of text', 'UH-UH', '', ascii(None)),
+
+            ('(?i)multiple words', 'MULTIPLE WORDS, YEAH', '0',
+              ascii('MULTIPLE WORDS')),
+            ('(?i)(.*)c(.*)', 'ABCDE', '0,1,2', ascii(('ABCDE', 'AB', 'DE'))),
+            ('(?i)\\((.*), (.*)\\)', '(A, B)', '2,1', ascii(('B', 'A'))),
+            ('(?i)[k]', 'AB', '', ascii(None)),
+            # ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', ascii(ABCD-$&-\\ABCD)),
+            # ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', ascii(BC-$1-\\BC)),
+            ('(?i)a[-]?c', 'AC', '0', ascii('AC')),
+            ('(?i)(abc)\\1', 'ABCABC', '1', ascii('ABC')),
+            ('(?i)([a-c]*)\\1', 'ABCABC', '1', ascii('ABC')),
+            ('a(?!b).', 'abad', '0', ascii('ad')),
+            ('a(?=d).', 'abad', '0', ascii('ad')),
+            ('a(?=c|d).', 'abad', '0', ascii('ad')),
+
+            ('a(?:b|c|d)(.)', 'ace', '1', ascii('e')),
+            ('a(?:b|c|d)*(.)', 'ace', '1', ascii('e')),
+            ('a(?:b|c|d)+?(.)', 'ace', '1', ascii('e')),
+            ('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', '1,2', ascii(('c', 'e'))),
+
+            # Lookbehind: split by : but not if it is escaped by -.
+            ('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', '1', ascii('bc-:de')),
+            ('a[^>]*?b', 'a>b', '', ascii(None)),
+            # Bug 490573: minimizing repeat problem.
+            (r'^a*?$', 'foo', '', ascii(None)),
+            # Bug 470582: nested groups problem.
+            (r'^((a)c)?(ab)$', 'ab', '1,2,3', ascii((None, None, 'ab'))),
+            # Another minimizing repeat problem (capturing groups in assertions).
+            ('^([ab]*?)(?=(b)?)c', 'abc', '1,2', ascii(('ab', None))),
+            ('^([ab]*?)(?!(b))c', 'abc', '1,2', ascii(('ab', None))),
+            ('^([ab]*?)(?<!(a))c', 'abc', '1,2', ascii(('ab', None))),
+        ]
+
+        for t in tests:
+            # Each entry is (pattern, string, group spec, expected repr),
+            # with an optional fifth field giving the start of the error
+            # message when compiling the pattern must fail.
+            excval = None
+            if len(t) == 4:
+                pattern, string, groups, expected = t
+            else:
+                pattern, string, groups, expected, excval = t
+
+            if excval is not None:
+                with self.assertRaisesRegex(expected, regex.escape(excval)):
+                    regex.compile(pattern)
+            else:
+                m = regex.search(pattern, string)
+                if m is None:
+                    self.assertEqual(ascii(None), expected,
+                      "Failed: {}".format(pattern))
+                else:
+                    group_list = []
+                    if groups:
+                        for group in groups.split(","):
+                            try:
+                                group_list.append(int(group))
+                            except ValueError:
+                                group_list.append(group)
+                    if group_list:
+                        actual = ascii(m.group(*group_list))
+                    else:
+                        actual = ascii(m[ : ])
+                    self.assertEqual(actual, expected,
+                      "Failed: {}".format(pattern))
+
+    def test_captures(self):
+        self.assertEqual(regex.search(r"(?>(.){0,2})d", "abcd").captures(1),
+          ['b', 'c'])
+        self.assertEqual(regex.search(r"(.)+", "a").captures(1), ['a'])
+
+    def test_guards(self):
+        m = regex.search(r"(X.*?Y\s*){3}(X\s*)+AB:",
+          "XY\nX Y\nX Y\nXY\nXX AB:")
+        self.assertEqual(m.span(0, 1, 2), ((3, 21), (12, 15), (16, 18)))
+
+        m = regex.search(r"(X.*?Y\s*){3,}(X\s*)+AB:",
+          "XY\nX Y\nX Y\nXY\nXX AB:")
+        self.assertEqual(m.span(0, 1, 2), ((0, 21), (12, 15), (16, 18)))
+
+        m = regex.search(r'\d{4}(\s*\w)?\W*((?!\d)\w){2}', "9999XX")
+        self.assertEqual(m.span(0, 1, 2), ((0, 6), (-1, -1), (5, 6)))
+
+        m = regex.search(r'A\s*?.*?(\n+.*?\s*?){0,2}\(X', 'A\n1\nS\n1 (X')
+        self.assertEqual(m.span(0, 1), ((0, 10), (5, 8)))
+
+        m = regex.search(r'Derde\s*:', 'aaaaaa:\nDerde:')
+        self.assertEqual(m.span(), (8, 14))
+        m = regex.search(r'Derde\s*:', 'aaaaa:\nDerde:')
+        self.assertEqual(m.span(), (7, 13))
+
+    def test_turkic(self):
+        # Turkish has dotted and dotless I/i.
+        pairs = "I=i;I=\u0131;i=\u0130"
+
+        all_chars = set()
+        matching = set()
+        for pair in pairs.split(";"):
+            ch1, ch2 = pair.split("=")
+            all_chars.update((ch1, ch2))
+            matching.add((ch1, ch1))
+            matching.add((ch1, ch2))
+            matching.add((ch2, ch1))
+            matching.add((ch2, ch2))
+
+        for ch1 in all_chars:
+            for ch2 in all_chars:
+                m = regex.match(r"(?i)\A" + ch1 + r"\Z", ch2)
+                if m:
+                    if (ch1, ch2) not in matching:
+                        self.fail("{} matching {}".format(ascii(ch1),
+                          ascii(ch2)))
+                else:
+                    if (ch1, ch2) in matching:
+                        self.fail("{} not matching {}".format(ascii(ch1),
+                          ascii(ch2)))
+
+    def test_named_lists(self):
+        options = ["one", "two", "three"]
+        self.assertEqual(regex.match(r"333\L<bar>444", "333one444",
+          bar=options).group(), "333one444")
+        self.assertEqual(regex.match(r"(?i)333\L<bar>444", "333TWO444",
+          bar=options).group(), "333TWO444")
+        self.assertEqual(regex.match(r"333\L<bar>444", "333four444",
+          bar=options), None)
+
+        options = [b"one", b"two", b"three"]
+        self.assertEqual(regex.match(br"333\L<bar>444", b"333one444",
+          bar=options).group(), b"333one444")
+        self.assertEqual(regex.match(br"(?i)333\L<bar>444", b"333TWO444",
+          bar=options).group(), b"333TWO444")
+        self.assertEqual(regex.match(br"333\L<bar>444", b"333four444",
+          bar=options), None)
+
+        self.assertEqual(repr(type(regex.compile(r"3\L<bar>4\L<bar>+5",
+          bar=["one", "two", "three"]))), self.PATTERN_CLASS)
+
+        self.assertEqual(regex.findall(r"^\L<options>", "solid QWERT",
+          options=set(['good', 'brilliant', '+s\\ol[i}d'])), [])
+        self.assertEqual(regex.findall(r"^\L<options>", "+solid QWERT",
+          options=set(['good', 'brilliant', '+solid'])), ['+solid'])
+
+        options = ["STRASSE"]
+        self.assertEqual(regex.match(r"(?fi)\L<words>",
+          "stra\N{LATIN SMALL LETTER SHARP S}e", words=options).span(), (0,
+          6))
+
+        options = ["STRASSE", "stress"]
+        self.assertEqual(regex.match(r"(?fi)\L<words>",
+          "stra\N{LATIN SMALL LETTER SHARP S}e", words=options).span(), (0,
+          6))
+
+        options = ["stra\N{LATIN SMALL LETTER SHARP S}e"]
+        self.assertEqual(regex.match(r"(?fi)\L<words>", "STRASSE",
+          words=options).span(), (0, 7))
+
+        options = ["kit"]
+        self.assertEqual(regex.search(r"(?i)\L<words>", "SKITS",
+          words=options).span(), (1, 4))
+        self.assertEqual(regex.search(r"(?i)\L<words>",
+          "SK\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}TS",
+          words=options).span(), (1, 4))
+
+        self.assertEqual(regex.search(r"(?fi)\b(\w+) +\1\b",
+          " stra\N{LATIN SMALL LETTER SHARP S}e STRASSE ").span(), (1, 15))
+        self.assertEqual(regex.search(r"(?fi)\b(\w+) +\1\b",
+          " STRASSE stra\N{LATIN SMALL LETTER SHARP S}e ").span(), (1, 15))
+
+        self.assertEqual(regex.search(r"^\L<options>$", "", options=[]).span(),
+          (0, 0))
+
+    def test_fuzzy(self):
+        # Some tests borrowed from TRE library tests.
+        self.assertEqual(repr(type(regex.compile('(fou){s,e<=1}'))),
+          self.PATTERN_CLASS)
+        self.assertEqual(repr(type(regex.compile('(fuu){s}'))),
+          self.PATTERN_CLASS)
+        self.assertEqual(repr(type(regex.compile('(fuu){s,e}'))),
+          self.PATTERN_CLASS)
+        self.assertEqual(repr(type(regex.compile('(anaconda){1i+1d<1,s<=1}'))),
+          self.PATTERN_CLASS)
+        self.assertEqual(repr(type(regex.compile('(anaconda){1i+1d<1,s<=1,e<=10}'))),
+          self.PATTERN_CLASS)
+        self.assertEqual(repr(type(regex.compile('(anaconda){s<=1,e<=1,1i+1d<1}'))),
+          self.PATTERN_CLASS)
+
+        text = 'molasses anaconda foo bar baz smith anderson '
+        self.assertEqual(regex.search('(znacnda){s<=1,e<=3,1i+1d<1}', text),
+          None)
+        self.assertEqual(regex.search('(znacnda){s<=1,e<=3,1i+1d<2}',
+          text).span(0, 1), ((9, 17), (9, 17)))
+        self.assertEqual(regex.search('(ananda){1i+1d<2}', text), None)
+        self.assertEqual(regex.search(r"(?:\bznacnda){e<=2}", text)[0],
+          "anaconda")
+        self.assertEqual(regex.search(r"(?:\bnacnda){e<=2}", text)[0],
+          "anaconda")
+
+        text = 'anaconda foo bar baz smith anderson'
+        self.assertEqual(regex.search('(fuu){i<=3,d<=3,e<=5}', text).span(0,
+          1), ((0, 0), (0, 0)))
+        self.assertEqual(regex.search('(?b)(fuu){i<=3,d<=3,e<=5}',
+          text).span(0, 1), ((9, 10), (9, 10)))
+        self.assertEqual(regex.search('(fuu){i<=2,d<=2,e<=5}', text).span(0,
+          1), ((7, 10), (7, 10)))
+        self.assertEqual(regex.search('(?e)(fuu){i<=2,d<=2,e<=5}',
+          text).span(0, 1), ((9, 10), (9, 10)))
+        self.assertEqual(regex.search('(fuu){i<=3,d<=3,e}', text).span(0, 1),
+          ((0, 0), (0, 0)))
+        self.assertEqual(regex.search('(?b)(fuu){i<=3,d<=3,e}', text).span(0,
+          1), ((9, 10), (9, 10)))
+
+        self.assertEqual(repr(type(regex.compile('(approximate){s<=3,1i+1d<3}'))),
+          self.PATTERN_CLASS)
+
+        # No cost limit.
+        self.assertEqual(regex.search('(foobar){e}',
+          'xirefoabralfobarxie').span(0, 1), ((0, 6), (0, 6)))
+        self.assertEqual(regex.search('(?e)(foobar){e}',
+          'xirefoabralfobarxie').span(0, 1), ((0, 3), (0, 3)))
+        self.assertEqual(regex.search('(?b)(foobar){e}',
+          'xirefoabralfobarxie').span(0, 1), ((11, 16), (11, 16)))
+
+        # At most two errors.
+        self.assertEqual(regex.search('(foobar){e<=2}',
+          'xirefoabrzlfd').span(0, 1), ((4, 9), (4, 9)))
+        self.assertEqual(regex.search('(foobar){e<=2}', 'xirefoabzlfd'), None)
+
+        # At most two inserts or substitutions and max two errors total.
+        self.assertEqual(regex.search('(foobar){i<=2,s<=2,e<=2}',
+          'oobargoobaploowap').span(0, 1), ((5, 11), (5, 11)))
+
+        # Find best whole word match for "foobar".
+        self.assertEqual(regex.search('\\b(foobar){e}\\b', 'zfoobarz').span(0,
+          1), ((0, 8), (0, 8)))
+        self.assertEqual(regex.search('\\b(foobar){e}\\b',
+          'boing zfoobarz goobar woop').span(0, 1), ((0, 6), (0, 6)))
+        self.assertEqual(regex.search('(?b)\\b(foobar){e}\\b',
+          'boing zfoobarz goobar woop').span(0, 1), ((15, 21), (15, 21)))
+
+        # Match whole string, allow only 1 error.
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoobar').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobarx').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fooxbar').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foxbar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xoobar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobax').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'oobar').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fobar').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fooba').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoobarx'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobarxx'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xxfoobar'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoxbar'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foxbarx'), None) + + # At most one insert, two deletes, and three substitutions. + # Additionally, deletes cost two and substitutes one, and total + # cost must be less than 4. + self.assertEqual(regex.search('(foobar){i<=1,d<=2,s<=3,2d+1s<4}', + '3oifaowefbaoraofuiebofasebfaobfaorfeoaro').span(0, 1), ((6, 13), (6, + 13))) + self.assertEqual(regex.search('(?b)(foobar){i<=1,d<=2,s<=3,2d+1s<4}', + '3oifaowefbaoraofuiebofasebfaobfaorfeoaro').span(0, 1), ((34, 39), + (34, 39))) + + # Partially fuzzy matches. + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'foobarzap').span(0, + 1), ((0, 9), (3, 6))) + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'fobarzap'), None) + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'foobrzap').span(0, + 1), ((0, 8), (3, 5))) + + text = ('www.cnn.com 64.236.16.20\nwww.slashdot.org 66.35.250.150\n' + 'For useful information, use www.slashdot.org\nthis is demo data!\n') + self.assertEqual(regex.search(r'(?s)^.*(dot.org){e}.*$', text).span(0, + 1), ((0, 120), (120, 120))) + self.assertEqual(regex.search(r'(?es)^.*(dot.org){e}.*$', text).span(0, + 1), ((0, 120), (93, 100))) + self.assertEqual(regex.search(r'^.*(dot.org){e}.*$', text).span(0, 1), + ((0, 119), (24, 101))) + + # Behaviour is unexpected, but arguably not wrong. It first finds the + # best match, then the best in what follows, etc. 
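+        # Concretely: in " book dog cot desk " the best overall hit for
+        # {e<=1} is " dog" (one insertion), and only "cot" (a substitution
+        # of "cat") remains to be found in the text that follows it.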
+        self.assertEqual(regex.findall(r"\b\L<words>{e<=1}\b",
+          " book cot dog desk ", words="cat dog".split()), ["cot", "dog"])
+        self.assertEqual(regex.findall(r"\b\L<words>{e<=1}\b",
+          " book dog cot desk ", words="cat dog".split()), [" dog", "cot"])
+        self.assertEqual(regex.findall(r"(?e)\b\L<words>{e<=1}\b",
+          " book dog cot desk ", words="cat dog".split()), ["dog", "cot"])
+        self.assertEqual(regex.findall(r"(?r)\b\L<words>{e<=1}\b",
+          " book cot dog desk ", words="cat dog".split()), ["dog ", "cot"])
+        self.assertEqual(regex.findall(r"(?er)\b\L<words>{e<=1}\b",
+          " book cot dog desk ", words="cat dog".split()), ["dog", "cot"])
+        self.assertEqual(regex.findall(r"(?r)\b\L<words>{e<=1}\b",
+          " book dog cot desk ", words="cat dog".split()), ["cot", "dog"])
+        self.assertEqual(regex.findall(br"\b\L<words>{e<=1}\b",
+          b" book cot dog desk ", words=b"cat dog".split()), [b"cot", b"dog"])
+        self.assertEqual(regex.findall(br"\b\L<words>{e<=1}\b",
+          b" book dog cot desk ", words=b"cat dog".split()), [b" dog", b"cot"])
+        self.assertEqual(regex.findall(br"(?e)\b\L<words>{e<=1}\b",
+          b" book dog cot desk ", words=b"cat dog".split()), [b"dog", b"cot"])
+        self.assertEqual(regex.findall(br"(?r)\b\L<words>{e<=1}\b",
+          b" book cot dog desk ", words=b"cat dog".split()), [b"dog ", b"cot"])
+        self.assertEqual(regex.findall(br"(?er)\b\L<words>{e<=1}\b",
+          b" book cot dog desk ", words=b"cat dog".split()), [b"dog", b"cot"])
+        self.assertEqual(regex.findall(br"(?r)\b\L<words>{e<=1}\b",
+          b" book dog cot desk ", words=b"cat dog".split()), [b"cot", b"dog"])
+
+        self.assertEqual(regex.search(r"(\w+) (\1{e<=1})",
+          "foo fou").groups(), ("foo", "fou"))
+        self.assertEqual(regex.search(r"(?r)(\2{e<=1}) (\w+)",
+          "foo fou").groups(), ("foo", "fou"))
+        self.assertEqual(regex.search(br"(\w+) (\1{e<=1})",
+          b"foo fou").groups(), (b"foo", b"fou"))
+
+        self.assertEqual(regex.findall(r"(?:(?:QR)+){e}", "abcde"), ["abcde",
+          ""])
+        self.assertEqual(regex.findall(r"(?:Q+){e}", "abc"), ["abc", ""])
+
+        # Hg issue 41: = for fuzzy matches
+        self.assertEqual(regex.match(r"(?:service detection){0<e<5}",
+          "servic detection").span(), (0, 16))
+        self.assertEqual(regex.match(r"(?:service detection){0<e<5}",
+          "service detect").span(), (0, 14))
+        self.assertEqual(regex.match(r"(?:service detection){0<e<5}",
+          "service detecti").span(), (0, 15))
+        self.assertEqual(regex.match(r"(?:service detection){0<e<5}",
+          "service detection"), None)
+        self.assertEqual(regex.match(r"(?:service detection){0<e<5}",
+          "in service detection").span(), (0, 20))
+
+    def test_recursive_patterns(self):
+        self.assertEqual(regex.search(r"\(((?>[^()]+)|(?R))*\)",
+          "(ab(cd)ef)")[ : ], ("(ab(cd)ef)", "ef"))
+        self.assertEqual(regex.search(r"\(((?>[^()]+)|(?R))*\)",
+          "(ab(cd)ef)").captures(1), ["ab", "cd", "(cd)", "ef"])
+
+        self.assertEqual(regex.search(r"(?r)\(((?R)|(?>[^()]+))*\)",
+          "(ab(cd)ef)")[ : ], ("(ab(cd)ef)", "ab"))
+        self.assertEqual(regex.search(r"(?r)\(((?R)|(?>[^()]+))*\)",
+          "(ab(cd)ef)").captures(1), ["ef", "cd", "(cd)", "ab"])
+
+        self.assertEqual(regex.search(r"\(([^()]+|(?R))*\)",
+          "some text (a(b(c)d)e) more text")[ : ], ("(a(b(c)d)e)", "e"))
+
+        self.assertEqual(regex.search(r"(?r)\(((?R)|[^()]+)*\)",
+          "some text (a(b(c)d)e) more text")[ : ], ("(a(b(c)d)e)", "a"))
+
+        self.assertEqual(regex.search(r"(foo(\(((?:(?>[^()]+)|(?2))*)\)))",
+          "foo(bar(baz)+baz(bop))")[ : ], ("foo(bar(baz)+baz(bop))",
+          "foo(bar(baz)+baz(bop))", "(bar(baz)+baz(bop))",
+          "bar(baz)+baz(bop)"))
+
+        self.assertEqual(regex.search(r"(?r)(foo(\(((?:(?2)|(?>[^()]+))*)\)))",
+          "foo(bar(baz)+baz(bop))")[ : ], ("foo(bar(baz)+baz(bop))",
+          "foo(bar(baz)+baz(bop))", "(bar(baz)+baz(bop))",
+          "bar(baz)+baz(bop)"))
+
+        rgx = regex.compile(r"""^\s*(<\s*([a-zA-Z:]+)(?:\s*[a-zA-Z:]*\s*=\s*(?:'[^']*'|"[^"]*"))*\s*(/\s*)?>(?:[^<>]*|(?1))*(?(3)|<\s*/\s*\2\s*>))\s*$""")
+        self.assertEqual(bool(rgx.search('<foo><bar></bar></foo>')), True)
+        self.assertEqual(bool(rgx.search('<foo><bar></foo></bar>')), False)
+        self.assertEqual(bool(rgx.search('<foo><bar/></foo>')), True)
+        self.assertEqual(bool(rgx.search('<foo><bar></foo>')), False)
+        self.assertEqual(bool(rgx.search('<foo bar=baz/>')), False)
+
+        self.assertEqual(bool(rgx.search('<foo bar="baz">')), False)
+        self.assertEqual(bool(rgx.search('<foo bar="baz"/>')), True)
+        self.assertEqual(bool(rgx.search('< fooo / >')), True)
+        # The next regex should and does match. Perl 5.14 agrees.
+        #self.assertEqual(bool(rgx.search('<foo bar="baz"/>foo')), False)
+        self.assertEqual(bool(rgx.search('foo<foo bar="baz"/>')), False)
+
+        self.assertEqual(bool(rgx.search('<foo>foo</foo>')), True)
+        self.assertEqual(bool(rgx.search('<foo >foo</foo >')), True)
+        self.assertEqual(bool(rgx.search('<foo/>')), True)
+
+    def test_copy(self):
+        # PatternObjects are immutable, therefore there's no need to clone
+        # them.
+        r = regex.compile("a")
+        self.assertTrue(copy.copy(r) is r)
+        self.assertTrue(copy.deepcopy(r) is r)
+
+        # MatchObjects are normally mutable because the target string can be
+        # detached. However, after the target string has been detached, a
+        # MatchObject becomes immutable, so there's no need to clone it.
+        m = r.match("a")
+        self.assertTrue(copy.copy(m) is not m)
+        self.assertTrue(copy.deepcopy(m) is not m)
+
+        self.assertTrue(m.string is not None)
+        m2 = copy.copy(m)
+        m2.detach_string()
+        self.assertTrue(m.string is not None)
+        self.assertTrue(m2.string is None)
+
+        # The following behaviour matches that of the re module.
+        it = regex.finditer(".", "ab")
+        it2 = copy.copy(it)
+        self.assertEqual(next(it).group(), "a")
+        self.assertEqual(next(it2).group(), "b")
+
+        # The following behaviour matches that of the re module.
+        it = regex.finditer(".", "ab")
+        it2 = copy.deepcopy(it)
+        self.assertEqual(next(it).group(), "a")
+        self.assertEqual(next(it2).group(), "b")
+
+        # The following behaviour is designed to match that of copying
+        # 'finditer'.
+        it = regex.splititer(" ", "a b")
+        it2 = copy.copy(it)
+        self.assertEqual(next(it), "a")
+        self.assertEqual(next(it2), "b")
+
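+        # Illustrative sketch, not part of the original suite: the point of
+        # detach_string() above is that the match data can outlive a large
+        # target string. The pattern and subject here are assumptions chosen
+        # for the example.
+        subject = "x" * 1000 + "needle"
+        m3 = regex.search("needle", subject)
+        m3.detach_string()  # the match no longer references the subject
+        self.assertTrue(m3.string is None)
+        self.assertEqual(m3.group(), "needle")
+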
+        # The following behaviour is designed to match that of copying
+        # 'finditer'.
+        it = regex.splititer(" ", "a b")
+        it2 = copy.deepcopy(it)
+        self.assertEqual(next(it), "a")
+        self.assertEqual(next(it2), "b")
+
+    def test_format(self):
+        self.assertEqual(regex.subf(r"(\w+) (\w+)", "{0} => {2} {1}",
+          "foo bar"), "foo bar => bar foo")
+        self.assertEqual(regex.subf(r"(?<word1>\w+) (?<word2>\w+)",
+          "{word2} {word1}", "foo bar"), "bar foo")
+
+        self.assertEqual(regex.subfn(r"(\w+) (\w+)", "{0} => {2} {1}",
+          "foo bar"), ("foo bar => bar foo", 1))
+        self.assertEqual(regex.subfn(r"(?<word1>\w+) (?<word2>\w+)",
+          "{word2} {word1}", "foo bar"), ("bar foo", 1))
+
+        self.assertEqual(regex.match(r"(\w+) (\w+)",
+          "foo bar").expandf("{0} => {2} {1}"), "foo bar => bar foo")
+
+    def test_fullmatch(self):
+        self.assertEqual(bool(regex.fullmatch(r"abc", "abc")), True)
+        self.assertEqual(bool(regex.fullmatch(r"abc", "abcx")), False)
+        self.assertEqual(bool(regex.fullmatch(r"abc", "abcx", endpos=3)), True)
+
+        self.assertEqual(bool(regex.fullmatch(r"abc", "xabc", pos=1)), True)
+        self.assertEqual(bool(regex.fullmatch(r"abc", "xabcy", pos=1)), False)
+        self.assertEqual(bool(regex.fullmatch(r"abc", "xabcy", pos=1,
+          endpos=4)), True)
+
+        self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abc")), True)
+        self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abcx")), False)
+        self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abcx", endpos=3)),
+          True)
+
+        self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabc", pos=1)),
+          True)
+        self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabcy", pos=1)),
+          False)
+        self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabcy", pos=1,
+          endpos=4)), True)
+
+    def test_issue_18468(self):
+        self.assertTypedEqual(regex.sub('y', 'a', 'xyz'), 'xaz')
+        self.assertTypedEqual(regex.sub('y', StrSubclass('a'),
+          StrSubclass('xyz')), 'xaz')
+        self.assertTypedEqual(regex.sub(b'y', b'a', b'xyz'), b'xaz')
+        self.assertTypedEqual(regex.sub(b'y', BytesSubclass(b'a'),
+          BytesSubclass(b'xyz')), b'xaz')
+        self.assertTypedEqual(regex.sub(b'y', bytearray(b'a'),
+          bytearray(b'xyz')), b'xaz')
+        self.assertTypedEqual(regex.sub(b'y', memoryview(b'a'),
+          memoryview(b'xyz')), b'xaz')
+
+        for string in ":a:b::c", StrSubclass(":a:b::c"):
+            self.assertTypedEqual(regex.split(":", string), ['', 'a', 'b', '',
+              'c'])
+            if sys.version_info >= (3, 7, 0):
+                self.assertTypedEqual(regex.split(":*", string), ['', '', 'a',
+                  '', 'b', '', 'c', ''])
+                self.assertTypedEqual(regex.split("(:*)", string), ['', ':',
+                  '', '', 'a', ':', '', '', 'b', '::', '', '', 'c', '', ''])
+            else:
+                self.assertTypedEqual(regex.split(":*", string), ['', 'a', 'b',
+                  'c'])
+                self.assertTypedEqual(regex.split("(:*)", string), ['', ':',
+                  'a', ':', 'b', '::', 'c'])
+
+        for string in (b":a:b::c", BytesSubclass(b":a:b::c"),
+          bytearray(b":a:b::c"), memoryview(b":a:b::c")):
+            self.assertTypedEqual(regex.split(b":", string), [b'', b'a', b'b',
+              b'', b'c'])
+            if sys.version_info >= (3, 7, 0):
+                self.assertTypedEqual(regex.split(b":*", string), [b'', b'',
+                  b'a', b'', b'b', b'', b'c', b''])
+                self.assertTypedEqual(regex.split(b"(:*)", string), [b'', b':',
+                  b'', b'', b'a', b':', b'', b'', b'b', b'::', b'', b'', b'c',
+                  b'', b''])
+            else:
+                self.assertTypedEqual(regex.split(b":*", string), [b'', b'a',
+                  b'b', b'c'])
+                self.assertTypedEqual(regex.split(b"(:*)", string), [b'', b':',
+                  b'a', b':', b'b', b'::', b'c'])
+
+        for string in "a:b::c:::d", StrSubclass("a:b::c:::d"):
+            self.assertTypedEqual(regex.findall(":+", string), [":", "::",
+              ":::"])
+            self.assertTypedEqual(regex.findall("(:+)", string), [":", "::",
+              ":::"])
+            self.assertTypedEqual(regex.findall("(:)(:*)", string), [(":", ""),
+              (":", ":"), (":", "::")])
+
+        for string in (b"a:b::c:::d", BytesSubclass(b"a:b::c:::d"),
+          bytearray(b"a:b::c:::d"), memoryview(b"a:b::c:::d")):
+            self.assertTypedEqual(regex.findall(b":+", string), [b":", b"::",
+              b":::"])
+            self.assertTypedEqual(regex.findall(b"(:+)", string), [b":", b"::",
+              b":::"])
+            self.assertTypedEqual(regex.findall(b"(:)(:*)", string), [(b":",
+              b""), (b":", b":"), (b":", b"::")])
+
+        for string in 'a', StrSubclass('a'):
+            self.assertEqual(regex.match('a', string).groups(), ())
+            self.assertEqual(regex.match('(a)', string).groups(), ('a',))
+            self.assertEqual(regex.match('(a)', string).group(0), 'a')
+            self.assertEqual(regex.match('(a)', string).group(1), 'a')
+            self.assertEqual(regex.match('(a)', string).group(1, 1), ('a',
+              'a'))
+
+        for string in (b'a', BytesSubclass(b'a'), bytearray(b'a'),
+          memoryview(b'a')):
+            self.assertEqual(regex.match(b'a', string).groups(), ())
+            self.assertEqual(regex.match(b'(a)', string).groups(), (b'a',))
+            self.assertEqual(regex.match(b'(a)', string).group(0), b'a')
+            self.assertEqual(regex.match(b'(a)', string).group(1), b'a')
+            self.assertEqual(regex.match(b'(a)', string).group(1, 1), (b'a',
+              b'a'))
+
+    def test_partial(self):
+        self.assertEqual(regex.match('ab', 'a', partial=True).partial, True)
+        self.assertEqual(regex.match('ab', 'a', partial=True).span(), (0, 1))
+        self.assertEqual(regex.match(r'cats', 'cat', partial=True).partial,
+          True)
+        self.assertEqual(regex.match(r'cats', 'cat', partial=True).span(), (0,
+          3))
+        self.assertEqual(regex.match(r'cats', 'catch', partial=True), None)
+        self.assertEqual(regex.match(r'abc\w{3}', 'abcdef',
+          partial=True).partial, False)
+        self.assertEqual(regex.match(r'abc\w{3}', 'abcdef',
+          partial=True).span(), (0, 6))
+        self.assertEqual(regex.match(r'abc\w{3}', 'abcde',
+          partial=True).partial, True)
+        self.assertEqual(regex.match(r'abc\w{3}', 'abcde',
+          partial=True).span(), (0, 5))
+
+        self.assertEqual(regex.match(r'\d{4}$', '1234', partial=True).partial,
+          False)
+
+        self.assertEqual(regex.match(r'\L<words>', 'post', partial=True,
+          words=['post']).partial, False)
+        self.assertEqual(regex.match(r'\L<words>', 'post', partial=True,
+          words=['post']).span(), (0, 4))
+        self.assertEqual(regex.match(r'\L<words>', 'pos', partial=True,
+          words=['post']).partial, True)
+        self.assertEqual(regex.match(r'\L<words>', 'pos', partial=True,
+          words=['post']).span(), (0, 3))
+
+        self.assertEqual(regex.match(r'(?fi)\L<words>', 'POST', partial=True,
+          words=['po\uFB06']).partial, False)
+        self.assertEqual(regex.match(r'(?fi)\L<words>', 'POST', partial=True,
+          words=['po\uFB06']).span(), (0, 4))
+        self.assertEqual(regex.match(r'(?fi)\L<words>', 'POS', partial=True,
+          words=['po\uFB06']).partial, True)
+        self.assertEqual(regex.match(r'(?fi)\L<words>', 'POS', partial=True,
+          words=['po\uFB06']).span(), (0, 3))
+        self.assertEqual(regex.match(r'(?fi)\L<words>', 'po\uFB06',
+          partial=True, words=['POS']), None)
+
+        self.assertEqual(regex.match(r'[a-z]*4R$', 'a', partial=True).span(),
+          (0, 1))
+        self.assertEqual(regex.match(r'[a-z]*4R$', 'ab', partial=True).span(),
+          (0, 2))
+        self.assertEqual(regex.match(r'[a-z]*4R$', 'ab4', partial=True).span(),
+          (0, 3))
+        self.assertEqual(regex.match(r'[a-z]*4R$', 'a4', partial=True).span(),
+          (0, 2))
+        self.assertEqual(regex.match(r'[a-z]*4R$', 'a4R', partial=True).span(),
+          (0, 3))
+        self.assertEqual(regex.match(r'[a-z]*4R$', '4a', partial=True), None)
+        self.assertEqual(regex.match(r'[a-z]*4R$', 'a44', partial=True), None)
+
+    def test_hg_bugs(self):
+        # Hg
issue 28: regex.compile("(?>b)") causes "TypeError: 'Character' + # object is not subscriptable" + self.assertEqual(bool(regex.compile("(?>b)", flags=regex.V1)), True) + + # Hg issue 29: regex.compile("^((?>\w+)|(?>\s+))*$") causes + # "TypeError: 'GreedyRepeat' object is not iterable" + self.assertEqual(bool(regex.compile(r"^((?>\w+)|(?>\s+))*$", + flags=regex.V1)), True) + + # Hg issue 31: atomic and normal groups in recursive patterns + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(bcd(e)f)g(h)"), ['(bcd(e)f)', '(h)']) + self.assertEqual(regex.findall(r"\((?:(?:[^()]+)|(?R))*\)", + "a(bcd(e)f)g(h)"), ['(bcd(e)f)', '(h)']) + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(b(cd)e)f)g)h"), ['(b(cd)e)']) + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(bc(d(e)f)gh"), ['(d(e)f)']) + self.assertEqual(regex.findall(r"(?r)\((?:(?>[^()]+)|(?R))*\)", + "a(bc(d(e)f)gh"), ['(d(e)f)']) + self.assertEqual([m.group() for m in + regex.finditer(r"\((?:[^()]*+|(?0))*\)", "a(b(c(de)fg)h")], + ['(c(de)fg)']) + + # Hg issue 32: regex.search("a(bc)d", "abcd", regex.I|regex.V1) returns + # None + self.assertEqual(regex.search("a(bc)d", "abcd", regex.I | + regex.V1).group(0), "abcd") + + # Hg issue 33: regex.search("([\da-f:]+)$", "E", regex.I|regex.V1) + # returns None + self.assertEqual(regex.search(r"([\da-f:]+)$", "E", regex.I | + regex.V1).group(0), "E") + self.assertEqual(regex.search(r"([\da-f:]+)$", "e", regex.I | + regex.V1).group(0), "e") + + # Hg issue 34: regex.search("^(?=ab(de))(abd)(e)", "abde").groups() + # returns (None, 'abd', 'e') instead of ('de', 'abd', 'e') + self.assertEqual(regex.search("^(?=ab(de))(abd)(e)", "abde").groups(), + ('de', 'abd', 'e')) + + # Hg issue 35: regex.compile("\ ", regex.X) causes "_regex_core.error: + # bad escape" + self.assertEqual(bool(regex.match(r"\ ", " ", flags=regex.X)), True) + + # Hg issue 36: regex.search("^(a|)\1{2}b", "b") returns None + self.assertEqual(regex.search(r"^(a|)\1{2}b", "b").group(0, 1), ('b', + '')) + + # Hg issue 37: regex.search("^(a){0,0}", "abc").group(0,1) returns + # ('a', 'a') instead of ('', None) + self.assertEqual(regex.search("^(a){0,0}", "abc").group(0, 1), ('', + None)) + + # Hg issue 38: regex.search("(?>.*/)b", "a/b") returns None + self.assertEqual(regex.search("(?>.*/)b", "a/b").group(0), "a/b") + + # Hg issue 39: regex.search("((?i)blah)\\s+\\1", "blah BLAH") doesn't + # return None + # Changed to positional flags in regex 2023.12.23. 
+ self.assertEqual(regex.search(r"((?i)blah)\s+\1", "blah BLAH"), None) + + # Hg issue 40: regex.search("(\()?[^()]+(?(1)\)|)", "(abcd").group(0) + # returns "bcd" instead of "abcd" + self.assertEqual(regex.search(r"(\()?[^()]+(?(1)\)|)", + "(abcd").group(0), "abcd") + + # Hg issue 42: regex.search("(a*)*", "a", flags=regex.V1).span(1) + # returns (0, 1) instead of (1, 1) + self.assertEqual(regex.search("(a*)*", "a").span(1), (1, 1)) + self.assertEqual(regex.search("(a*)*", "aa").span(1), (2, 2)) + self.assertEqual(regex.search("(a*)*", "aaa").span(1), (3, 3)) + + # Hg issue 43: regex.compile("a(?#xxx)*") causes "_regex_core.error: + # nothing to repeat" + self.assertEqual(regex.search("a(?#xxx)*", "aaa").group(), "aaa") + + # Hg issue 44: regex.compile("(?=abc){3}abc") causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("(?=abc){3}abc", "abcabcabc").span(), (0, + 3)) + + # Hg issue 45: regex.compile("^(?:a(?:(?:))+)+") causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("^(?:a(?:(?:))+)+", "a").span(), (0, 1)) + self.assertEqual(regex.search("^(?:a(?:(?:))+)+", "aa").span(), (0, 2)) + + # Hg issue 46: regex.compile("a(?x: b c )d") causes + # "_regex_core.error: missing )" + self.assertEqual(regex.search("a(?x: b c )d", "abcd").group(0), "abcd") + + # Hg issue 47: regex.compile("a#comment\n*", flags=regex.X) causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("a#comment\n*", "aaa", + flags=regex.X).group(0), "aaa") + + # Hg issue 48: regex.search("(a(?(1)\\1)){4}", "a"*10, + # flags=regex.V1).group(0,1) returns ('aaaaa', 'a') instead of ('aaaaaaaaaa', 'aaaa') + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){1}", + "aaaaaaaaaa").span(0, 1), ((0, 1), (0, 1))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){2}", + "aaaaaaaaaa").span(0, 1), ((0, 3), (1, 3))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){3}", + "aaaaaaaaaa").span(0, 1), ((0, 6), (3, 6))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){4}", + "aaaaaaaaaa").span(0, 1), ((0, 10), (6, 10))) + + # Hg issue 49: regex.search("(a)(?<=b(?1))", "baz", regex.V1) returns + # None incorrectly + self.assertEqual(regex.search("(?V1)(a)(?<=b(?1))", "baz").group(0), + "a") + + # Hg issue 50: not all keywords are found by named list with + # overlapping keywords when full Unicode casefolding is required + self.assertEqual(regex.findall(r'(?fi)\L', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05', + keywords=['post','pos']), ['POST', 'Post', 'post', 'po\u017Ft', + 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)pos|post', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POS', + 'Pos', 'pos', 'po\u017F', 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)post|pos', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POST', + 'Post', 'post', 'po\u017Ft', 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)post|another', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POST', + 'Post', 'post', 'po\u017Ft', 'po\uFB06', 'po\uFB05']) + + # Hg issue 51: regex.search("((a)(?1)|(?2))", "a", flags=regex.V1) + # returns None incorrectly + self.assertEqual(regex.search("(?V1)((a)(?1)|(?2))", "a").group(0, 1, + 2), ('a', 'a', None)) + + # Hg issue 52: regex.search("(\\1xx|){6}", "xx", + # flags=regex.V1).span(0,1) returns incorrect value + self.assertEqual(regex.search(r"(?V1)(\1xx|){6}", "xx").span(0, 1), + ((0, 2), (2, 2))) + + # Hg issue 53: regex.search("(a|)+", "a") causes 
MemoryError + self.assertEqual(regex.search("(a|)+", "a").group(0, 1), ("a", "")) + + # Hg issue 54: regex.search("(a|)*\\d", "a"*80) causes MemoryError + self.assertEqual(regex.search(r"(a|)*\d", "a" * 80), None) + + # Hg issue 55: regex.search("^(?:a?b?)*$", "ac") take a very long time. + self.assertEqual(regex.search("^(?:a?b?)*$", "ac"), None) + + # Hg issue 58: bad named character escape sequences like "\\N{1}" + # treats as "N" + self.assertRaisesRegex(regex.error, self.UNDEF_CHAR_NAME, lambda: + regex.compile("\\N{1}")) + + # Hg issue 59: regex.search("\\Z", "a\na\n") returns None incorrectly + self.assertEqual(regex.search("\\Z", "a\na\n").span(0), (4, 4)) + + # Hg issue 60: regex.search("(q1|.)*(q2|.)*(x(a|bc)*y){2,}", "xayxay") + # returns None incorrectly + self.assertEqual(regex.search("(q1|.)*(q2|.)*(x(a|bc)*y){2,}", + "xayxay").group(0), "xayxay") + + # Hg issue 61: regex.search("[^a]", "A", regex.I).group(0) returns '' + # incorrectly + self.assertEqual(regex.search("(?i)[^a]", "A"), None) + + # Hg issue 63: regex.search("[[:ascii:]]", "\N{KELVIN SIGN}", + # flags=regex.I|regex.V1) doesn't return None + self.assertEqual(regex.search("(?i)[[:ascii:]]", "\N{KELVIN SIGN}"), + None) + + # Hg issue 66: regex.search("((a|b(?1)c){3,5})", "baaaaca", + # flags=regex.V1).groups() returns ('baaaac', 'baaaac') instead of ('aaaa', 'a') + self.assertEqual(regex.search("((a|b(?1)c){3,5})", "baaaaca").group(0, + 1, 2), ('aaaa', 'aaaa', 'a')) + + # Hg issue 71: non-greedy quantifier in lookbehind + self.assertEqual(regex.findall(r"(?<=:\S+ )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S* )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S+? )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S*? )\w+", ":9 abc :10 def"), + ['abc', 'def']) + + # Hg issue 73: conditional patterns + self.assertEqual(regex.search(r"(?:fe)?male", "female").group(), + "female") + self.assertEqual([m.group() for m in + regex.finditer(r"(fe)?male: h(?(1)(er)|(is)) (\w+)", + "female: her dog; male: his cat. asdsasda")], ['female: her dog', + 'male: his cat']) + + # Hg issue 78: "Captures" doesn't work for recursive calls + self.assertEqual(regex.search(r'(?\((?:[^()]++|(?&rec))*\))', + 'aaa(((1+0)+1)+1)bbb').captures('rec'), ['(1+0)', '((1+0)+1)', + '(((1+0)+1)+1)']) + + # Hg issue 80: Escape characters throws an exception + self.assertRaisesRegex(regex.error, self.BAD_ESCAPE, lambda: + regex.sub('x', '\\', 'x'), ) + + # Hg issue 82: error range does not work + fz = "(CAGCCTCCCATTTCAGAATATACATCC){1a(?b))', "ab").spans("x"), [(1, + 2), (0, 2)]) + + # Hg issue 91: match.expand is extremely slow + # Check that the replacement cache works. 
+ self.assertEqual(regex.sub(r'(-)', lambda m: m.expand(r'x'), 'a-b-c'), + 'axbxc') + + # Hg issue 94: Python crashes when executing regex updates + # pattern.findall + rx = regex.compile(r'\bt(est){i<2}', flags=regex.V1) + self.assertEqual(rx.search("Some text"), None) + self.assertEqual(rx.findall("Some text"), []) + + # Hg issue 95: 'pos' for regex.error + self.assertRaisesRegex(regex.error, self.MULTIPLE_REPEAT, lambda: + regex.compile(r'.???')) + + # Hg issue 97: behaviour of regex.escape's special_only is wrong + # + # Hg issue 244: Make `special_only=True` the default in + # `regex.escape()` + self.assertEqual(regex.escape('foo!?', special_only=False), 'foo\\!\\?') + self.assertEqual(regex.escape('foo!?', special_only=True), 'foo!\\?') + self.assertEqual(regex.escape('foo!?'), 'foo!\\?') + + self.assertEqual(regex.escape(b'foo!?', special_only=False), b'foo\\!\\?') + self.assertEqual(regex.escape(b'foo!?', special_only=True), + b'foo!\\?') + self.assertEqual(regex.escape(b'foo!?'), b'foo!\\?') + + # Hg issue 100: strange results from regex.search + self.assertEqual(regex.search('^([^z]*(?:WWWi|W))?$', + 'WWWi').groups(), ('WWWi', )) + self.assertEqual(regex.search('^([^z]*(?:WWWi|w))?$', + 'WWWi').groups(), ('WWWi', )) + self.assertEqual(regex.search('^([^z]*?(?:WWWi|W))?$', + 'WWWi').groups(), ('WWWi', )) + + # Hg issue 101: findall() broken (seems like memory corruption) + pat = regex.compile(r'xxx', flags=regex.FULLCASE | regex.UNICODE) + self.assertEqual([x.group() for x in pat.finditer('yxxx')], ['xxx']) + self.assertEqual(pat.findall('yxxx'), ['xxx']) + + raw = 'yxxx' + self.assertEqual([x.group() for x in pat.finditer(raw)], ['xxx']) + self.assertEqual(pat.findall(raw), ['xxx']) + + pat = regex.compile(r'xxx', flags=regex.FULLCASE | regex.IGNORECASE | + regex.UNICODE) + self.assertEqual([x.group() for x in pat.finditer('yxxx')], ['xxx']) + self.assertEqual(pat.findall('yxxx'), ['xxx']) + + raw = 'yxxx' + self.assertEqual([x.group() for x in pat.finditer(raw)], ['xxx']) + self.assertEqual(pat.findall(raw), ['xxx']) + + # Hg issue 106: * operator not working correctly with sub() + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0).*', 'x', 'test'), 'xx') + else: + self.assertEqual(regex.sub('(?V0).*', 'x', 'test'), 'x') + self.assertEqual(regex.sub('(?V1).*', 'x', 'test'), 'xx') + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0).*?', '|', 'test'), '|||||||||') + else: + self.assertEqual(regex.sub('(?V0).*?', '|', 'test'), '|t|e|s|t|') + self.assertEqual(regex.sub('(?V1).*?', '|', 'test'), '|||||||||') + + # Hg issue 112: re: OK, but regex: SystemError + self.assertEqual(regex.sub(r'^(@)\n(?!.*?@)(.*)', + r'\1\n==========\n\2', '@\n', flags=regex.DOTALL), '@\n==========\n') + + # Hg issue 109: Edit distance of fuzzy match + self.assertEqual(regex.match(r'(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + + self.assertEqual(regex.match(r'(?:cat){e<=1}', 'caz').fuzzy_counts, + (1, 0, 0)) + self.assertEqual(regex.match(r'(?e)(?:cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?b)(?:cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=2}', 'c ats').fuzzy_counts, + (1, 1, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=2}', + 'c ats').fuzzy_counts, (0, 1, 0)) + 
self.assertEqual(regex.match(r'(?b)(?:cats){e<=2}', + 'c ats').fuzzy_counts, (0, 1, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=1}', 'c ats').fuzzy_counts, + (0, 1, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=1}', + 'c ats').fuzzy_counts, (0, 1, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats){e<=1}', + 'c ats').fuzzy_counts, (0, 1, 0)) + + # Hg issue 115: Infinite loop when processing backreferences + self.assertEqual(regex.findall(r'\bof ([a-z]+) of \1\b', + 'To make use of one of these modules'), []) + + # Hg issue 125: Reference to entire match (\g<0>) in + # Pattern.sub() doesn't work as of 2014.09.22 release. + self.assertEqual(regex.sub(r'x', r'\g<0>', 'x'), 'x') + + # Unreported issue: no such builtin as 'ascii' in Python 2. + self.assertEqual(bool(regex.match(r'a', 'a', regex.DEBUG)), True) + + # Hg issue 131: nested sets behaviour + self.assertEqual(regex.findall(r'(?V1)[[b-e]--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[b-e--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[[bcde]--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[bcde--cd]', 'abcdef'), ['b', + 'e']) + + # Hg issue 132: index out of range on null property \p{} + self.assertRaisesRegex(regex.error, '^unknown property at position 4$', + lambda: regex.compile(r'\p{}')) + + # Issue 23692. + self.assertEqual(regex.match('(?:()|(?(1)()|z)){2}(?(2)a|z)', + 'a').group(0, 1, 2), ('a', '', '')) + self.assertEqual(regex.match('(?:()|(?(1)()|z)){0,2}(?(2)a|z)', + 'a').group(0, 1, 2), ('a', '', '')) + + # Hg issue 137: Posix character class :punct: does not seem to be + # supported. + + # Posix compatibility as recommended here: + # http://www.unicode.org/reports/tr18/#Compatibility_Properties + + # Posix in Unicode. 
+ chars = ''.join(chr(c) for c in range(0x10000)) + + self.assertEqual(ascii(''.join(regex.findall(r'''[[:alnum:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{Alpha}\p{PosixDigit}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:alpha:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Alpha}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:ascii:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{InBasicLatin}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:blank:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{gc=Space_Separator}\t]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:cntrl:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{gc=Control}+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:digit:]]+''', + chars))), ascii(''.join(regex.findall(r'''[0-9]+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:graph:]]+''', + chars))), ascii(''.join(regex.findall(r'''[^\p{Space}\p{gc=Control}\p{gc=Surrogate}\p{gc=Unassigned}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:lower:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Lower}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:print:]]+''', + chars))), ascii(''.join(regex.findall(r'''(?V1)[\p{Graph}\p{Blank}--\p{Cntrl}]+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:punct:]]+''', + chars))), + ascii(''.join(regex.findall(r'''(?V1)[\p{gc=Punctuation}\p{gc=Symbol}--\p{Alpha}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:space:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Whitespace}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:upper:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Upper}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:word:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{Alpha}\p{gc=Mark}\p{Digit}\p{gc=Connector_Punctuation}\p{Join_Control}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:xdigit:]]+''', + chars))), ascii(''.join(regex.findall(r'''[0-9A-Fa-f]+''', + chars)))) + + # Posix in ASCII. 
+ chars = bytes(range(0x100)) + + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:alnum:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{Alpha}\p{PosixDigit}]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:alpha:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Alpha}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:ascii:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\x00-\x7F]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:blank:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{gc=Space_Separator}\t]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:cntrl:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{gc=Control}+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:digit:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[0-9]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:graph:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[^\p{Space}\p{gc=Control}\p{gc=Surrogate}\p{gc=Unassigned}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:lower:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Lower}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:print:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?aV1)[\p{Graph}\p{Blank}--\p{Cntrl}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:punct:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?aV1)[\p{gc=Punctuation}\p{gc=Symbol}--\p{Alpha}]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:space:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Whitespace}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:upper:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Upper}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:word:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{Alpha}\p{gc=Mark}\p{Digit}\p{gc=Connector_Punctuation}\p{Join_Control}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:xdigit:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[0-9A-Fa-f]+''', chars)))) + + # Hg issue 138: grapheme anchored search not working properly. + self.assertEqual(ascii(regex.search(r'\X$', 'ab\u2103').group()), + ascii('\u2103')) + + # Hg issue 139: Regular expression with multiple wildcards where first + # should match empty string does not always work. + self.assertEqual(regex.search("([^L]*)([^R]*R)", "LtR").groups(), ('', + 'LtR')) + + # Hg issue 140: Replace with REVERSE and groups has unexpected + # behavior. + self.assertEqual(regex.sub(r'(.)', r'x\1y', 'ab'), 'xayxby') + self.assertEqual(regex.sub(r'(?r)(.)', r'x\1y', 'ab'), 'xayxby') + self.assertEqual(regex.subf(r'(.)', 'x{1}y', 'ab'), 'xayxby') + self.assertEqual(regex.subf(r'(?r)(.)', 'x{1}y', 'ab'), 'xayxby') + + # Hg issue 141: Crash on a certain partial match. + self.assertEqual(regex.fullmatch('(a)*abc', 'ab', + partial=True).span(), (0, 2)) + self.assertEqual(regex.fullmatch('(a)*abc', 'ab', + partial=True).partial, True) + + # Hg issue 143: Partial matches have incorrect span if prefix is '.' + # wildcard. 
+        self.assertEqual(regex.search('OXRG', 'OOGOX', partial=True).span(),
+          (3, 5))
+        self.assertEqual(regex.search('.XRG', 'OOGOX', partial=True).span(),
+          (3, 5))
+        self.assertEqual(regex.search('.{1,3}XRG', 'OOGOX',
+          partial=True).span(), (1, 5))
+
+        # Hg issue 144: Latest version problem with matching 'R|R'.
+        self.assertEqual(regex.match('R|R', 'R').span(), (0, 1))
+
+        # Hg issue 146: Forced-fail (?!) works improperly in conditional.
+        self.assertEqual(regex.match(r'(.)(?(1)(?!))', 'xy'), None)
+
+        # Groups cleared after failure.
+        self.assertEqual(regex.findall(r'(y)?(\d)(?(1)\b\B)', 'ax1y2z3b'),
+          [('', '1'), ('', '2'), ('', '3')])
+        self.assertEqual(regex.findall(r'(y)?+(\d)(?(1)\b\B)', 'ax1y2z3b'),
+          [('', '1'), ('', '2'), ('', '3')])
+
+        # Hg issue 147: Fuzzy match can return match points beyond buffer end.
+        self.assertEqual([m.span() for m in regex.finditer(r'(?i)(?:error){e}',
+          'regex failure')], [(0, 5), (5, 10), (10, 13), (13, 13)])
+        self.assertEqual([m.span() for m in
+          regex.finditer(r'(?fi)(?:error){e}', 'regex failure')], [(0, 5), (5,
+          10), (10, 13), (13, 13)])
+
+        # Hg issue 150: Have an option for POSIX-compatible longest match of
+        # alternates.
+        self.assertEqual(regex.search(r'(?p)\d+(\w(\d*)?|[eE]([+-]\d+))',
+          '10b12')[0], '10b12')
+        self.assertEqual(regex.search(r'(?p)\d+(\w(\d*)?|[eE]([+-]\d+))',
+          '10E+12')[0], '10E+12')
+
+        self.assertEqual(regex.search(r'(?p)(\w|ae|oe|ue|ss)', 'ae')[0], 'ae')
+        self.assertEqual(regex.search(r'(?p)one(self)?(selfsufficient)?',
+          'oneselfsufficient')[0], 'oneselfsufficient')
+
+        # Hg issue 151: Request: \K.
+        self.assertEqual(regex.search(r'(ab\Kcd)', 'abcd').group(0, 1), ('cd',
+          'abcd'))
+        self.assertEqual(regex.findall(r'\w\w\K\w\w', 'abcdefgh'), ['cd',
+          'gh'])
+        self.assertEqual(regex.findall(r'(\w\w\K\w\w)', 'abcdefgh'), ['abcd',
+          'efgh'])
+
+        self.assertEqual(regex.search(r'(?r)(ab\Kcd)', 'abcd').group(0, 1),
+          ('ab', 'abcd'))
+        self.assertEqual(regex.findall(r'(?r)\w\w\K\w\w', 'abcdefgh'), ['ef',
+          'ab'])
+        self.assertEqual(regex.findall(r'(?r)(\w\w\K\w\w)', 'abcdefgh'),
+          ['efgh', 'abcd'])
+
+        # Hg issue 152: Request: (?(DEFINE)...).
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<quant>\d+)(?<item>\w+))(?&quant) (?&item)',
+          '5 elephants')[0], '5 elephants')
+
+        self.assertEqual(regex.search(r'(?&routine)(?(DEFINE)(?<routine>.))',
+          'a').group('routine'), None)
+        self.assertEqual(regex.search(r'(?&routine)(?(DEFINE)(?<routine>.))',
+          'a').captures('routine'), ['a'])
+
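+        # Illustrative sketch, not from the tracker: (?(DEFINE)...) builds a
+        # library of named subpatterns that match only when called with
+        # (?&name). The pattern and subjects below are assumptions chosen for
+        # this example.
+        nested_call = regex.compile(r'''(?x)
+            (?(DEFINE)
+                (?<ident> [A-Za-z_]\w* )
+                (?<call>  (?&ident) \( (?: (?&call) | [^()] )* \) )
+            )
+            ^(?&call)$
+        ''')
+        self.assertEqual(bool(nested_call.match('f(g(x), h())')), True)
+        self.assertEqual(bool(nested_call.match('f(g(x)')), False)
+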
+        # Hg issue 153: Request: (*SKIP).
+        self.assertEqual(regex.search(r'12(*FAIL)|3', '123')[0], '3')
+        self.assertEqual(regex.search(r'(?r)12(*FAIL)|3', '123')[0], '3')
+
+        self.assertEqual(regex.search(r'\d+(*PRUNE)\d', '123'), None)
+        self.assertEqual(regex.search(r'\d+(?=(*PRUNE))\d', '123')[0], '123')
+        self.assertEqual(regex.search(r'\d+(*PRUNE)bcd|[3d]', '123bcd')[0],
+          '123bcd')
+        self.assertEqual(regex.search(r'\d+(*PRUNE)bcd|[3d]', '123zzd')[0],
+          'd')
+        self.assertEqual(regex.search(r'\d+?(*PRUNE)bcd|[3d]', '123bcd')[0],
+          '3bcd')
+        self.assertEqual(regex.search(r'\d+?(*PRUNE)bcd|[3d]', '123zzd')[0],
+          'd')
+        self.assertEqual(regex.search(r'\d++(?<=3(*PRUNE))zzd|[4d]$',
+          '123zzd')[0], '123zzd')
+        self.assertEqual(regex.search(r'\d++(?<=3(*PRUNE))zzd|[4d]$',
+          '124zzd')[0], 'd')
+        self.assertEqual(regex.search(r'\d++(?<=(*PRUNE)3)zzd|[4d]$',
+          '124zzd')[0], 'd')
+        self.assertEqual(regex.search(r'\d++(?<=2(*PRUNE)3)zzd|[3d]$',
+          '124zzd')[0], 'd')
+
+        self.assertEqual(regex.search(r'(?r)\d(*PRUNE)\d+', '123'), None)
+        self.assertEqual(regex.search(r'(?r)\d(?<=(*PRUNE))\d+', '123')[0],
+          '123')
+        self.assertEqual(regex.search(r'(?r)\d+(*PRUNE)bcd|[3d]',
+          '123bcd')[0], '123bcd')
+        self.assertEqual(regex.search(r'(?r)\d+(*PRUNE)bcd|[3d]',
+          '123zzd')[0], 'd')
+        self.assertEqual(regex.search(r'(?r)\d++(?<=3(*PRUNE))zzd|[4d]$',
+          '123zzd')[0], '123zzd')
+        self.assertEqual(regex.search(r'(?r)\d++(?<=3(*PRUNE))zzd|[4d]$',
+          '124zzd')[0], 'd')
+        self.assertEqual(regex.search(r'(?r)\d++(?<=(*PRUNE)3)zzd|[4d]$',
+          '124zzd')[0], 'd')
+        self.assertEqual(regex.search(r'(?r)\d++(?<=2(*PRUNE)3)zzd|[3d]$',
+          '124zzd')[0], 'd')
+
+        self.assertEqual(regex.search(r'\d+(*SKIP)bcd|[3d]', '123bcd')[0],
+          '123bcd')
+        self.assertEqual(regex.search(r'\d+(*SKIP)bcd|[3d]', '123zzd')[0],
+          'd')
+        self.assertEqual(regex.search(r'\d+?(*SKIP)bcd|[3d]', '123bcd')[0],
+          '3bcd')
+        self.assertEqual(regex.search(r'\d+?(*SKIP)bcd|[3d]', '123zzd')[0],
+          'd')
+        self.assertEqual(regex.search(r'\d++(?<=3(*SKIP))zzd|[4d]$',
+          '123zzd')[0], '123zzd')
+        self.assertEqual(regex.search(r'\d++(?<=3(*SKIP))zzd|[4d]$',
+          '124zzd')[0], 'd')
+        self.assertEqual(regex.search(r'\d++(?<=(*SKIP)3)zzd|[4d]$',
+          '124zzd')[0], 'd')
+        self.assertEqual(regex.search(r'\d++(?<=2(*SKIP)3)zzd|[3d]$',
+          '124zzd')[0], 'd')
+
+        self.assertEqual(regex.search(r'(?r)\d+(*SKIP)bcd|[3d]', '123bcd')[0],
+          '123bcd')
+        self.assertEqual(regex.search(r'(?r)\d+(*SKIP)bcd|[3d]', '123zzd')[0],
+          'd')
+        self.assertEqual(regex.search(r'(?r)\d++(?<=3(*SKIP))zzd|[4d]$',
+          '123zzd')[0], '123zzd')
+        self.assertEqual(regex.search(r'(?r)\d++(?<=3(*SKIP))zzd|[4d]$',
+          '124zzd')[0], 'd')
+        self.assertEqual(regex.search(r'(?r)\d++(?<=(*SKIP)3)zzd|[4d]$',
+          '124zzd')[0], 'd')
+        self.assertEqual(regex.search(r'(?r)\d++(?<=2(*SKIP)3)zzd|[3d]$',
+          '124zzd')[0], 'd')
+
+        # Hg issue 154: Segmentation fault 11 when working with an atomic
+        # group
+        text = """June 30, December 31, 2013 2012
+some words follow:
+more words and numbers 1,234,567 9,876,542
+more words and numbers 1,234,567 9,876,542"""
+        self.assertEqual(len(regex.findall(r'(?<!\d)(?>2014|2013 ?2012)',
+          text)), 1)
+
+        # Hg issue 156: regression on atomic grouping
+        self.assertEqual(regex.match('1(?>2)', '12').span(), (0, 2))
+
+        # Hg issue 157: regression: segfault on complex lookaround
+        self.assertEqual(regex.match(r'(?V1w)(?=(?=[^A-Z]*+[A-Z])(?=[^a-z]*+[a-z]))(?=\D*+\d)(?=\p{Alphanumeric}*+\P{Alphanumeric})\A(?s:.){8,255}+\Z',
+          'AAaa11!!')[0], 'AAaa11!!')
+
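+        # Illustrative sketch, not from the tracker: an atomic group discards
+        # its backtracking positions once it has matched, so '(?>a*)a' can
+        # never succeed while the backtracking 'a*a' can; the subjects are
+        # assumptions chosen for this example.
+        self.assertEqual(regex.search(r'a*a', 'aaa').group(), 'aaa')
+        self.assertEqual(regex.search(r'(?>a*)a', 'aaa'), None)
+        # A possessive quantifier is shorthand for the same behaviour.
+        self.assertEqual(regex.search(r'a*+a', 'aaa'), None)
+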
+        # Hg issue 158: Group issue with (?(DEFINE)...)
+        TEST_REGEX = regex.compile(r'''(?smx)
+(?(DEFINE)
+  (?<subcat>
+  ^,[^,]+,
+  )
+)
+
+# Group 2 is defined on this line
+^,([^,]+),
+
+(?:(?!(?&subcat)[\r\n]+(?&subcat)).)+
+''')
+
+        TEST_DATA = '''
+,Cat 1,
+,Brand 1,
+some
+thing
+,Brand 2,
+other
+things
+,Cat 2,
+,Brand,
+Some
+thing
+'''
+
+        self.assertEqual([m.span(1, 2) for m in
+          TEST_REGEX.finditer(TEST_DATA)], [((-1, -1), (2, 7)), ((-1, -1), (54,
+          59))])
+
+        # Hg issue 161: Unexpected fuzzy match results
+        self.assertEqual(regex.search('(abcdefgh){e}',
+          '******abcdefghijklmnopqrtuvwxyz', regex.BESTMATCH).span(), (6, 14))
+        self.assertEqual(regex.search('(abcdefghi){e}',
+          '******abcdefghijklmnopqrtuvwxyz', regex.BESTMATCH).span(), (6, 15))
+
+        # Hg issue 163: allow lookarounds in conditionals.
+        self.assertEqual(regex.match(r'(?:(?=\d)\d+\b|\w+)', '123abc').span(),
+          (0, 6))
+        self.assertEqual(regex.match(r'(?(?=\d)\d+\b|\w+)', '123abc'), None)
+        self.assertEqual(regex.search(r'(?(?<=love\s)you|(?<=hate\s)her)',
+          "I love you").span(), (7, 10))
+        self.assertEqual(regex.findall(r'(?(?<=love\s)you|(?<=hate\s)her)',
+          "I love you but I don't hate her either"), ['you', 'her'])
+
+        # Hg issue 180: bug of POSIX matching.
+        self.assertEqual(regex.search(r'(?p)a*(.*?)', 'aaabbb').group(0, 1),
+          ('aaabbb', 'bbb'))
+        self.assertEqual(regex.search(r'(?p)a*(.*)', 'aaabbb').group(0, 1),
+          ('aaabbb', 'bbb'))
+        self.assertEqual(regex.sub(r'(?p)a*(.*?)', r'\1', 'aaabbb'), 'bbb')
+        self.assertEqual(regex.sub(r'(?p)a*(.*)', r'\1', 'aaabbb'), 'bbb')
+
+        # Hg issue 192: Named lists reverse matching doesn't work with
+        # IGNORECASE and V1
+        self.assertEqual(regex.match(r'(?irV0)\L<kw>', '21', kw=['1']).span(),
+          (1, 2))
+        self.assertEqual(regex.match(r'(?irV1)\L<kw>', '21', kw=['1']).span(),
+          (1, 2))
+
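+        # Illustrative sketch, not from the tracker: \L<name> matches any of
+        # the strings passed through the keyword argument of the same name;
+        # the list name and contents are assumptions chosen for this example.
+        self.assertEqual(regex.findall(r'\L<fruits>', 'apple, pineapple',
+          fruits=['apple', 'pineapple']), ['apple', 'pineapple'])
+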
+        # Hg issue 193: Alternation and .REVERSE flag.
+        self.assertEqual(regex.search('a|b', '111a222').span(), (3, 4))
+        self.assertEqual(regex.search('(?r)a|b', '111a222').span(), (3, 4))
+
+        # Hg issue 194: .FULLCASE and Backreference
+        self.assertEqual(regex.search(r'(?if)<(CLI)><\1>',
+          '<cli><CLI>').span(), (0, 10))
+        self.assertEqual(regex.search(r'(?if)<(CLI)><\1>',
+          '<cli><clI>').span(), (0, 10))
+        self.assertEqual(regex.search(r'(?ifr)<\1><(CLI)>',
+          '<cli><CLI>').span(), (0, 10))
+
+        # Hg issue 195: Pickle (or otherwise serial) the compiled regex
+        r = regex.compile(r'\L<options>', options=['foo', 'bar'])
+        p = pickle.dumps(r)
+        r = pickle.loads(p)
+        self.assertEqual(r.match('foo').span(), (0, 3))
+
+        # Hg issue 196: Fuzzy matching on repeated regex not working as
+        # expected
+        self.assertEqual(regex.match('(x{6}){e<=1}', 'xxxxxx',
+          flags=regex.BESTMATCH).span(), (0, 6))
+        self.assertEqual(regex.match('(x{6}){e<=1}', 'xxxxx',
+          flags=regex.BESTMATCH).span(), (0, 5))
+        self.assertEqual(regex.match('(x{6}){e<=1}', 'x',
+          flags=regex.BESTMATCH), None)
+        self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'xxxxxx',
+          flags=regex.BESTMATCH).span(), (0, 6))
+        self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'xxxxx',
+          flags=regex.BESTMATCH).span(), (0, 5))
+        self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'x',
+          flags=regex.BESTMATCH), None)
+
+        # Hg issue 197: ValueError in regex.compile
+        self.assertRaises(regex.error, lambda:
+          regex.compile(b'00000\\0\\00\\^\50\\00\\U05000000'))
+
+        # Hg issue 198: ValueError in regex.compile
+        self.assertRaises(regex.error, lambda: regex.compile(b"{e<l"))
+
+        # Hg issue 207: Ignorecase and named lists
+        self.assertEqual(bool(regex.search(r'(?i)\L<aa>', '22', aa=['121',
+          '22'])), True)
+        self.assertEqual(bool(regex.search(r'(?ri)\L<aa>', '22', aa=['121',
+          '22'])), True)
+        self.assertEqual(bool(regex.search(r'(?fi)\L<aa>', '22', aa=['121',
+          '22'])), True)
+        self.assertEqual(bool(regex.search(r'(?fri)\L<aa>', '22', aa=['121',
+          '22'])), True)
+
+        # Hg issue 208: Named list, (?ri) flags, Backreference
+        self.assertEqual(regex.search(r'(?r)\1dog..(?<=(\L<aa>))$', 'ccdogcc',
+          aa=['bcb', 'cc']).span(), (0, 7))
+        self.assertEqual(regex.search(r'(?ir)\1dog..(?<=(\L<aa>))$',
+          'ccdogcc', aa=['bcb', 'cc']).span(), (0, 7))
+
+        # Hg issue 210: Fuzzy matching and Backreference
+        self.assertEqual(regex.search(r'(2)(?:\1{5}){e<=1}',
+          '3222212').span(), (1, 7))
+        self.assertEqual(regex.search(r'(\d)(?:\1{5}){e<=1}',
+          '3222212').span(), (1, 7))
+
+        # Hg issue 211: Segmentation fault with recursive matches and atomic
+        # groups
+        self.assertEqual(regex.match(r'''\A(?P<whole>(?>\((?&whole)\)|[+\-]))\Z''',
+          '((-))').span(), (0, 5))
+        self.assertEqual(regex.match(r'''\A(?P<whole>(?>\((?&whole)\)|[+\-]))\Z''',
+          '((-)+)'), None)
+
+        # Hg issue 212: Unexpected matching difference with .*? between re and
+        # regex
+        self.assertEqual(regex.match(r"x.*? (.).*\1(.*)\1",
+          'x |y| z|').span(), (0, 9))
+        self.assertEqual(regex.match(r"\.sr (.*?) (.)(.*)\2(.*)\2(.*)",
+          r'.sr h |||').span(), (0, 35))
+
+        # Hg issue 213: Segmentation Fault
+        a = '"\\xF9\\x80\\xAEqdz\\x95L\\xA7\\x89[\\xFE \\x91)\\xF9]\\xDB\'\\x99\\x09=\\x00\\xFD\\x98\\x22\\xDD\\xF1\\xB6\\xC3 Z\\xB6gv\\xA5x\\x93P\\xE1r\\x14\\x8Cv\\x0C\\xC0w\\x15r\\xFFc%" '
+        py_regex_pattern = r'''(?P<string1>((?>(?<!\\)(?>"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``)))) (?P<string2>((?>(?<!\\)(?>"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``))))'''
+        self.assertEqual(bool(regex.search(py_regex_pattern, a)), False)
+
+        # Hg Issue 216: Invalid match when using negative lookbehind and pipe
+        self.assertEqual(bool(regex.match('foo(?<=foo)', 'foo')), True)
+        self.assertEqual(bool(regex.match('foo(?<!foo)', 'foo')), False)
+        self.assertEqual(bool(regex.match(r'(?(?=.*\!.*)(?P<one>.*\!\w*\:.*)|(?P<two>.*))',
+          '!')), False)
+
+        # Hg issue 220: Misbehavior of group capture with OR operand
+        self.assertEqual(regex.match(r'\w*(ea)\w*|\w*e(?!a)\w*',
+          'easier').groups(), ('ea', ))
+
+        # Hg issue 225: BESTMATCH in fuzzy match not working
+        self.assertEqual(regex.search('(^1234$){i,d}', '12234',
+          regex.BESTMATCH).span(), (0, 5))
+        self.assertEqual(regex.search('(^1234$){i,d}', '12234',
+          regex.BESTMATCH).fuzzy_counts, (0, 1, 0))
+
+        self.assertEqual(regex.search('(^1234$){s,i,d}', '12234',
+          regex.BESTMATCH).span(), (0, 5))
+        self.assertEqual(regex.search('(^1234$){s,i,d}', '12234',
+          regex.BESTMATCH).fuzzy_counts, (0, 1, 0))
+
+        # Hg issue 226: Error matching at start of string
+        self.assertEqual(regex.search('(^123$){s,i,d}', 'xxxxxxxx123',
+          regex.BESTMATCH).span(), (0, 11))
+        self.assertEqual(regex.search('(^123$){s,i,d}', 'xxxxxxxx123',
+          regex.BESTMATCH).fuzzy_counts, (0, 8, 0))
+
+        # Hg issue 227: Incorrect behavior for ? operator with UNICODE +
+        # IGNORECASE
+        self.assertEqual(regex.search(r'a?yz', 'xxxxyz', flags=regex.FULLCASE |
+          regex.IGNORECASE).span(), (4, 6))
+
+        # Hg issue 230: Is it a bug of (?(DEFINE)...)
+        self.assertEqual(regex.findall(r'(?:(?![a-d]).)+', 'abcdefgh'),
+          ['efgh'])
+        self.assertEqual(regex.findall(r'''(?(DEFINE)(?P<mydef>(?:(?![a-d]).)))(?&mydef)+''',
+          'abcdefgh'), ['efgh'])
+
+        # Hg issue 238: Not fully re backward compatible
+        self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){1,3}',
+          '"Erm....yes. T..T...Thank you for that."'), [('Erm....', 'Erm',
+          '....'), ('T...', 'T', '...')])
+        self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){3}',
+          '"Erm....yes. T..T...Thank you for that."'), [])
+        self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){2}',
+          '"Erm....yes. T..T...Thank you for that."'), [('T...', 'T', '...')])
+        self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){1}',
+          '"Erm....yes. T..T...Thank you for that."'), [('Erm....', 'Erm',
+          '....'), ('T..', 'T', '..'), ('T...', 'T', '...')])
+
+        # Hg issue 247: Unexpected result with fuzzy matching and lookahead
+        # expression
+        self.assertEqual(regex.search(r'(?:ESTONIA(?!\w)){e<=1}',
+          'ESTONIAN WORKERS').group(), 'ESTONIAN')
+        self.assertEqual(regex.search(r'(?:ESTONIA(?=\W)){e<=1}',
+          'ESTONIAN WORKERS').group(), 'ESTONIAN')
+
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?&func)',
+          'abc').groups(), (None, ))
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?&func)',
+          'abc').groupdict(), {'func': None})
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?&func)',
+          'abc').capturesdict(), {'func': ['a']})
+
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?=(?&func))',
+          'abc').groups(), (None, ))
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?=(?&func))',
+          'abc').groupdict(), {'func': None})
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?=(?&func))',
+          'abc').capturesdict(), {'func': ['a']})
+
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.)).(?<=(?&func))',
+          'abc').groups(), (None, ))
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.)).(?<=(?&func))',
+          'abc').groupdict(), {'func': None})
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.)).(?<=(?&func))',
+          'abc').capturesdict(), {'func': ['a']})
+
+        # Hg issue 271: Comment logic different between Re and Regex
+        self.assertEqual(bool(regex.match(r'ab(?#comment\))cd', 'abcd')), True)
+
+        # Hg issue 276: Partial Matches yield incorrect matches and bounds
+        self.assertEqual(regex.search(r'[a-z]+ [a-z]*?:', 'foo bar',
+          partial=True).span(), (0, 7))
+        self.assertEqual(regex.search(r'(?r):[a-z]*? [a-z]+', 'foo bar',
+          partial=True).span(), (0, 7))
+
+        # Hg issue 291: Include Script Extensions as a supported Unicode
+        # property
+        self.assertEqual(bool(regex.match(r'(?u)\p{Script:Beng}',
+          '\u09EF')), True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{Script:Bengali}',
+          '\u09EF')), True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Bengali}',
+          '\u09EF')), True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Beng}',
+          '\u09EF')), True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Cakm}',
+          '\u09EF')), True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Sylo}',
+          '\u09EF')), True)
+
+        # Hg issue #293: scx (Script Extensions) property currently matches
+        # incorrectly
+        self.assertEqual(bool(regex.match(r'(?u)\p{scx:Latin}', 'P')), True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{scx:Ahom}', 'P')), False)
+        self.assertEqual(bool(regex.match(r'(?u)\p{scx:Common}', '4')), True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{scx:Caucasian_Albanian}',
+          '4')), False)
+        self.assertEqual(bool(regex.match(r'(?u)\p{scx:Arabic}', '\u062A')),
+          True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{scx:Balinese}',
+          '\u062A')), False)
+        self.assertEqual(bool(regex.match(r'(?u)\p{scx:Devanagari}',
+          '\u091C')), True)
+        self.assertEqual(bool(regex.match(r'(?u)\p{scx:Batak}', '\u091C')),
+          False)
+
+        # Hg issue 296: Group references are not taken into account when group
+        # is reporting the last match
+        self.assertEqual(regex.fullmatch('(?P<x>.)*(?&x)',
+          'abc').captures('x'), ['a', 'b', 'c'])
+        self.assertEqual(regex.fullmatch('(?P<x>.)*(?&x)', 'abc').group('x'),
+          'b')
+
+        self.assertEqual(regex.fullmatch('(?P<x>.)(?P<x>.)(?P<x>.)',
+          'abc').captures('x'), ['a', 'b', 'c'])
+        self.assertEqual(regex.fullmatch('(?P<x>.)(?P<x>.)(?P<x>.)',
+          'abc').group('x'), 'c')
+
+        # Hg issue 299: Partial gives misleading results with "open ended"
+        # regexp
+
self.assertEqual(regex.match('(?:ab)*', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*?', '', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+?', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)++', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)++', 'abab', partial=True).partial, + False) + + self.assertEqual(regex.match('(?r)(?:ab)*', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*?', '', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+?', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)++', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)++', 'abab', partial=True).partial, + False) + + self.assertEqual(regex.match('a*', '', partial=True).partial, False) + self.assertEqual(regex.match('a*?', '', partial=True).partial, False) + self.assertEqual(regex.match('a*+', '', partial=True).partial, False) + self.assertEqual(regex.match('a+', '', partial=True).partial, True) + self.assertEqual(regex.match('a+?', '', partial=True).partial, True) + self.assertEqual(regex.match('a++', '', partial=True).partial, True) + self.assertEqual(regex.match('a+', 'a', partial=True).partial, False) + self.assertEqual(regex.match('a+?', 'a', partial=True).partial, False) + self.assertEqual(regex.match('a++', 'a', partial=True).partial, False) + + self.assertEqual(regex.match('(?r)a*', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a*?', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a*+', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a+', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a+?', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a++', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a+', 'a', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a+?', 'a', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a++', 'a', partial=True).partial, False) + + self.assertEqual(regex.match(r"(?:\s*\w+'*)+", 'whatever', partial=True).partial, + False) + + # Hg issue 300: segmentation fault + pattern = ('(?PGGCGTCACACTTTGCTATGCCATAGCAT[AG]TTTATCCATAAGA' + 'TTAGCGGATCCTACCTGACGCTTTTTATCGCAACTCTCTACTGTTTCTCCATAACAGAACATATTGA' + 'CTATCCGGTATTACCCGGCATGACAGGAGTAAAA){e<=1}' + '(?P[ACGT]{1059}){e<=2}' + '(?PTAATCGTCTTGTTTGATACACAAGGGTCGCATCTGCGGCCCTTTTGCTTTTTTAAG' + 'TTGTAAGGATATGCCATTCTAGA){e<=0}' + '(?P[ACGT]{18}){e<=0}' + '(?PAGATCGG[CT]AGAGCGTCGTGTAGGGAAAGAGTGTGG){e<=1}') + + text = 
('GCACGGCGTCACACTTTGCTATGCCATAGCATATTTATCCATAAGATTAGCGGATCCTACC'
+          'TGACGCTTTTTATCGCAACTCTCTACTGTTTCTCCATAACAGAACATATTGACTATCCGGTATTACC'
+          'CGGCATGACAGGAGTAAAAATGGCTATCGACGAAAACAAACAGAAAGCGTTGGCGGCAGCACTGGGC'
+          'CAGATTGAGAAACAATTTGGTAAAGGCTCCATCATGCGCCTGGGTGAAGACCGTTCCATGGATGTGG'
+          'AAACCATCTCTACCGGTTCGCTTTCACTGGATATCGCGCTTGGGGCAGGTGGTCTGCCGATGGGCCG'
+          'TATCGTCGAAATCTACGGACCGGAATCTTCCGGTAAAACCACGCTGACGCTGCAGGTGATCGCCGCA'
+          'GCGCAGCGTGAAGGTAAAACCTGTGCGTTTATCGATGCTGAACACGCGCTGGACCCAATCTACGCAC'
+          'GTAAACTGGGCGTCGATATCGACAACCTGCTGTGCTCCCAGCCGGACACCGGCGAGCAGGCACTGGA'
+          'AATCTGTGACGCCCTGGCGCGTTCTGGCGCAGTAGACGTTATCGTCGTTGACTCCGTGGCGGCACTG'
+          'ACGCCGAAAGCGGAAATCGAAGGCGAAATCGGCGACTCTCATATGGGCCTTGCGGCACGTATGATGA'
+          'GCCAGGCGATGCGTAAGCTGGCGGGTAACCTGAAGCAGTCCAACACGCTGCTGATCTTCATCAACCC'
+          'CATCCGTATGAAAATTGGTGTGATGTTCGGCAACCCGGAAACCACTTACCGGTGGTAACGCGCTGAA'
+          'ATTCTACGCCTCTGTTCGTCTCGACATCCGTTAAATCGGCGCGGTGAAAGAGGGCGAAAACGTGGTG'
+          'GGTAGCGAAACCCGCGTGAAAGTGGTGAAGAACAAAATCGCTGCGCCGTTTAAACAGGCTGAATTCC'
+          'AGATCCTCTACGGCGAAGGTATCAACTTCTACCCCGAACTGGTTGACCTGGGCGTAAAAGAGAAGCT'
+          'GATCGAGAAAGCAGGCGCGTGGTACAGCTACAAAGGTGAGAAGATCGGTCAGGGTAAAGCGAATGCG'
+          'ACTGCCTGGCTGAAATTTAACCCGGAAACCGCGAAAGAGATCGAGTGAAAAGTACGTGAGTTGCTGC'
+          'TGAGCAACCCGAACTCAACGCCGGATTTCTCTGTAGATGATAGCGAAGGCGTAGCAGAAACTAACGA'
+          'AGATTTTTAATCGTCTTGTTTGATACACAAGGGTCGCATCTGCGGCCCTTTTGCTTTTTTAAGTTGT'
+          'AAGGATATGCCATTCTAGACAGTTAACACACCAACAAAGATCGGTAGAGCGTCGTGTAGGGAAAGAG'
+          'TGTGGTACC')
+
+        m = regex.search(pattern, text, flags=regex.BESTMATCH)
+        self.assertEqual(m.fuzzy_counts, (0, 1, 0))
+        self.assertEqual(m.fuzzy_changes, ([], [1206], []))
+
+        # Hg issue 306: Fuzzy match parameters not respecting quantifier scope
+        self.assertEqual(regex.search(r'(?e)(dogf(((oo){e<1})|((00){e<1}))d){e<2}',
+          'dogfood').fuzzy_counts, (0, 0, 0))
+        self.assertEqual(regex.search(r'(?e)(dogf(((oo){e<1})|((00){e<1}))d){e<2}',
+          'dogfoot').fuzzy_counts, (1, 0, 0))
+
+        # Hg issue 312: \X not matching graphemes with zero-width-joins
+        self.assertEqual(regex.findall(r'\X',
+          '\U0001F468\u200D\U0001F469\u200D\U0001F467\u200D\U0001F466'),
+          ['\U0001F468\u200D\U0001F469\u200D\U0001F467\u200D\U0001F466'])
+
+        # Hg issue 320: Abnormal performance
+        self.assertEqual(bool(regex.search(r'(?=a)a', 'a')), True)
+        self.assertEqual(bool(regex.search(r'(?!b)a', 'a')), True)
+
+        # Hg issue 327: .fullmatch() causes MemoryError
+        self.assertEqual(regex.fullmatch(r'((\d)*?)*?', '123').span(), (0, 3))
+
+        # Hg issue 329: Wrong group matches when question mark quantifier is
+        # used within a look behind
+        self.assertEqual(regex.search(r'''(?(DEFINE)(?<mydef>(?<wrong>THIS_SHOULD_NOT_MATCHx?)|(?<right>right))).*(?<=(?&mydef).*)''',
+          'x right').capturesdict(), {'mydef': ['right'], 'wrong': [],
+          'right': ['right']})
+
+        # Hg issue 338: specifying allowed characters when fuzzy-matching
+        self.assertEqual(bool(regex.match(r'(?:cat){e<=1:[u]}', 'cut')), True)
+        self.assertEqual(bool(regex.match(r'(?:cat){e<=1:u}', 'cut')), True)
+
+        # Hg issue 353: fuzzy changes negative indexes
+        self.assertEqual(regex.search(r'(?be)(AGTGTTCCCCGCGCCAGCGGGGATAAACCG){s<=5,i<=5,d<=5,s+i+d<=10}',
+          'TTCCCCGCGCCAGCGGGGATAAACCG').fuzzy_changes, ([], [], [0, 1, 3, 5]))
+
+        # Git issue 364: Contradictory values in fuzzy_counts and
+        # fuzzy_changes
+        self.assertEqual(regex.match(r'(?:bc){e}', 'c').fuzzy_counts, (1, 0,
+          1))
+        self.assertEqual(regex.match(r'(?:bc){e}', 'c').fuzzy_changes, ([0],
+          [], [1]))
+        self.assertEqual(regex.match(r'(?e)(?:bc){e}', 'c').fuzzy_counts, (0,
+          0, 1))
+        self.assertEqual(regex.match(r'(?e)(?:bc){e}', 'c').fuzzy_changes,
+          ([], [], [0]))
+        self.assertEqual(regex.match(r'(?b)(?:bc){e}', 'c').fuzzy_counts, (0,
+          0, 1))
+        self.assertEqual(regex.match(r'(?b)(?:bc){e}', 'c').fuzzy_changes,
+          ([], [], [0]))
+
+        # Git issue 370: Confusions about Fuzzy matching behavior
+        self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){e}',
+          '$ 10,112.111.12').fuzzy_counts, (6, 0, 5))
+        self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=1}',
+          '$ 10,112.111.12').fuzzy_counts, (1, 0, 0))
+        self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=1,i<=1,d<=1}',
+          '$ 10,112.111.12').fuzzy_counts, (1, 0, 0))
+        self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=3}',
+          '$ 10,1a2.111.12').fuzzy_counts, (2, 0, 0))
+        self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=2}',
+          '$ 10,1a2.111.12').fuzzy_counts, (2, 0, 0))
+
+        self.assertEqual(regex.fullmatch(r'(?e)(?:0?,0(?:,0)?){s<=1,d<=1}',
+          ',0;0').fuzzy_counts, (1, 0, 0))
+        self.assertEqual(regex.fullmatch(r'(?e)(?:0??,0(?:,0)?){s<=1,d<=1}',
+          ',0;0').fuzzy_counts, (1, 0, 0))
+
+        # Git issue 371: Specifying character set when fuzzy-matching allows
+        # characters not in the set
+        self.assertEqual(regex.search(r"\b(?e)(?:\d{6,20}){i<=5:[\-\\\/]}\b",
+          "cat dog starting at 00:01132.000. hello world"), None)
+
+        # Git issue 385: Comments in expressions
+        self.assertEqual(bool(regex.compile('(?#)')), True)
+        self.assertEqual(bool(regex.compile('(?x)(?#)')), True)
+
+        # Git issue 394: Unexpected behaviour in fuzzy matching with limited
+        # character set with IGNORECASE flag
+        self.assertEqual(regex.findall(r'(\d+){i<=2:[ab]}', '123X4Y5'),
+          ['123', '4', '5'])
+        self.assertEqual(regex.findall(r'(?i)(\d+){i<=2:[ab]}', '123X4Y5'),
+          ['123', '4', '5'])
+
+        # Git issue 403: Fuzzy matching with wrong distance (unnecessary
+        # substitutions)
+        self.assertEqual(regex.match(r'^(test){e<=5}$', 'terstin',
+          flags=regex.B).fuzzy_counts, (0, 3, 0))
+
+        # Git issue 408: regex fails with a quantified backreference but
+        # succeeds with repeated backref
+        self.assertEqual(bool(regex.match(r"(?:(x*)\1\1\1)*x$", "x" * 5)),
+          True)
+        self.assertEqual(bool(regex.match(r"(?:(x*)\1{3})*x$", "x" * 5)), True)
+
+        # Git issue 415: Fuzzy character restrictions don't apply to
+        # insertions at "right edge"
+        self.assertEqual(regex.match(r't(?:es){s<=1:\d}t', 'te5t').group(),
+          'te5t')
+        self.assertEqual(regex.match(r't(?:es){s<=1:\d}t', 'tezt'), None)
+        self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', 'tes5t').group(),
+          'tes5t')
+        self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', 'teszt'), None)
+        self.assertEqual(regex.match(r't(?:es){i<=1:\d}t',
+          'tes5t').fuzzy_changes, ([], [3], []))
+        self.assertEqual(regex.match(r't(es){i<=1,0<e<=1:\d}t',
+          'tes5t').group(), 'tes5t')
+
+        # Git issue 430: Fuzzy matching with named groups
+        sequence = 'TTCAGACGTGTGCTCTTCCGATCTCAATACCGACTCCTCACTGTGTGTCT'
+        pattern = r'(?P<insert>.*)(?P<anchor>CTTCC){e<=1}(?P<umi>([ACGT]){4,6})(?P<sid>CAATACCGACTCCTCACTGTGT){e<=2}(?P<end>([ACGT]){0,6}$)'
+
+        m = regex.match(pattern, sequence, flags=regex.BESTMATCH)
+        self.assertEqual(m.span(), (0, 50))
+        self.assertEqual(m.groupdict(), {'insert': 'TTCAGACGTGTGCT',
+          'anchor': 'CTTCC', 'umi': 'GATCT',
+          'sid': 'CAATACCGACTCCTCACTGTGT', 'end': 'GTCT'})
+
+        m = regex.match(pattern, sequence, flags=regex.ENHANCEMATCH)
+        self.assertEqual(m.span(), (0, 50))
+        self.assertEqual(m.groupdict(), {'insert': 'TTCAGACGTGTGCT',
+          'anchor': 'CTTCC', 'umi': 'GATCT',
+          'sid': 'CAATACCGACTCCTCACTGTGT', 'end': 'GTCT'})
+
+        # Git issue 433: Disagreement between fuzzy_counts and fuzzy_changes
+        pattern = r'(?P<insert>.*)(?P<anchor>AACACTGG){e<=1}(?P<umi>([AT][CG]){5}){e<=2}(?P<sid>GTAACCGAAG){e<=2}(?P<end>([ACGT]){0,6}$)'
+
sequence = 'GGAAAACACTGGTCTCAGTCTCGTAACCGAAGTGGTCG' + m = regex.match(pattern, sequence, flags=regex.BESTMATCH) + self.assertEqual(m.fuzzy_counts, (0, 0, 0)) + self.assertEqual(m.fuzzy_changes, ([], [], [])) + + sequence = 'GGAAAACACTGGTCTCAGTCTCGTCCCCGAAGTGGTCG' + m = regex.match(pattern, sequence, flags=regex.BESTMATCH) + self.assertEqual(m.fuzzy_counts, (2, 0, 0)) + self.assertEqual(m.fuzzy_changes, ([24, 25], [], [])) + + # Git issue 439: Unmatched groups: sub vs subf + self.assertEqual(regex.sub(r'(test1)|(test2)', r'matched: \1\2', 'test1'), 'matched: test1') + self.assertEqual(regex.subf(r'(test1)|(test2)', r'matched: {1}{2}', 'test1'), 'matched: test1') + self.assertEqual(regex.search(r'(test1)|(test2)', 'matched: test1').expand(r'matched: \1\2'), 'matched: test1'), + self.assertEqual(regex.search(r'(test1)|(test2)', 'matched: test1').expandf(r'matched: {1}{2}'), 'matched: test1') + + # Git issue 442: Fuzzy regex matching doesn't seem to test insertions correctly + self.assertEqual(regex.search(r"(?:\bha\b){i:[ ]}", "having"), None) + self.assertEqual(regex.search(r"(?:\bha\b){i:[ ]}", "having", flags=regex.I), None) + + # Git issue 467: Scoped inline flags 'a', 'u' and 'L' affect global flags + self.assertEqual(regex.match(r'(?a:\w)\w', 'd\N{CYRILLIC SMALL LETTER ZHE}').span(), (0, 2)) + self.assertEqual(regex.match(r'(?a:\w)(?u:\w)', 'd\N{CYRILLIC SMALL LETTER ZHE}').span(), (0, 2)) + + # Git issue 473: Emoji classified as letter + self.assertEqual(regex.match(r'^\p{LC}+$', '\N{SMILING CAT FACE WITH OPEN MOUTH}'), None) + self.assertEqual(regex.match(r'^\p{So}+$', '\N{SMILING CAT FACE WITH OPEN MOUTH}').span(), (0, 1)) + + # Git issue 474: regex has no equivalent to `re.Match.groups()` for captures + self.assertEqual(regex.match(r'(.)+', 'abc').allcaptures(), (['abc'], ['a', 'b', 'c'])) + self.assertEqual(regex.match(r'(.)+', 'abc').allspans(), ([(0, 3)], [(0, 1), (1, 2), (2, 3)])) + + # Git issue 477: \v for vertical spacing + self.assertEqual(bool(regex.fullmatch(r'\p{HorizSpace}+', '\t \xA0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000')), True) + self.assertEqual(bool(regex.fullmatch(r'\p{VertSpace}+', '\n\v\f\r\x85\u2028\u2029')), True) + + # Git issue 479: Segmentation fault when using conditional pattern + self.assertEqual(regex.match(r'(?(?<=A)|(?(?![^B])C|D))', 'A'), None) + self.assertEqual(regex.search(r'(?(?<=A)|(?(?![^B])C|D))', 'A').span(), (1, 1)) + + # Git issue 494: Backtracking failure matching regex ^a?(a?)b?c\1$ against string abca + self.assertEqual(regex.search(r"^a?(a?)b?c\1$", "abca").span(), (0, 4)) + + # Git issue 498: Conditional negative lookahead inside positive lookahead fails to match + self.assertEqual(regex.match(r'(?(?=a).|..)', 'ab').span(), (0, 1)) + self.assertEqual(regex.match(r'(?(?=b).|..)', 'ab').span(), (0, 2)) + self.assertEqual(regex.match(r'(?(?!a).|..)', 'ab').span(), (0, 2)) + self.assertEqual(regex.match(r'(?(?!b).|..)', 'ab').span(), (0, 1)) + + def test_fuzzy_ext(self): + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', 'e')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'e')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', '-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', '-')), + False) + + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'ae')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', + 'ae')), True) + 
+        self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', 'e')),
+          True)
+        self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'e')),
+          True)
+        self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', '-')),
+          False)
+        self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', '-')),
+          False)
+
+        self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'ae')),
+          True)
+        self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}',
+          'ae')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'a-')),
+          False)
+        self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}',
+          'a-')), False)
+
+        self.assertEqual(bool(regex.fullmatch(r'(?:ab){e<=1:[a-z]}', 'ae')),
+          True)
+        self.assertEqual(bool(regex.fullmatch(r'(?r)(?:ab){e<=1:[a-z]}',
+          'ae')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?:ab){e<=1:[a-z]}', 'a-')),
+          False)
+        self.assertEqual(bool(regex.fullmatch(r'(?r)(?:ab){e<=1:[a-z]}',
+          'a-')), False)
+
+        self.assertEqual(bool(regex.fullmatch(r'(a)\1{e<=1:[a-z]}', 'ae')),
+          True)
+        self.assertEqual(bool(regex.fullmatch(r'(?r)\1{e<=1:[a-z]}(a)',
+          'ea')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(a)\1{e<=1:[a-z]}', 'a-')),
+          False)
+        self.assertEqual(bool(regex.fullmatch(r'(?r)\1{e<=1:[a-z]}(a)',
+          '-a')), False)
+
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+          'ts')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+          'st')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+          'st')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+          'ts')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+          '-s')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+          's-')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+          's-')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+          '-s')), False)
+
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+          'ssst')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+          'ssts')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(\N{LATIN SMALL LETTER SHARP S})',
+          'stss')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(\N{LATIN SMALL LETTER SHARP S})',
+          'tsss')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+          'ss-s')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+          'sss-')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+          '-s')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+          's-')), False)
+
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}',
+          '\N{LATIN SMALL LETTER SHARP S}ts')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}',
+          '\N{LATIN SMALL LETTER SHARP S}st')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(ss)',
+          'st\N{LATIN SMALL LETTER SHARP S}')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(ss)',
+          'ts\N{LATIN SMALL LETTER SHARP S}')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}',
+          '\N{LATIN SMALL LETTER SHARP S}-s')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}',
+          '\N{LATIN SMALL LETTER SHARP S}s-')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(ss)\1{e<=1:[a-z]}',
+          's-\N{LATIN SMALL LETTER SHARP S}')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(ss)\1{e<=1:[a-z]}',
+          '-s\N{LATIN SMALL LETTER SHARP S}')), False)
+
+    def test_subscripted_captures(self):
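+        # With repeated captures, a bare group reference in a format string
+        # ({1} or {x}) gives the group's last capture, while {1[i]} / {x[i]}
+        # index its capture list; negative indices count from the end.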
+        self.assertEqual(regex.match(r'(?P<x>.)+',
+          'abc').expandf('{0} {0[0]} {0[-1]}'), 'abc abc abc')
+        self.assertEqual(regex.match(r'(?P<x>.)+',
+          'abc').expandf('{1} {1[0]} {1[1]} {1[2]} {1[-1]} {1[-2]} {1[-3]}'),
+          'c a b c c b a')
+        self.assertEqual(regex.match(r'(?P<x>.)+',
+          'abc').expandf('{x} {x[0]} {x[1]} {x[2]} {x[-1]} {x[-2]} {x[-3]}'),
+          'c a b c c b a')
+
+        self.assertEqual(regex.subf(r'(?P<x>.)+', r'{0} {0[0]} {0[-1]}',
+          'abc'), 'abc abc abc')
+        self.assertEqual(regex.subf(r'(?P<x>.)+',
+          '{1} {1[0]} {1[1]} {1[2]} {1[-1]} {1[-2]} {1[-3]}', 'abc'),
+          'c a b c c b a')
+        self.assertEqual(regex.subf(r'(?P<x>.)+',
+          '{x} {x[0]} {x[1]} {x[2]} {x[-1]} {x[-2]} {x[-3]}', 'abc'),
+          'c a b c c b a')
+
+    def test_more_zerowidth(self):
+        if sys.version_info >= (3, 7, 0):
+            self.assertEqual(regex.split(r'\b|:+', 'a::bc'), ['', 'a', '', '',
+              'bc', ''])
+            self.assertEqual(regex.sub(r'\b|:+', '-', 'a::bc'), '-a---bc-')
+            self.assertEqual(regex.findall(r'\b|:+', 'a::bc'), ['', '', '::',
+              '', ''])
+            self.assertEqual([m.span() for m in regex.finditer(r'\b|:+',
+              'a::bc')], [(0, 0), (1, 1), (1, 3), (3, 3), (5, 5)])
+            self.assertEqual([m.span() for m in regex.finditer(r'(?m)^\s*?$',
+              'foo\n\n\nbar')], [(4, 4), (4, 5), (5, 5)])
+
+    def test_line_ending(self):
+        self.assertEqual(regex.findall(r'\R', '\r\n\n\x0B\f\r\x85\u2028\u2029'),
+          ['\r\n', '\n', '\x0B', '\f', '\r', '\x85', '\u2028', '\u2029'])
+        self.assertEqual(regex.findall(br'\R', b'\r\n\n\x0B\f\r\x85'), [b'\r\n',
+          b'\n', b'\x0B', b'\f', b'\r'])
+
+def test_main():
+    unittest.main(verbosity=2)
+
+if __name__ == "__main__":
+    test_main()