None:
+ if filename:
+ click.echo(f"filename: {filename}")
+ if filepath:
+ click.echo(f"filepath: {filepath}")
+
+
+ if __name__ == "__main__":
+ cli()
+
+:Output:
+ .. code-block::
+
+ $ ./examples/click_validate.py --filename ab
+ filename: ab
+ $ ./examples/click_validate.py --filepath e?g
+ Usage: click_validate.py [OPTIONS]
+ Try 'click_validate.py --help' for help.
+
+    Error: Invalid value for '--filepath': [PV1100] invalid characters found: invalids=('?'), value='e?g', platform=Windows
+
+filename/filepath sanitizer for ``click``
+-------------------------------------------
+:Sample Code:
+ .. code-block:: python
+
+ import click
+
+ from pathvalidate.click import sanitize_filename_arg, sanitize_filepath_arg
+
+
+ @click.command()
+ @click.option("--filename", callback=sanitize_filename_arg)
+ @click.option("--filepath", callback=sanitize_filepath_arg)
+ def cli(filename, filepath):
+ if filename:
+ click.echo(f"filename: {filename}")
+ if filepath:
+ click.echo(f"filepath: {filepath}")
+
+
+ if __name__ == "__main__":
+ cli()
+
+:Output:
+ .. code-block::
+
+ $ ./examples/click_sanitize.py --filename a/b
+ filename: ab
+
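+The same sanitizing functions can also be called directly, outside of any CLI
+framework. A minimal sketch (the example strings are illustrative; the noted
+results assume the default "universal" platform rules):
+
+.. code-block:: python
+
+    from pathvalidate import sanitize_filename, sanitize_filepath
+
+    # Invalid filename characters are removed; only the filepath
+    # variant preserves path separators.
+    print(sanitize_filename("f?i*le.txt"))      # file.txt
+    print(sanitize_filepath("dir?/fi*le.txt"))  # dir/file.txt
+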
+For more information
+----------------------
+More examples can be found at
+https://pathvalidate.rtfd.io/en/latest/pages/examples/index.html
+
+Installation
+============
+Installation: pip
+------------------------------
+::
+
+ pip install pathvalidate
+
+Installation: conda
+------------------------------
+::
+
+ conda install -c thombashi pathvalidate
+
+Installation: apt
+------------------------------
+::
+
+ sudo add-apt-repository ppa:thombashi/ppa
+ sudo apt update
+ sudo apt install python3-pathvalidate
+
+
+Dependencies
+============
+Python 3.7+
+No external dependencies.
+
+Documentation
+===============
+https://pathvalidate.rtfd.io/
+
+Sponsors
+====================================
+.. image:: https://avatars.githubusercontent.com/u/44389260?s=48&u=6da7176e51ae2654bcfd22564772ef8a3bb22318&v=4
+ :target: https://github.com/chasbecker
+ :alt: Charles Becker (chasbecker)
+.. image:: https://avatars.githubusercontent.com/u/9919?s=48&v=4
+ :target: https://github.com/github
+ :alt: onetime: GitHub (github)
+.. image:: https://avatars.githubusercontent.com/u/46711571?s=48&u=57687c0e02d5d6e8eeaf9177f7b7af4c9f275eb5&v=4
+ :target: https://github.com/Arturi0
+ :alt: onetime: Arturi0
+.. image:: https://avatars.githubusercontent.com/u/3658062?s=48&v=4
+ :target: https://github.com/b4tman
+ :alt: onetime: Dmitry Belyaev (b4tman)
+
+`Become a sponsor <https://github.com/sponsors/thombashi>`__
+
diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..472d39f5440216b77e622c081c7c20454a991771
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD
@@ -0,0 +1,35 @@
+pathvalidate-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pathvalidate-3.2.0.dist-info/LICENSE,sha256=qT11vLB3TimQEGOAytrW3LLeGTxV1DX_xWujRaCLHcI,1084
+pathvalidate-3.2.0.dist-info/METADATA,sha256=Kc0RTAOHjVPeTIb-Fv8g162B0RcyDzI_Jj2nD9J8Gdk,11747
+pathvalidate-3.2.0.dist-info/RECORD,,
+pathvalidate-3.2.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+pathvalidate-3.2.0.dist-info/top_level.txt,sha256=AtoiECsrk-xZknk3ruLi-UweWuXhbKeEGDWFwMcK_ks,13
+pathvalidate/__init__.py,sha256=R8x0yEBF3dfwpTlGe1TJZ9XgOmO-tKGoEvpZgNA83Ys,1926
+pathvalidate/__pycache__/__init__.cpython-310.pyc,,
+pathvalidate/__pycache__/__version__.cpython-310.pyc,,
+pathvalidate/__pycache__/_base.cpython-310.pyc,,
+pathvalidate/__pycache__/_common.cpython-310.pyc,,
+pathvalidate/__pycache__/_const.cpython-310.pyc,,
+pathvalidate/__pycache__/_filename.cpython-310.pyc,,
+pathvalidate/__pycache__/_filepath.cpython-310.pyc,,
+pathvalidate/__pycache__/_ltsv.cpython-310.pyc,,
+pathvalidate/__pycache__/_symbol.cpython-310.pyc,,
+pathvalidate/__pycache__/_types.cpython-310.pyc,,
+pathvalidate/__pycache__/argparse.cpython-310.pyc,,
+pathvalidate/__pycache__/click.cpython-310.pyc,,
+pathvalidate/__pycache__/error.cpython-310.pyc,,
+pathvalidate/__pycache__/handler.cpython-310.pyc,,
+pathvalidate/__version__.py,sha256=R8MJHDvfFVYjKEFUDzFulsQ9h1EhLDaHtPVwKRedF-E,201
+pathvalidate/_base.py,sha256=NsynjO1IqYaG6rTbGkMx77OIfcUGSv51jLvMvIyyA1A,7443
+pathvalidate/_common.py,sha256=4JLadI56z-1xST0kfgjtiGMWCkmdlcfdrnZn5wIg_9k,3363
+pathvalidate/_const.py,sha256=UzAu38QxKjZDJEcJ-M99sQDnSpALIK7jJoZizFptiBw,686
+pathvalidate/_filename.py,sha256=YEhwJKEq73kLkqInYjbiagGO22q0iswiISzignbWZXE,17356
+pathvalidate/_filepath.py,sha256=z-QgwCNhy8KY6M8hK8JGeUh3YO-P4_7qAE1p9_LFSXc,18915
+pathvalidate/_ltsv.py,sha256=BuCgH-iLdptUbaghoLCXwk7DQFGBBFjuNGeDv2I0IsM,1203
+pathvalidate/_symbol.py,sha256=8kcG9D7IWCdfw3x18I8qSmA09vpHfQB2suVtMloGu28,2326
+pathvalidate/_types.py,sha256=3CRkyBkMvcPcFPigO-Kr18Z6RgGEgUdLK1cXBg8UjWc,180
+pathvalidate/argparse.py,sha256=z_z7inal8sw2wPwFjsMEMQ2zR3kACdK1qsItocXFf3Y,970
+pathvalidate/click.py,sha256=IvaOB4R7ivR3GNPGaROAzOGBcROWIIsZKADJ08hxab4,1077
+pathvalidate/error.py,sha256=t6ePXdcW3ALnv0c_iEDtjLA8hS7USopJamttH5bmnmQ,7531
+pathvalidate/handler.py,sha256=RDOka3TjLz91yqQdLirQmjhFyEt5PVepk6kmGAAes8o,3268
+pathvalidate/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7af485495e2b17cae08c465174724a368c6f087a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pathvalidate
diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..fff5008e36be4ff7d4994d6f84c62e89c8b1ac8e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/METADATA
@@ -0,0 +1,212 @@
+Metadata-Version: 2.1
+Name: peft
+Version: 0.10.0
+Summary: Parameter-Efficient Fine-Tuning (PEFT)
+Home-page: https://github.com/huggingface/peft
+Author: The HuggingFace team
+Author-email: sourab@huggingface.co
+License: Apache
+Keywords: deep learning
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.8.0
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: numpy (>=1.17)
+Requires-Dist: packaging (>=20.0)
+Requires-Dist: psutil
+Requires-Dist: pyyaml
+Requires-Dist: torch (>=1.13.0)
+Requires-Dist: transformers
+Requires-Dist: tqdm
+Requires-Dist: accelerate (>=0.21.0)
+Requires-Dist: safetensors
+Requires-Dist: huggingface-hub (>=0.17.0)
+Provides-Extra: dev
+Requires-Dist: black ; extra == 'dev'
+Requires-Dist: hf-doc-builder ; extra == 'dev'
+Requires-Dist: ruff (~=0.2.1) ; extra == 'dev'
+Provides-Extra: docs_specific
+Requires-Dist: black ; extra == 'docs_specific'
+Requires-Dist: hf-doc-builder ; extra == 'docs_specific'
+Provides-Extra: quality
+Requires-Dist: black ; extra == 'quality'
+Requires-Dist: hf-doc-builder ; extra == 'quality'
+Requires-Dist: ruff (~=0.2.1) ; extra == 'quality'
+Provides-Extra: test
+Requires-Dist: black ; extra == 'test'
+Requires-Dist: hf-doc-builder ; extra == 'test'
+Requires-Dist: ruff (~=0.2.1) ; extra == 'test'
+Requires-Dist: pytest ; extra == 'test'
+Requires-Dist: pytest-cov ; extra == 'test'
+Requires-Dist: pytest-xdist ; extra == 'test'
+Requires-Dist: parameterized ; extra == 'test'
+Requires-Dist: datasets ; extra == 'test'
+Requires-Dist: diffusers (<0.21.0) ; extra == 'test'
+Requires-Dist: scipy ; extra == 'test'
+
+
+# 🤗 PEFT
+
+State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods
+
+Fine-tuning large pretrained models is often prohibitively costly due to their scale. Parameter-Efficient Fine-Tuning (PEFT) methods enable efficient adaptation of large pretrained models to various downstream applications by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. This significantly decreases the computational and storage costs. Recent state-of-the-art PEFT techniques achieve performance comparable to fully fine-tuned models.
+
+PEFT is integrated with Transformers for easy model training and inference, Diffusers for conveniently managing different adapters, and Accelerate for distributed training and inference for really big models.
+
+> [!TIP]
+> Visit the [PEFT](https://huggingface.co/PEFT) organization to read about the PEFT methods implemented in the library and to see notebooks demonstrating how to apply these methods to a variety of downstream tasks. Click the "Watch repos" button on the organization page to be notified of newly implemented methods and notebooks!
+
+Check the PEFT Adapters API Reference section for a list of supported PEFT methods, and read the [Adapters](https://huggingface.co/docs/peft/en/conceptual_guides/adapter), [Soft prompts](https://huggingface.co/docs/peft/en/conceptual_guides/prompting), and [IA3](https://huggingface.co/docs/peft/en/conceptual_guides/ia3) conceptual guides to learn more about how these methods work.
+
+## Quickstart
+
+Install PEFT from pip:
+
+```bash
+pip install peft
+```
+
+Prepare a model for training with a PEFT method such as LoRA by wrapping the base model and PEFT configuration with `get_peft_model`. For the bigscience/mt0-large model, you're only training 0.19% of the parameters!
+
+```python
+from transformers import AutoModelForSeq2SeqLM
+from peft import get_peft_config, get_peft_model, LoraConfig, TaskType
+model_name_or_path = "bigscience/mt0-large"
+tokenizer_name_or_path = "bigscience/mt0-large"
+
+peft_config = LoraConfig(
+ task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
+)
+
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
+model = get_peft_model(model, peft_config)
+model.print_trainable_parameters()
+"trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282"
+```
+
+To load a PEFT model for inference:
+
+```py
+from peft import AutoPeftModelForCausalLM
+from transformers import AutoTokenizer
+import torch
+
+model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora").to("cuda")
+tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+
+model.eval()
+inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt")
+
+outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50)
+print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+
+"Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla."
+```
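+
+A `PeftModel` can also hold several adapters at once and switch between them. A minimal sketch continuing the example above (the adapter name `"other"` is an illustrative placeholder):
+
+```py
+# Load a second adapter next to the default one, then make it the active one.
+model.load_adapter("ybelkada/opt-350m-lora", adapter_name="other")
+model.set_adapter("other")
+```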
+
+## Why you should use PEFT
+
+There are many benefits of using PEFT, but the main one is the huge savings in compute and storage, making PEFT applicable to many different use cases.
+
+### High performance on consumer hardware
+
+Consider the memory requirements for training the following models on the [ought/raft/twitter_complaints](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) dataset with an A100 80GB GPU with more than 64GB of CPU RAM.
+
+| Model | Full Finetuning | PEFT-LoRA PyTorch | PEFT-LoRA DeepSpeed with CPU Offloading |
+| --------- | ---- | ---- | ---- |
+| bigscience/T0_3B (3B params) | 47.14GB GPU / 2.96GB CPU | 14.4GB GPU / 2.96GB CPU | 9.8GB GPU / 17.8GB CPU |
+| bigscience/mt0-xxl (12B params) | OOM GPU | 56GB GPU / 3GB CPU | 22GB GPU / 52GB CPU |
+| bigscience/bloomz-7b1 (7B params) | OOM GPU | 32GB GPU / 3.8GB CPU | 18.1GB GPU / 35GB CPU |
+
+With LoRA you can fully finetune a 12B parameter model that would've otherwise run out of memory on the 80GB GPU, and comfortably fit and train a 3B parameter model. When you look at the 3B parameter model's performance, it is comparable to a fully finetuned model at a fraction of the GPU memory.
+
+| Submission Name | Accuracy |
+| --------- | ---- |
+| Human baseline (crowdsourced) | 0.897 |
+| Flan-T5 | 0.892 |
+| lora-t0-3b | 0.863 |
+
+> [!TIP]
+> The bigscience/T0_3B model performance isn't optimized in the table above. You can squeeze even more performance out of it by playing around with the input instruction templates, LoRA hyperparameters, and other training related hyperparameters. The final checkpoint size of this model is just 19MB compared to 11GB of the full bigscience/T0_3B model. Learn more about the advantages of finetuning with PEFT in this [blog post](https://www.philschmid.de/fine-tune-flan-t5-peft).
+
+### Quantization
+
+Quantization is another method for reducing the memory requirements of a model by representing the data in a lower precision. It can be combined with PEFT methods to make it even easier to train and load LLMs for inference.
+
+* Learn how to finetune [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) with QLoRA and the [TRL](https://huggingface.co/docs/trl/index) library on a 16GB GPU in the [Finetune LLMs on your own consumer hardware using tools from PyTorch and Hugging Face ecosystem](https://pytorch.org/blog/finetune-llms/) blog post.
+* Learn how to finetune a [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition with LoRA and 8-bit quantization in this [notebook](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) (see this [notebook](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing) instead for an example of streaming a dataset).
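+
+As a rough sketch of how quantization and PEFT combine (assuming the `bitsandbytes` package is installed; the model ID and LoRA hyperparameters below are illustrative):
+
+```py
+import torch
+from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
+
+# Load the base model with its linear layers quantized to 4-bit.
+bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
+model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)
+
+# Prepare the quantized model for training (casts layer norms, enables input grads).
+model = prepare_model_for_kbit_training(model)
+
+# Attach LoRA adapters; only these small matrices receive gradients.
+model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM", r=8, lora_alpha=32))
+model.print_trainable_parameters()
+```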
+
+### Save compute and storage
+
+PEFT can help you save storage by avoiding full finetuning of models on each downstream task or dataset. In many cases, you're only finetuning a very small fraction of a model's parameters and each checkpoint is only a few MBs in size (instead of GBs). These smaller PEFT adapters demonstrate performance comparable to a fully finetuned model. If you have many datasets, you can save a lot of storage with a PEFT model and not have to worry about catastrophic forgetting or overfitting the backbone or base model.
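+
+Because only the adapter is saved, sharing and reloading a finetuned model is cheap. A minimal sketch (the adapter directory name is an illustrative placeholder):
+
+```py
+from transformers import AutoModelForSeq2SeqLM
+from peft import PeftModel
+
+# After training, this writes only the small adapter weights, not the base model:
+# model.save_pretrained("my-lora-adapter")
+
+# To reuse the adapter later, load the base model once and attach it on top.
+base = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large")
+model = PeftModel.from_pretrained(base, "my-lora-adapter")
+```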
+
+## PEFT integrations
+
+PEFT is widely supported across the Hugging Face ecosystem because of the massive efficiency it brings to training and inference.
+
+### Diffusers
+
+The iterative diffusion process consumes a lot of memory, which can make training difficult. PEFT can help reduce the memory requirements and the storage size of the final model checkpoint. For example, consider the memory required for training a Stable Diffusion model with LoRA on an A100 80GB GPU with more than 64GB of CPU RAM. The final model checkpoint size is only 8.8MB!
+
+| Model | Full Finetuning | PEFT-LoRA | PEFT-LoRA with Gradient Checkpointing |
+| --------- | ---- | ---- | ---- |
+| CompVis/stable-diffusion-v1-4 | 27.5GB GPU / 3.97GB CPU | 15.5GB GPU / 3.84GB CPU | 8.12GB GPU / 3.77GB CPU |
+
+> [!TIP]
+> Take a look at the [examples/lora_dreambooth/train_dreambooth.py](examples/lora_dreambooth/train_dreambooth.py) training script to try training your own Stable Diffusion model with LoRA, and play around with the [smangrul/peft-lora-sd-dreambooth](https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth) Space which is running on a T4 instance. Learn more about the PEFT integration in Diffusers in this [tutorial](https://huggingface.co/docs/peft/main/en/tutorial/peft_integrations#diffusers).
+
+### Accelerate
+
+[Accelerate](https://huggingface.co/docs/accelerate/index) is a library for distributed training and inference on various training setups and hardware (GPUs, TPUs, Apple Silicon, etc.). PEFT models work with Accelerate out of the box, making it convenient to train very large models or run inference with them on consumer hardware with limited resources.
+
+### TRL
+
+PEFT can also be applied to training LLMs with RLHF components such as the ranker and policy. Get started by reading:
+
+* [Fine-tune a Mistral-7b model with Direct Preference Optimization](https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library to learn more about the Direct Preference Optimization (DPO) method and how to apply it to an LLM.
+* [Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU](https://huggingface.co/blog/trl-peft) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library, and then try out the [gpt2-sentiment_peft.ipynb](https://github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook to optimize GPT2 to generate positive movie reviews.
+* [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama) with PEFT, and then try out the [stack_llama/scripts](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts) for supervised finetuning, reward modeling, and RL finetuning.
+
+## Model support
+
+Use this [Space](https://stevhliu-peft-methods.hf.space) or check out the [docs](https://huggingface.co/docs/peft/main/en/index) to find which models officially support a PEFT method out of the box. Even if a model isn't listed there, you can manually configure the model config to enable PEFT for it. Read the [New transformers architecture](https://huggingface.co/docs/peft/main/en/developer_guides/custom_models#new-transformers-architectures) guide to learn how.
+
+## Contribute
+
+If you would like to contribute to PEFT, please check out our [contribution guide](https://huggingface.co/docs/peft/developer_guides/contributing).
+
+## Citing 🤗 PEFT
+
+To use 🤗 PEFT in your publication, please cite it by using the following BibTeX entry.
+
+```bibtex
+@Misc{peft,
+ title = {PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods},
+ author = {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan},
+ howpublished = {\url{https://github.com/huggingface/peft}},
+ year = {2022}
+}
+```
diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..d8a9ed582072de582eb8b79a65d99d2b4da41700
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/RECORD
@@ -0,0 +1,157 @@
+peft-0.10.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+peft-0.10.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+peft-0.10.0.dist-info/METADATA,sha256=rVMHyukd8k_nEqs19E5ZPaco9GqrkHrI1m6LGW3ZTc4,13215
+peft-0.10.0.dist-info/RECORD,,
+peft-0.10.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+peft-0.10.0.dist-info/top_level.txt,sha256=DOKoqHe6fr-A3g26PPWvf5bHLy8fHKhflUO5xzJJEUY,5
+peft/__init__.py,sha256=kL2rBDEEvPAJ2CpnXnwOa5IXylk9a7KRTJXSAedS6ZE,2514
+peft/__pycache__/__init__.cpython-310.pyc,,
+peft/__pycache__/auto.cpython-310.pyc,,
+peft/__pycache__/config.cpython-310.pyc,,
+peft/__pycache__/helpers.cpython-310.pyc,,
+peft/__pycache__/import_utils.cpython-310.pyc,,
+peft/__pycache__/mapping.cpython-310.pyc,,
+peft/__pycache__/mixed_model.cpython-310.pyc,,
+peft/__pycache__/peft_model.cpython-310.pyc,,
+peft/auto.py,sha256=03OkfIQ_xhziupLLVP5wFYt8pmMOxwjCD5ik1vuNHVk,6568
+peft/config.py,sha256=W6kNwSDRySZyV_1tBkQzim6PMmM6s4VAQCd8d__Q_fE,10908
+peft/helpers.py,sha256=ycZIsMacCi_-WLhsQsWsiweFr3iS8EIVIBDYfcQYBc0,4423
+peft/import_utils.py,sha256=PefA5udnA0LhTOjLvsPsLuDIOQsbvdtm_klzxsNUmAA,2382
+peft/mapping.py,sha256=e7Ei7fcLwVNAuiiZmbitmifqvKTrusnQkd1nzdQh-Vs,5916
+peft/mixed_model.py,sha256=swX0HCZedYXDPASwTqdNn3FGBhzqhRCWRKdHDLg6pV4,16572
+peft/peft_model.py,sha256=kWt8pvOAzFsv4D8uMh1B8jmkA0PEAlSDpf5hul8KxMQ,89505
+peft/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+peft/tuners/__init__.py,sha256=N75esatM3Zuf1BmfjWqnCQSv-IevaNtKsWanFPMzq8g,1535
+peft/tuners/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc,,
+peft/tuners/__pycache__/tuners_utils.cpython-310.pyc,,
+peft/tuners/adalora/__init__.py,sha256=iT-UeicKex8znLIwoOr642odMVues4KZneN_e1Hz6MQ,1298
+peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc,,
+peft/tuners/adalora/__pycache__/config.cpython-310.pyc,,
+peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc,,
+peft/tuners/adalora/__pycache__/layer.cpython-310.pyc,,
+peft/tuners/adalora/__pycache__/model.cpython-310.pyc,,
+peft/tuners/adalora/bnb.py,sha256=QZeSa84I564wHq-PjNKiX3IUWi9jYW1t_AlVxroNhCs,5606
+peft/tuners/adalora/config.py,sha256=P0yXBb0OfTXEUrgfqCkVauAVIOm4Q8UfCNlgt4mdY2Q,2659
+peft/tuners/adalora/gptq.py,sha256=nsehwVjP9e1H6rXwuS3dID8lqEQbRXK98Hogzsm8MeE,2719
+peft/tuners/adalora/layer.py,sha256=z6dXGLNNastpo0xZWoX83R9lNxOwJJRehbhdx6KUhZM,14352
+peft/tuners/adalora/model.py,sha256=XqcyNkxUfXwAz60QnMK60gfXhE-ZhgKUXUpCHbDYlko,15302
+peft/tuners/adaption_prompt/__init__.py,sha256=gOenS_7j87CTvgb_xPaW2K8-PfVwHndb3EHdofV3BGU,794
+peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc,,
+peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc,,
+peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc,,
+peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc,,
+peft/tuners/adaption_prompt/config.py,sha256=_wECOCgJBVEg1YDOqpuMt6Glo3OglFwdHfsnfQJZdms,2803
+peft/tuners/adaption_prompt/layer.py,sha256=GCBW_2eEGmUOZS_FyQ-HgbbWOJkCLCKq89B3quKTKLs,5833
+peft/tuners/adaption_prompt/model.py,sha256=wUcz0G8UBXCchOWP14zj-iKbbd707WR-ti6wj60vuck,7464
+peft/tuners/adaption_prompt/utils.py,sha256=W9qL4LLgwyM0jMcSvQOy1DzEzADVRzL61Gh9Hx_rRvw,5708
+peft/tuners/ia3/__init__.py,sha256=72N2yY-5xQRq5cxmxLkm73JwNF3AZUhG8hdJ4g834uU,1185
+peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc,,
+peft/tuners/ia3/__pycache__/config.cpython-310.pyc,,
+peft/tuners/ia3/__pycache__/layer.cpython-310.pyc,,
+peft/tuners/ia3/__pycache__/model.cpython-310.pyc,,
+peft/tuners/ia3/bnb.py,sha256=GvoVKQS0t4u5Xnq-isEMLi8m_VgZdBZPc6LrVEZdHkM,4668
+peft/tuners/ia3/config.py,sha256=K27xLoIIo_meHt--iE9H-kfmLUycXNP2eV2cBQJKERE,5152
+peft/tuners/ia3/layer.py,sha256=4CMLwAKN4N2dvuWeabVmRxqAAoqznHnYcEdUrMYLrEI,13661
+peft/tuners/ia3/model.py,sha256=5wL76PXcMV197yH7u0Sa1L3zCjTVWEvSN0VFysG34fo,16597
+peft/tuners/loha/__init__.py,sha256=lHzf9_TXvsqW6SCVnMjeMsgqD6Vdv8c6L2Ob6joeoio,777
+peft/tuners/loha/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/loha/__pycache__/config.cpython-310.pyc,,
+peft/tuners/loha/__pycache__/layer.cpython-310.pyc,,
+peft/tuners/loha/__pycache__/model.cpython-310.pyc,,
+peft/tuners/loha/config.py,sha256=aKe2JegDgAj4l9oV-SQr1UWo80LivP45jM35fpXOEfc,6037
+peft/tuners/loha/layer.py,sha256=BtPrDc1NtpcYj2_NW0s25lC4S--jVvMFyVwBnURCBFM,15606
+peft/tuners/loha/model.py,sha256=boeR0HwZu8F1EJEM1zb7gs5jaAKO_aKXu1TsIUwDuH8,4205
+peft/tuners/lokr/__init__.py,sha256=s8o_tkrdXpN7ZUXEMUoFxXxmg8_Fj9pRHlDqn16Ie8c,777
+peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/lokr/__pycache__/config.cpython-310.pyc,,
+peft/tuners/lokr/__pycache__/layer.cpython-310.pyc,,
+peft/tuners/lokr/__pycache__/model.cpython-310.pyc,,
+peft/tuners/lokr/config.py,sha256=OworzmruIz37l3BsQ0C-7sULL7Es4AUMuKbK_wWvjrk,6305
+peft/tuners/lokr/layer.py,sha256=D8VJ6MXnexS6ieFvXGciJObj_NMjPvHG1-7zrDBc1Tk,15469
+peft/tuners/lokr/model.py,sha256=s6OMs72nQJXZnDSjQW3HX9UrprktIP0p61l2ymsZgQY,4259
+peft/tuners/lora/__init__.py,sha256=3J6qLQHY9iZbkJ5bgD0uDUtXicIn4cDQi8z5lnjTF4s,1288
+peft/tuners/lora/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc,,
+peft/tuners/lora/__pycache__/awq.cpython-310.pyc,,
+peft/tuners/lora/__pycache__/bnb.cpython-310.pyc,,
+peft/tuners/lora/__pycache__/config.cpython-310.pyc,,
+peft/tuners/lora/__pycache__/gptq.cpython-310.pyc,,
+peft/tuners/lora/__pycache__/layer.cpython-310.pyc,,
+peft/tuners/lora/__pycache__/model.cpython-310.pyc,,
+peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc,,
+peft/tuners/lora/aqlm.py,sha256=jqxOpFyTIOtRJJe5YzcF0vxIPSUdEW4kAvnzkollhS4,3320
+peft/tuners/lora/awq.py,sha256=Y7_sgl3WorxAtppyCuzLmCH62FaW_IQE6RwQ_YoH4PA,3693
+peft/tuners/lora/bnb.py,sha256=fFfpehst9kRqQR2xBF2o6dCLmxIIaiHVMFXO-rLCsH4,22415
+peft/tuners/lora/config.py,sha256=02hCQLNna2kD8p5PxdTKx1TpwUuZj0hqWeOm8GAoacI,17322
+peft/tuners/lora/gptq.py,sha256=dMFl7H157DkHVD7sjX9BE8nUGdMXQG46VUqn5wYWn6o,3969
+peft/tuners/lora/layer.py,sha256=sK10nz2sUO8YMKrOue48-Ey7qjlNPRpGaoEFnCCKcNo,47596
+peft/tuners/lora/model.py,sha256=E2bSxGxK-tRkEhcM969fg-5Wf5BTH7QlEKPNDsDHvpo,34612
+peft/tuners/lora/tp_layer.py,sha256=xfvluzH_7D5cPicodHJPILNvpPL4F2Qpfi5SOaH1GmM,8829
+peft/tuners/lycoris_utils.py,sha256=JAL2tBhcYTHy8V74TGEKXceTxlAIhWa21xFH800av70,16629
+peft/tuners/mixed/__init__.py,sha256=see7CbOiJJ-E8W1QSSBtIK4oQfFnkJOUVU5xckCYyzw,706
+peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/mixed/__pycache__/model.cpython-310.pyc,,
+peft/tuners/mixed/model.py,sha256=eLqWsKvNcTii3Kgcbqnp2jv4_LtQwixRJd9Ayn_vDhA,15006
+peft/tuners/multitask_prompt_tuning/__init__.py,sha256=_Vm3xHt9URAAAYg_XtA-dWJC-vsNs39hW8Bntym3L-I,819
+peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc,,
+peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc,,
+peft/tuners/multitask_prompt_tuning/config.py,sha256=5G9MbSB9m2DCxhwYE5RZ6ZvSfQvt641k_PBz7XA3Ac8,2446
+peft/tuners/multitask_prompt_tuning/model.py,sha256=GczbBvzlm_VKrRrB1I-MW9Exm6S9-hQIGCdA5me2eoU,4659
+peft/tuners/oft/__init__.py,sha256=B9DhFqanuJW0VSZa9Fwe-lUBw4UczAv3BvsQaKu8whE,771
+peft/tuners/oft/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/oft/__pycache__/config.cpython-310.pyc,,
+peft/tuners/oft/__pycache__/layer.cpython-310.pyc,,
+peft/tuners/oft/__pycache__/model.cpython-310.pyc,,
+peft/tuners/oft/config.py,sha256=mLSje1UnT2Aor1R7miKsKCKKMeQPc8vUC3SBUCkw4EE,5826
+peft/tuners/oft/layer.py,sha256=cWfJguXCVTzworW8XW9ndp54iSVLrPUVMoTmQ2PZoiM,15591
+peft/tuners/oft/model.py,sha256=DJzJIrKd0MGqCpoNQXSstC5cfACsXt6IK4C8y8Rc-yg,3695
+peft/tuners/p_tuning/__init__.py,sha256=XUQbMT5GTftYRCGNkWiWfAJRBDuv8pWnALRW67HtEDU,801
+peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc,,
+peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc,,
+peft/tuners/p_tuning/config.py,sha256=konpCRWcmzPQgDkvz0gA-xC856poHpyYAWXqES7qBzk,2110
+peft/tuners/p_tuning/model.py,sha256=rv2mbmPOArAefeqhJ09Oz7XNGaScWGRT1hgKlrfhfAw,5575
+peft/tuners/poly/__init__.py,sha256=yCiWTO7o2rWmvAM4CNKyllvIvtE6_QnHuEjbKx7jhgI,759
+peft/tuners/poly/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/poly/__pycache__/config.cpython-310.pyc,,
+peft/tuners/poly/__pycache__/layer.cpython-310.pyc,,
+peft/tuners/poly/__pycache__/model.cpython-310.pyc,,
+peft/tuners/poly/__pycache__/router.cpython-310.pyc,,
+peft/tuners/poly/config.py,sha256=FqAjrybzzD4eATX_jG-GBr4QeBDOVE3xTcXKCrQn-dw,3783
+peft/tuners/poly/layer.py,sha256=ArvY2sOnI6xVI-mglFfevOKKKXJPhs9xtFivZkOWWnA,6890
+peft/tuners/poly/model.py,sha256=VtzkgWnjnbAAAf1WMNMUoPUTq6-W58kKIJRWmlinMBk,6782
+peft/tuners/poly/router.py,sha256=qIX6jEI_FSb5Rr5YhIWo8Cr5ZiAmDCiydT7lTuEICp8,2800
+peft/tuners/prefix_tuning/__init__.py,sha256=KCQnKQoFTfx0M2HkQANF36PO6y62oD24gmkdGnpWrXc,723
+peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc,,
+peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc,,
+peft/tuners/prefix_tuning/config.py,sha256=JUCnCtime_7V5iJyX6iBk_8SeYCpJ_Mw-RGYmgzz2Qg,1386
+peft/tuners/prefix_tuning/model.py,sha256=FyE_EBtvvA9bZDX-GRWN6s0MQtcUe57PLD4qRT2ACww,3007
+peft/tuners/prompt_tuning/__init__.py,sha256=HyENdH-fwAAdwsEqFX4Pd4MLfCAZxBiXGqyjxvU4CAE,765
+peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc,,
+peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc,,
+peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc,,
+peft/tuners/prompt_tuning/config.py,sha256=RbfmCdnbQ6t0PTwo8NcnVbkQKyHIJaljptRRhRClhJ0,3472
+peft/tuners/prompt_tuning/model.py,sha256=rCCft8hRjVtQIR536hUfhT1ICBuDqWBGSTMPGNdAwTg,3623
+peft/tuners/tuners_utils.py,sha256=iwIWbOXidvE_FlviBi5rZlbjBIRNxIraAwxnw9hXyds,31283
+peft/utils/__init__.py,sha256=HA_S0vk8wMs_85BHxyX3pfdvWhl9F3XxvVzUOTsg42Q,1947
+peft/utils/__pycache__/__init__.cpython-310.pyc,,
+peft/utils/__pycache__/constants.cpython-310.pyc,,
+peft/utils/__pycache__/integrations.cpython-310.pyc,,
+peft/utils/__pycache__/loftq_utils.cpython-310.pyc,,
+peft/utils/__pycache__/merge_utils.cpython-310.pyc,,
+peft/utils/__pycache__/other.cpython-310.pyc,,
+peft/utils/__pycache__/peft_types.cpython-310.pyc,,
+peft/utils/__pycache__/save_and_load.cpython-310.pyc,,
+peft/utils/constants.py,sha256=6B6JZDmrqqCPtyU8IA8uaXRkibv-IBID6i19CV3A7C4,5751
+peft/utils/integrations.py,sha256=yFJVpki0wJWqHXADdKbMX4Rpa-jkpl0mJ9l5DAVcwTI,2568
+peft/utils/loftq_utils.py,sha256=ouhJhFkrVl31Q0TvMHWLqmiTkFOixq-aSmzappRa4k4,17180
+peft/utils/merge_utils.py,sha256=IAd6DlbPowxAEiuC5OaMwg9hmdO6068DOU18CJ00VIU,9905
+peft/utils/other.py,sha256=gDmVrWGxGCK0KlwBt3JueE7O28uhoPuCbANX0QcbU_8,24273
+peft/utils/peft_types.py,sha256=CIvazG4k07ONOacJKIS8iigYmRj-GxG37v7q57ZzKE0,2147
+peft/utils/save_and_load.py,sha256=5GKT_XQ1xbz9XLUErnmolBWcAgHU_EHaMx6kmoL60fI,14820
diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dc89ba063e639dfa24fd7f53340bc368adbebb7a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/peft-0.10.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+peft
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__init__.py b/env-llmeval/lib/python3.10/site-packages/regex/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb06564ab033a2b0b501f7f41efb169dacd1f801
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/regex/__init__.py
@@ -0,0 +1,3 @@
+from .regex import *
+from . import regex
+__all__ = regex.__all__
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf0ea57ae28aef9d0ea5f884605a19381b78b7fd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1683a04c61229d2cf1ad6fd0e2f1d609c1b7938e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1729cfc7bc9e57f37fed836c6680d6e2c8a3db1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/test_regex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/test_regex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b68ba84e64140751d1d4ed7dfedf25dc1e57c72
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/regex/__pycache__/test_regex.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/_regex_core.py b/env-llmeval/lib/python3.10/site-packages/regex/_regex_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2314f80011256ca81cfb38f65b3485e1956e94e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/regex/_regex_core.py
@@ -0,0 +1,4492 @@
+#
+# Secret Labs' Regular Expression Engine core module
+#
+# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
+#
+# This version of the SRE library can be redistributed under CNRI's
+# Python 1.6 license. For any other use, please contact Secret Labs
+# AB (info@pythonware.com).
+#
+# Portions of this engine have been developed in cooperation with
+# CNRI. Hewlett-Packard provided funding for 1.6 integration and
+# other compatibility work.
+#
+# 2010-01-16 mrab Python front-end re-written and extended
+
+import enum
+import string
+import unicodedata
+from collections import defaultdict
+
+import regex._regex as _regex
+
+__all__ = ["A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH",
+ "F", "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "P",
+ "POSIX", "R", "REVERSE", "S", "DOTALL", "T", "TEMPLATE", "U", "UNICODE",
+ "V0", "VERSION0", "V1", "VERSION1", "W", "WORD", "X", "VERBOSE", "error",
+ "Scanner", "RegexFlag"]
+
+# The regex exception.
+class error(Exception):
+ """Exception raised for invalid regular expressions.
+
+ Attributes:
+
+ msg: The unformatted error message
+ pattern: The regular expression pattern
+ pos: The position in the pattern where compilation failed, or None
+ lineno: The line number where compilation failed, unless pos is None
+ colno: The column number where compilation failed, unless pos is None
+ """
+
+ def __init__(self, message, pattern=None, pos=None):
+ newline = '\n' if isinstance(pattern, str) else b'\n'
+ self.msg = message
+ self.pattern = pattern
+ self.pos = pos
+ if pattern is not None and pos is not None:
+ self.lineno = pattern.count(newline, 0, pos) + 1
+ self.colno = pos - pattern.rfind(newline, 0, pos)
+
+ message = "{} at position {}".format(message, pos)
+
+ if newline in pattern:
+ message += " (line {}, column {})".format(self.lineno,
+ self.colno)
+
+ Exception.__init__(self, message)
+
+# The exception for when a positional flag has been turned on in the old
+# behaviour.
+class _UnscopedFlagSet(Exception):
+ pass
+
+# The exception for when parsing fails and we want to try something else.
+class ParseError(Exception):
+ pass
+
+# The exception for when there isn't a valid first set.
+class _FirstSetError(Exception):
+ pass
+
+# Flags.
+class RegexFlag(enum.IntFlag):
+ A = ASCII = 0x80 # Assume ASCII locale.
+ B = BESTMATCH = 0x1000 # Best fuzzy match.
+ D = DEBUG = 0x200 # Print parsed pattern.
+ E = ENHANCEMATCH = 0x8000 # Attempt to improve the fit after finding the first
+ # fuzzy match.
+ F = FULLCASE = 0x4000 # Unicode full case-folding.
+ I = IGNORECASE = 0x2 # Ignore case.
+ L = LOCALE = 0x4 # Assume current 8-bit locale.
+ M = MULTILINE = 0x8 # Make anchors look for newline.
+ P = POSIX = 0x10000 # POSIX-style matching (leftmost longest).
+ R = REVERSE = 0x400 # Search backwards.
+ S = DOTALL = 0x10 # Make dot match newline.
+ U = UNICODE = 0x20 # Assume Unicode locale.
+ V0 = VERSION0 = 0x2000 # Old legacy behaviour.
+ V1 = VERSION1 = 0x100 # New enhanced behaviour.
+ W = WORD = 0x800 # Default Unicode word breaks.
+ X = VERBOSE = 0x40 # Ignore whitespace and comments.
+ T = TEMPLATE = 0x1 # Template (present because re module has it).
+
+ def __repr__(self):
+ if self._name_ is not None:
+ return 'regex.%s' % self._name_
+
+ value = self._value_
+ members = []
+ negative = value < 0
+
+ if negative:
+ value = ~value
+
+ for m in self.__class__:
+ if value & m._value_:
+ value &= ~m._value_
+ members.append('regex.%s' % m._name_)
+
+ if value:
+ members.append(hex(value))
+
+ res = '|'.join(members)
+
+ if negative:
+ if len(members) > 1:
+ res = '~(%s)' % res
+ else:
+ res = '~%s' % res
+
+ return res
+
+ __str__ = object.__str__
+
+globals().update(RegexFlag.__members__)
+
+DEFAULT_VERSION = VERSION1
+
+_ALL_VERSIONS = VERSION0 | VERSION1
+_ALL_ENCODINGS = ASCII | LOCALE | UNICODE
+
+# The default flags for the various versions.
+DEFAULT_FLAGS = {VERSION0: 0, VERSION1: FULLCASE}
+
+# The mask for the flags.
+GLOBAL_FLAGS = (_ALL_VERSIONS | BESTMATCH | DEBUG | ENHANCEMATCH | POSIX |
+ REVERSE)
+SCOPED_FLAGS = (FULLCASE | IGNORECASE | MULTILINE | DOTALL | WORD | VERBOSE |
+ _ALL_ENCODINGS)
+
+ALPHA = frozenset(string.ascii_letters)
+DIGITS = frozenset(string.digits)
+ALNUM = ALPHA | DIGITS
+OCT_DIGITS = frozenset(string.octdigits)
+HEX_DIGITS = frozenset(string.hexdigits)
+SPECIAL_CHARS = frozenset("()|?*+{^$.[\\#") | frozenset([""])
+NAMED_CHAR_PART = ALNUM | frozenset(" -")
+PROPERTY_NAME_PART = ALNUM | frozenset(" &_-.")
+SET_OPS = ("||", "~~", "&&", "--")
+
+# The width of the code words inside the regex engine.
+BYTES_PER_CODE = _regex.get_code_size()
+BITS_PER_CODE = BYTES_PER_CODE * 8
+
+# The repeat count which represents infinity.
+UNLIMITED = (1 << BITS_PER_CODE) - 1
+
+# The regular expression flags.
+REGEX_FLAGS = {"a": ASCII, "b": BESTMATCH, "e": ENHANCEMATCH, "f": FULLCASE,
+ "i": IGNORECASE, "L": LOCALE, "m": MULTILINE, "p": POSIX, "r": REVERSE,
+ "s": DOTALL, "u": UNICODE, "V0": VERSION0, "V1": VERSION1, "w": WORD, "x":
+ VERBOSE}
+
+# The case flags.
+CASE_FLAGS = FULLCASE | IGNORECASE
+NOCASE = 0
+FULLIGNORECASE = FULLCASE | IGNORECASE
+
+FULL_CASE_FOLDING = UNICODE | FULLIGNORECASE
+
+CASE_FLAGS_COMBINATIONS = {0: 0, FULLCASE: 0, IGNORECASE: IGNORECASE,
+ FULLIGNORECASE: FULLIGNORECASE}
+
+# The number of digits in hexadecimal escapes.
+HEX_ESCAPES = {"x": 2, "u": 4, "U": 8}
+
+# The names of the opcodes.
+OPCODES = """
+FAILURE
+SUCCESS
+ANY
+ANY_ALL
+ANY_ALL_REV
+ANY_REV
+ANY_U
+ANY_U_REV
+ATOMIC
+BOUNDARY
+BRANCH
+CALL_REF
+CHARACTER
+CHARACTER_IGN
+CHARACTER_IGN_REV
+CHARACTER_REV
+CONDITIONAL
+DEFAULT_BOUNDARY
+DEFAULT_END_OF_WORD
+DEFAULT_START_OF_WORD
+END
+END_OF_LINE
+END_OF_LINE_U
+END_OF_STRING
+END_OF_STRING_LINE
+END_OF_STRING_LINE_U
+END_OF_WORD
+FUZZY
+GRAPHEME_BOUNDARY
+GREEDY_REPEAT
+GROUP
+GROUP_CALL
+GROUP_EXISTS
+KEEP
+LAZY_REPEAT
+LOOKAROUND
+NEXT
+PROPERTY
+PROPERTY_IGN
+PROPERTY_IGN_REV
+PROPERTY_REV
+PRUNE
+RANGE
+RANGE_IGN
+RANGE_IGN_REV
+RANGE_REV
+REF_GROUP
+REF_GROUP_FLD
+REF_GROUP_FLD_REV
+REF_GROUP_IGN
+REF_GROUP_IGN_REV
+REF_GROUP_REV
+SEARCH_ANCHOR
+SET_DIFF
+SET_DIFF_IGN
+SET_DIFF_IGN_REV
+SET_DIFF_REV
+SET_INTER
+SET_INTER_IGN
+SET_INTER_IGN_REV
+SET_INTER_REV
+SET_SYM_DIFF
+SET_SYM_DIFF_IGN
+SET_SYM_DIFF_IGN_REV
+SET_SYM_DIFF_REV
+SET_UNION
+SET_UNION_IGN
+SET_UNION_IGN_REV
+SET_UNION_REV
+SKIP
+START_OF_LINE
+START_OF_LINE_U
+START_OF_STRING
+START_OF_WORD
+STRING
+STRING_FLD
+STRING_FLD_REV
+STRING_IGN
+STRING_IGN_REV
+STRING_REV
+FUZZY_EXT
+"""
+
+# Define the opcodes in a namespace.
+class Namespace:
+ pass
+
+OP = Namespace()
+for i, op in enumerate(OPCODES.split()):
+ setattr(OP, op, i)
+
+def _shrink_cache(cache_dict, args_dict, locale_sensitive, max_length, divisor=5):
+ """Make room in the given cache.
+
+ Args:
+ cache_dict: The cache dictionary to modify.
+ args_dict: The dictionary of named list args used by patterns.
+ max_length: Maximum # of entries in cache_dict before it is shrunk.
+ divisor: Cache will shrink to max_length - 1/divisor*max_length items.
+ """
+ # Toss out a fraction of the entries at random to make room for new ones.
+ # A random algorithm was chosen as opposed to simply cache_dict.popitem()
+ # as popitem could penalize the same regular expression repeatedly based
+ # on its internal hash value. Being random should spread the cache miss
+ # love around.
+ cache_keys = tuple(cache_dict.keys())
+ overage = len(cache_keys) - max_length
+ if overage < 0:
+ # Cache is already within limits. Normally this should not happen
+ # but it could due to multithreading.
+ return
+
+ number_to_toss = max_length // divisor + overage
+
+ # The import is done here to avoid a circular dependency.
+ import random
+ if not hasattr(random, 'sample'):
+ # Do nothing while resolving the circular dependency:
+ # re->random->warnings->tokenize->string->re
+ return
+
+ for doomed_key in random.sample(cache_keys, number_to_toss):
+ try:
+ del cache_dict[doomed_key]
+ except KeyError:
+ # Ignore problems if the cache changed from another thread.
+ pass
+
+ # Rebuild the arguments and locale-sensitivity dictionaries.
+ args_dict.clear()
+ sensitivity_dict = {}
+ for pattern, pattern_type, flags, args, default_version, locale in tuple(cache_dict):
+ args_dict[pattern, pattern_type, flags, default_version, locale] = args
+ try:
+ sensitivity_dict[pattern_type, pattern] = locale_sensitive[pattern_type, pattern]
+ except KeyError:
+ pass
+
+ locale_sensitive.clear()
+ locale_sensitive.update(sensitivity_dict)
+
+def _fold_case(info, string):
+ "Folds the case of a string."
+ flags = info.flags
+ if (flags & _ALL_ENCODINGS) == 0:
+ flags |= info.guess_encoding
+
+ return _regex.fold_case(flags, string)
+
+def is_cased_i(info, char):
+ "Checks whether a character is cased."
+ return len(_regex.get_all_cases(info.flags, char)) > 1
+
+def is_cased_f(flags, char):
+ "Checks whether a character is cased."
+ return len(_regex.get_all_cases(flags, char)) > 1
+
+def _compile_firstset(info, fs):
+ "Compiles the firstset for the pattern."
+ reverse = bool(info.flags & REVERSE)
+ fs = _check_firstset(info, reverse, fs)
+ if not fs:
+ return []
+
+ # Compile the firstset.
+ return fs.compile(reverse)
+
+def _check_firstset(info, reverse, fs):
+ "Checks the firstset for the pattern."
+ if not fs or None in fs:
+ return None
+
+ # If we ignore the case, for simplicity we won't build a firstset.
+ members = set()
+ case_flags = NOCASE
+ for i in fs:
+ if isinstance(i, Character) and not i.positive:
+ return None
+
+# if i.case_flags:
+# if isinstance(i, Character):
+# if is_cased_i(info, i.value):
+# return []
+# elif isinstance(i, SetBase):
+# return []
+ case_flags |= i.case_flags
+ members.add(i.with_flags(case_flags=NOCASE))
+
+ if case_flags == (FULLCASE | IGNORECASE):
+ return None
+
+ # Build the firstset.
+ fs = SetUnion(info, list(members), case_flags=case_flags & ~FULLCASE,
+ zerowidth=True)
+ fs = fs.optimise(info, reverse, in_set=True)
+
+ return fs
+
+def _flatten_code(code):
+ "Flattens the code from a list of tuples."
+ flat_code = []
+ for c in code:
+ flat_code.extend(c)
+
+ return flat_code
+
+def make_case_flags(info):
+ "Makes the case flags."
+ flags = info.flags & CASE_FLAGS
+
+ # Turn off FULLCASE if ASCII is turned on.
+ if info.flags & ASCII:
+ flags &= ~FULLCASE
+
+ return flags
+
+def make_character(info, value, in_set=False):
+ "Makes a character literal."
+ if in_set:
+ # A character set is built case-sensitively.
+ return Character(value)
+
+ return Character(value, case_flags=make_case_flags(info))
+
+def make_ref_group(info, name, position):
+ "Makes a group reference."
+ return RefGroup(info, name, position, case_flags=make_case_flags(info))
+
+def make_string_set(info, name):
+ "Makes a string set."
+ return StringSet(info, name, case_flags=make_case_flags(info))
+
+def make_property(info, prop, in_set):
+ "Makes a property."
+ if in_set:
+ return prop
+
+ return prop.with_flags(case_flags=make_case_flags(info))
+
+def _parse_pattern(source, info):
+ "Parses a pattern, eg. 'a|b|c'."
+ branches = [parse_sequence(source, info)]
+ while source.match("|"):
+ branches.append(parse_sequence(source, info))
+
+ if len(branches) == 1:
+ return branches[0]
+ return Branch(branches)
+
+def parse_sequence(source, info):
+ "Parses a sequence, eg. 'abc'."
+ sequence = [None]
+ case_flags = make_case_flags(info)
+ while True:
+ saved_pos = source.pos
+ ch = source.get()
+ if ch in SPECIAL_CHARS:
+ if ch in ")|":
+ # The end of a sequence. At the end of the pattern ch is "".
+ source.pos = saved_pos
+ break
+ elif ch == "\\":
+ # An escape sequence outside a set.
+ sequence.append(parse_escape(source, info, False))
+ elif ch == "(":
+ # A parenthesised subpattern or a flag.
+ element = parse_paren(source, info)
+ if element is None:
+ case_flags = make_case_flags(info)
+ else:
+ sequence.append(element)
+ elif ch == ".":
+ # Any character.
+ if info.flags & DOTALL:
+ sequence.append(AnyAll())
+ elif info.flags & WORD:
+ sequence.append(AnyU())
+ else:
+ sequence.append(Any())
+ elif ch == "[":
+ # A character set.
+ sequence.append(parse_set(source, info))
+ elif ch == "^":
+ # The start of a line or the string.
+ if info.flags & MULTILINE:
+ if info.flags & WORD:
+ sequence.append(StartOfLineU())
+ else:
+ sequence.append(StartOfLine())
+ else:
+ sequence.append(StartOfString())
+ elif ch == "$":
+ # The end of a line or the string.
+ if info.flags & MULTILINE:
+ if info.flags & WORD:
+ sequence.append(EndOfLineU())
+ else:
+ sequence.append(EndOfLine())
+ else:
+ if info.flags & WORD:
+ sequence.append(EndOfStringLineU())
+ else:
+ sequence.append(EndOfStringLine())
+ elif ch in "?*+{":
+ # Looks like a quantifier.
+ counts = parse_quantifier(source, info, ch)
+ if counts:
+ # It _is_ a quantifier.
+ apply_quantifier(source, info, counts, case_flags, ch,
+ saved_pos, sequence)
+ sequence.append(None)
+ else:
+ # It's not a quantifier. Maybe it's a fuzzy constraint.
+ constraints = parse_fuzzy(source, info, ch, case_flags)
+ if constraints:
+ # It _is_ a fuzzy constraint.
+ apply_constraint(source, info, constraints, case_flags,
+ saved_pos, sequence)
+ sequence.append(None)
+ else:
+ # The element was just a literal.
+ sequence.append(Character(ord(ch),
+ case_flags=case_flags))
+ else:
+ # A literal.
+ sequence.append(Character(ord(ch), case_flags=case_flags))
+ else:
+ # A literal.
+ sequence.append(Character(ord(ch), case_flags=case_flags))
+
+ sequence = [item for item in sequence if item is not None]
+ return Sequence(sequence)
+
+def apply_quantifier(source, info, counts, case_flags, ch, saved_pos,
+ sequence):
+ element = sequence.pop()
+ if element is None:
+ if sequence:
+ raise error("multiple repeat", source.string, saved_pos)
+ raise error("nothing to repeat", source.string, saved_pos)
+
+ if isinstance(element, (GreedyRepeat, LazyRepeat, PossessiveRepeat)):
+ raise error("multiple repeat", source.string, saved_pos)
+
+ min_count, max_count = counts
+ saved_pos = source.pos
+ ch = source.get()
+ if ch == "?":
+ # The "?" suffix that means it's a lazy repeat.
+ repeated = LazyRepeat
+ elif ch == "+":
+ # The "+" suffix that means it's a possessive repeat.
+ repeated = PossessiveRepeat
+ else:
+ # No suffix means that it's a greedy repeat.
+ source.pos = saved_pos
+ repeated = GreedyRepeat
+
+ # Ignore the quantifier if it applies to a zero-width item or the number of
+ # repeats is fixed at 1.
+ if not element.is_empty() and (min_count != 1 or max_count != 1):
+ element = repeated(element, min_count, max_count)
+
+ sequence.append(element)
+
+def apply_constraint(source, info, constraints, case_flags, saved_pos,
+ sequence):
+ element = sequence.pop()
+ if element is None:
+ raise error("nothing for fuzzy constraint", source.string, saved_pos)
+
+ # If a group is marked as fuzzy then put all of the fuzzy part in the
+ # group.
+ if isinstance(element, Group):
+ element.subpattern = Fuzzy(element.subpattern, constraints)
+ sequence.append(element)
+ else:
+ sequence.append(Fuzzy(element, constraints))
+
+_QUANTIFIERS = {"?": (0, 1), "*": (0, None), "+": (1, None)}
+
+def parse_quantifier(source, info, ch):
+ "Parses a quantifier."
+ q = _QUANTIFIERS.get(ch)
+ if q:
+ # It's a quantifier.
+ return q
+
+ if ch == "{":
+ # Looks like a limited repeated element, eg. 'a{2,3}'.
+ counts = parse_limited_quantifier(source)
+ if counts:
+ return counts
+
+ return None
+
+def is_above_limit(count):
+ "Checks whether a count is above the maximum."
+ return count is not None and count >= UNLIMITED
+
+def parse_limited_quantifier(source):
+ "Parses a limited quantifier."
+ saved_pos = source.pos
+ min_count = parse_count(source)
+ if source.match(","):
+ max_count = parse_count(source)
+
+ # No minimum means 0 and no maximum means unlimited.
+ min_count = int(min_count or 0)
+ max_count = int(max_count) if max_count else None
+ else:
+ if not min_count:
+ source.pos = saved_pos
+ return None
+
+ min_count = max_count = int(min_count)
+
+ if not source.match("}"):
+ source.pos = saved_pos
+ return None
+
+ if is_above_limit(min_count) or is_above_limit(max_count):
+ raise error("repeat count too big", source.string, saved_pos)
+
+ if max_count is not None and min_count > max_count:
+ raise error("min repeat greater than max repeat", source.string,
+ saved_pos)
+
+ return min_count, max_count
+
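+# Illustrative examples of the (min_count, max_count) pairs produced by the
+# quantifier parsers above, where None means "unlimited":
+#   "?"    -> (0, 1)       "{3}"   -> (3, 3)
+#   "*"    -> (0, None)    "{2,5}" -> (2, 5)
+#   "+"    -> (1, None)    "{2,}"  -> (2, None)
+#   "{,5}" -> (0, 5)
+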
+def parse_fuzzy(source, info, ch, case_flags):
+ "Parses a fuzzy setting, if present."
+ saved_pos = source.pos
+
+ if ch != "{":
+ return None
+
+ constraints = {}
+ try:
+ parse_fuzzy_item(source, constraints)
+ while source.match(","):
+ parse_fuzzy_item(source, constraints)
+ except ParseError:
+ source.pos = saved_pos
+ return None
+
+ if source.match(":"):
+ constraints["test"] = parse_fuzzy_test(source, info, case_flags)
+
+ if not source.match("}"):
+ raise error("expected }", source.string, source.pos)
+
+ return constraints
+
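+# Illustrative examples of the fuzzy syntax accepted above (per the regex
+# module's documented behaviour, not exhaustive):
+#   (?:cat){e<=1}         at most 1 error of any kind
+#   (?:cat){1<=e<=3}      between 1 and 3 errors
+#   (?:cat){i<=1,d<=1}    at most 1 insertion and at most 1 deletion
+#   (?:cat){2i+2d+1s<=4}  a cost equation: total weighted cost at most 4
+#   (?:cat){e<=1:[a-z]}   errors are only permitted on characters in [a-z]
+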
+def parse_fuzzy_item(source, constraints):
+ "Parses a fuzzy setting item."
+ saved_pos = source.pos
+ try:
+ parse_cost_constraint(source, constraints)
+ except ParseError:
+ source.pos = saved_pos
+
+ parse_cost_equation(source, constraints)
+
+def parse_cost_constraint(source, constraints):
+ "Parses a cost constraint."
+ saved_pos = source.pos
+ ch = source.get()
+ if ch in ALPHA:
+ # Syntax: constraint [("<=" | "<") cost]
+ constraint = parse_constraint(source, constraints, ch)
+
+ max_inc = parse_fuzzy_compare(source)
+
+ if max_inc is None:
+ # No maximum cost.
+ constraints[constraint] = 0, None
+ else:
+ # There's a maximum cost.
+ cost_pos = source.pos
+ max_cost = parse_cost_limit(source)
+
+ # Inclusive or exclusive limit?
+ if not max_inc:
+ max_cost -= 1
+
+ if max_cost < 0:
+ raise error("bad fuzzy cost limit", source.string, cost_pos)
+
+ constraints[constraint] = 0, max_cost
+ elif ch in DIGITS:
+ # Syntax: cost ("<=" | "<") constraint ("<=" | "<") cost
+ source.pos = saved_pos
+
+ # Minimum cost.
+ cost_pos = source.pos
+ min_cost = parse_cost_limit(source)
+
+ min_inc = parse_fuzzy_compare(source)
+ if min_inc is None:
+ raise ParseError()
+
+ constraint = parse_constraint(source, constraints, source.get())
+
+ max_inc = parse_fuzzy_compare(source)
+ if max_inc is None:
+ raise ParseError()
+
+ # Maximum cost.
+ cost_pos = source.pos
+ max_cost = parse_cost_limit(source)
+
+ # Inclusive or exclusive limits?
+ if not min_inc:
+ min_cost += 1
+ if not max_inc:
+ max_cost -= 1
+
+ if not 0 <= min_cost <= max_cost:
+ raise error("bad fuzzy cost limit", source.string, cost_pos)
+
+ constraints[constraint] = min_cost, max_cost
+ else:
+ raise ParseError()
+
+def parse_cost_limit(source):
+ "Parses a cost limit."
+ cost_pos = source.pos
+ digits = parse_count(source)
+
+ try:
+ return int(digits)
+ except ValueError:
+ pass
+
+ raise error("bad fuzzy cost limit", source.string, cost_pos)
+
+def parse_constraint(source, constraints, ch):
+ "Parses a constraint."
+ if ch not in "deis":
+ raise ParseError()
+
+ if ch in constraints:
+ raise ParseError()
+
+ return ch
+
+def parse_fuzzy_compare(source):
+ "Parses a cost comparator."
+ if source.match("<="):
+ return True
+ elif source.match("<"):
+ return False
+ else:
+ return None
+
+def parse_cost_equation(source, constraints):
+ "Parses a cost equation."
+ if "cost" in constraints:
+ raise error("more than one cost equation", source.string, source.pos)
+
+ cost = {}
+
+ parse_cost_term(source, cost)
+ while source.match("+"):
+ parse_cost_term(source, cost)
+
+ max_inc = parse_fuzzy_compare(source)
+ if max_inc is None:
+ raise ParseError()
+
+ max_cost = int(parse_count(source))
+
+ if not max_inc:
+ max_cost -= 1
+
+ if max_cost < 0:
+ raise error("bad fuzzy cost limit", source.string, source.pos)
+
+ cost["max"] = max_cost
+
+ constraints["cost"] = cost
+
+def parse_cost_term(source, cost):
+ "Parses a cost equation term."
+ coeff = parse_count(source)
+ ch = source.get()
+ if ch not in "dis":
+ raise ParseError()
+
+ if ch in cost:
+ raise error("repeated fuzzy cost", source.string, source.pos)
+
+ cost[ch] = int(coeff or 1)
+
+def parse_fuzzy_test(source, info, case_flags):
+ "Parses the test of a fuzzy constraint."
+ saved_pos = source.pos
+ ch = source.get()
+ if ch in SPECIAL_CHARS:
+ if ch == "\\":
+ # An escape sequence outside a set.
+ return parse_escape(source, info, False)
+ elif ch == ".":
+ # Any character.
+ if info.flags & DOTALL:
+ return AnyAll()
+ elif info.flags & WORD:
+ return AnyU()
+ else:
+ return Any()
+ elif ch == "[":
+ # A character set.
+ return parse_set(source, info)
+ else:
+ raise error("expected character set", source.string, saved_pos)
+ elif ch:
+ # A literal.
+ return Character(ord(ch), case_flags=case_flags)
+ else:
+ raise error("expected character set", source.string, saved_pos)
+
+def parse_count(source):
+ "Parses a quantifier's count, which can be empty."
+ return source.get_while(DIGITS)
+
+def parse_paren(source, info):
+ """Parses a parenthesised subpattern or a flag. Returns FLAGS if it's an
+ inline flag.
+ """
+ saved_pos = source.pos
+ ch = source.get(True)
+ if ch == "?":
+ # (?...
+ saved_pos_2 = source.pos
+ ch = source.get(True)
+ if ch == "<":
+ # (?<...
+ saved_pos_3 = source.pos
+ ch = source.get()
+ if ch in ("=", "!"):
+ # (?<=... or (?<!...: lookbehind.
+ return parse_lookaround(source, info, True, ch == "=")
+
+ # (?<...: a named capture group.
+ source.pos = saved_pos_3
+ name = parse_name(source)
+ group = info.open_group(name)
+ source.expect(">")
+ saved_flags = info.flags
+ try:
+ subpattern = _parse_pattern(source, info)
+ source.expect(")")
+ finally:
+ info.flags = saved_flags
+ source.ignore_space = bool(info.flags & VERBOSE)
+
+ info.close_group()
+ return Group(info, group, subpattern)
+ if ch in ("=", "!"):
+ # (?=... or (?!...: lookahead.
+ return parse_lookaround(source, info, False, ch == "=")
+ if ch == "P":
+ # (?P...: a Python extension.
+ return parse_extension(source, info)
+ if ch == "#":
+ # (?#...: a comment.
+ return parse_comment(source)
+ if ch == "(":
+ # (?(...: a conditional subpattern.
+ return parse_conditional(source, info)
+ if ch == ">":
+ # (?>...: an atomic subpattern.
+ return parse_atomic(source, info)
+ if ch == "|":
+ # (?|...: a common/reset groups branch.
+ return parse_common(source, info)
+ if ch == "R" or "0" <= ch <= "9":
+ # (?R...: probably a call to a group.
+ return parse_call_group(source, info, ch, saved_pos_2)
+ if ch == "&":
+ # (?&...: a call to a named group.
+ return parse_call_named_group(source, info, saved_pos_2)
+
+ # (?...: probably a flags subpattern.
+ source.pos = saved_pos_2
+ return parse_flags_subpattern(source, info)
+
+ if ch == "*":
+ # (*...
+ saved_pos_2 = source.pos
+ word = source.get_while(set(")>"), include=False)
+ if word[ : 1].isalpha():
+ verb = VERBS.get(word)
+ if not verb:
+ raise error("unknown verb", source.string, saved_pos_2)
+
+ source.expect(")")
+
+ return verb
+
+ # (...: an unnamed capture group.
+ source.pos = saved_pos
+ group = info.open_group()
+ saved_flags = info.flags
+ try:
+ subpattern = _parse_pattern(source, info)
+ source.expect(")")
+ finally:
+ info.flags = saved_flags
+ source.ignore_space = bool(info.flags & VERBOSE)
+
+ info.close_group()
+
+ return Group(info, group, subpattern)
+
+def parse_extension(source, info):
+ "Parses a Python extension."
+ saved_pos = source.pos
+ ch = source.get()
+ if ch == "<":
+ # (?P<...: a named capture group.
+ name = parse_name(source)
+ group = info.open_group(name)
+ source.expect(">")
+ saved_flags = info.flags
+ try:
+ subpattern = _parse_pattern(source, info)
+ source.expect(")")
+ finally:
+ info.flags = saved_flags
+ source.ignore_space = bool(info.flags & VERBOSE)
+
+ info.close_group()
+
+ return Group(info, group, subpattern)
+ if ch == "=":
+ # (?P=...: a named group reference.
+ name = parse_name(source, allow_numeric=True)
+ source.expect(")")
+ if info.is_open_group(name):
+ raise error("cannot refer to an open group", source.string,
+ saved_pos)
+
+ return make_ref_group(info, name, saved_pos)
+ if ch == ">" or ch == "&":
+ # (?P>...: a call to a group.
+ return parse_call_named_group(source, info, saved_pos)
+
+ source.pos = saved_pos
+ raise error("unknown extension", source.string, saved_pos)
+
+def parse_comment(source):
+ "Parses a comment."
+ while True:
+ saved_pos = source.pos
+ c = source.get(True)
+
+ if not c or c == ")":
+ break
+
+ if c == "\\":
+ c = source.get(True)
+
+ source.pos = saved_pos
+ source.expect(")")
+
+ return None
+
+def parse_lookaround(source, info, behind, positive):
+ "Parses a lookaround."
+ saved_flags = info.flags
+ try:
+ subpattern = _parse_pattern(source, info)
+ source.expect(")")
+ finally:
+ info.flags = saved_flags
+ source.ignore_space = bool(info.flags & VERBOSE)
+
+ return LookAround(behind, positive, subpattern)
+
+def parse_conditional(source, info):
+ "Parses a conditional subpattern."
+ saved_flags = info.flags
+ saved_pos = source.pos
+ ch = source.get()
+ if ch == "?":
+ # (?(?...
+ ch = source.get()
+ if ch in ("=", "!"):
+ # (?(?=... or (?(?!...: lookahead conditional.
+ return parse_lookaround_conditional(source, info, False, ch == "=")
+ if ch == "<":
+ # (?(?<...
+ ch = source.get()
+ if ch in ("=", "!"):
+ # (?(?<=... or (?(?<!...: lookbehind conditional.
+ return parse_lookaround_conditional(source, info, True, ch == "=")
+
+ source.pos = saved_pos
+ raise error("expected lookaround conditional", source.string, source.pos)
+
+def parse_name(source, allow_numeric=False, allow_group_0=False):
+ "Parses a name."
+ name = source.get_while(set(")>"), include=False)
+
+ if not name:
+ raise error("missing group name", source.string, source.pos)
+
+ if name.isdigit():
+ min_group = 0 if allow_group_0 else 1
+ if not allow_numeric or int(name) < min_group:
+ raise error("bad character in group name", source.string,
+ source.pos)
+ else:
+ if not name.isidentifier():
+ raise error("bad character in group name", source.string,
+ source.pos)
+
+ return name
+
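+# Illustrative examples: parse_name accepts an identifier such as "word";
+# with allow_numeric=True it also accepts "1" (and "0" when allow_group_0 is
+# set), while names like "1a" or "a-b" raise "bad character in group name".
+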
+def is_octal(string):
+ "Checks whether a string is octal."
+ return all(ch in OCT_DIGITS for ch in string)
+
+def is_decimal(string):
+ "Checks whether a string is decimal."
+ return all(ch in DIGITS for ch in string)
+
+def is_hexadecimal(string):
+ "Checks whether a string is hexadecimal."
+ return all(ch in HEX_DIGITS for ch in string)
+
+def parse_escape(source, info, in_set):
+ "Parses an escape sequence."
+ saved_ignore = source.ignore_space
+ source.ignore_space = False
+ ch = source.get()
+ source.ignore_space = saved_ignore
+ if not ch:
+ # A backslash at the end of the pattern.
+ raise error("bad escape (end of pattern)", source.string, source.pos)
+ if ch in HEX_ESCAPES:
+ # A hexadecimal escape sequence.
+ return parse_hex_escape(source, info, ch, HEX_ESCAPES[ch], in_set, ch)
+ elif ch == "g" and not in_set:
+ # A group reference.
+ saved_pos = source.pos
+ try:
+ return parse_group_ref(source, info)
+ except error:
+ # Invalid as a group reference, so assume it's a literal.
+ source.pos = saved_pos
+
+ return make_character(info, ord(ch), in_set)
+ elif ch == "G" and not in_set:
+ # A search anchor.
+ return SearchAnchor()
+ elif ch == "L" and not in_set:
+ # A string set.
+ return parse_string_set(source, info)
+ elif ch == "N":
+ # A named codepoint.
+ return parse_named_char(source, info, in_set)
+ elif ch in "pP":
+ # A Unicode property, positive or negative.
+ return parse_property(source, info, ch == "p", in_set)
+ elif ch == "R" and not in_set:
+ # A line ending.
+ charset = [0x0A, 0x0B, 0x0C, 0x0D]
+ if info.guess_encoding == UNICODE:
+ charset.extend([0x85, 0x2028, 0x2029])
+
+ return Atomic(Branch([String([0x0D, 0x0A]), SetUnion(info, [Character(c)
+ for c in charset])]))
+ elif ch == "X" and not in_set:
+ # A grapheme cluster.
+ return Grapheme()
+ elif ch in ALPHA:
+ # An alphabetic escape sequence.
+ # Positional escapes aren't allowed inside a character set.
+ if not in_set:
+ if info.flags & WORD:
+ value = WORD_POSITION_ESCAPES.get(ch)
+ else:
+ value = POSITION_ESCAPES.get(ch)
+
+ if value:
+ return value
+
+ value = CHARSET_ESCAPES.get(ch)
+ if value:
+ return value
+
+ value = CHARACTER_ESCAPES.get(ch)
+ if value:
+ return Character(ord(value))
+
+ raise error("bad escape \\%s" % ch, source.string, source.pos)
+ elif ch in DIGITS:
+ # A numeric escape sequence.
+ return parse_numeric_escape(source, info, ch, in_set)
+ else:
+ # A literal.
+ return make_character(info, ord(ch), in_set)
+
+def parse_numeric_escape(source, info, ch, in_set):
+ "Parses a numeric escape sequence."
+ if in_set or ch == "0":
+ # Octal escape sequence, max 3 digits.
+ return parse_octal_escape(source, info, [ch], in_set)
+
+ # At least 1 digit, so either octal escape or group.
+ digits = ch
+ saved_pos = source.pos
+ ch = source.get()
+ if ch in DIGITS:
+ # At least 2 digits, so either octal escape or group.
+ digits += ch
+ saved_pos = source.pos
+ ch = source.get()
+ if is_octal(digits) and ch in OCT_DIGITS:
+ # 3 octal digits, so octal escape sequence.
+ encoding = info.flags & _ALL_ENCODINGS
+ if encoding == ASCII or encoding == LOCALE:
+ octal_mask = 0xFF
+ else:
+ octal_mask = 0x1FF
+
+ value = int(digits + ch, 8) & octal_mask
+ return make_character(info, value)
+
+ # Group reference.
+ source.pos = saved_pos
+ if info.is_open_group(digits):
+ raise error("cannot refer to an open group", source.string, source.pos)
+
+ return make_ref_group(info, digits, source.pos)
+
+def parse_octal_escape(source, info, digits, in_set):
+ "Parses an octal escape sequence."
+ saved_pos = source.pos
+ ch = source.get()
+ while len(digits) < 3 and ch in OCT_DIGITS:
+ digits.append(ch)
+ saved_pos = source.pos
+ ch = source.get()
+
+ source.pos = saved_pos
+ try:
+ value = int("".join(digits), 8)
+ return make_character(info, value, in_set)
+ except ValueError:
+ if digits[0] in OCT_DIGITS:
+ raise error("incomplete escape \\%s" % ''.join(digits),
+ source.string, source.pos)
+ else:
+ raise error("bad escape \\%s" % digits[0], source.string,
+ source.pos)
+
+def parse_hex_escape(source, info, esc, expected_len, in_set, type):
+ "Parses a hex escape sequence."
+ saved_pos = source.pos
+ digits = []
+ for i in range(expected_len):
+ ch = source.get()
+ if ch not in HEX_DIGITS:
+ raise error("incomplete escape \\%s%s" % (type, ''.join(digits)),
+ source.string, saved_pos)
+ digits.append(ch)
+
+ try:
+ value = int("".join(digits), 16)
+ except ValueError:
+ pass
+ else:
+ if value < 0x110000:
+ return make_character(info, value, in_set)
+
+ # Bad hex escape.
+ raise error("bad hex escape \\%s%s" % (esc, ''.join(digits)),
+ source.string, saved_pos)
+
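+# Illustrative examples of the hex escapes parsed above: \x41 -> U+0041,
+# \u20AC -> U+20AC (euro sign), \U0001F600 -> U+1F600; any value of
+# 0x110000 or more is rejected with "bad hex escape".
+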
+def parse_group_ref(source, info):
+ "Parses a group reference."
+ source.expect("<")
+ saved_pos = source.pos
+ name = parse_name(source, True)
+ source.expect(">")
+ if info.is_open_group(name):
+ raise error("cannot refer to an open group", source.string, source.pos)
+
+ return make_ref_group(info, name, saved_pos)
+
+def parse_string_set(source, info):
+ "Parses a string set reference."
+ source.expect("<")
+ name = parse_name(source, True)
+ source.expect(">")
+ if name is None or name not in info.kwargs:
+ raise error("undefined named list", source.string, source.pos)
+
+ return make_string_set(info, name)
+
+def parse_named_char(source, info, in_set):
+ "Parses a named character."
+ saved_pos = source.pos
+ if source.match("{"):
+ name = source.get_while(NAMED_CHAR_PART)
+ if source.match("}"):
+ try:
+ value = unicodedata.lookup(name)
+ return make_character(info, ord(value), in_set)
+ except KeyError:
+ raise error("undefined character name", source.string,
+ source.pos)
+
+ source.pos = saved_pos
+ return make_character(info, ord("N"), in_set)
+
+def parse_property(source, info, positive, in_set):
+ "Parses a Unicode property."
+ saved_pos = source.pos
+ ch = source.get()
+ if ch == "{":
+ negate = source.match("^")
+ prop_name, name = parse_property_name(source)
+ if source.match("}"):
+ # It's correctly delimited.
+ prop = lookup_property(prop_name, name, positive != negate, source)
+ return make_property(info, prop, in_set)
+ elif ch and ch in "CLMNPSZ":
+ # An abbreviated property, eg \pL.
+ prop = lookup_property(None, ch, positive, source)
+ return make_property(info, prop, in_set)
+
+ # Not a property, so treat as a literal "p" or "P".
+ source.pos = saved_pos
+ ch = "p" if positive else "P"
+ return make_character(info, ord(ch), in_set)
+
+def parse_property_name(source):
+ "Parses a property name, which may be qualified."
+ name = source.get_while(PROPERTY_NAME_PART)
+ saved_pos = source.pos
+
+ ch = source.get()
+ if ch and ch in ":=":
+ prop_name = name
+ name = source.get_while(ALNUM | set(" &_-./")).strip()
+
+ if name:
+ # Name after the ":" or "=", so it's a qualified name.
+ saved_pos = source.pos
+ else:
+ # No name after the ":" or "=", so assume it's an unqualified name.
+ prop_name, name = None, prop_name
+ else:
+ prop_name = None
+
+ source.pos = saved_pos
+ return prop_name, name
+
+def parse_set(source, info):
+ "Parses a character set."
+ version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
+
+ saved_ignore = source.ignore_space
+ source.ignore_space = False
+ # Negative set?
+ negate = source.match("^")
+ try:
+ if version == VERSION0:
+ item = parse_set_imp_union(source, info)
+ else:
+ item = parse_set_union(source, info)
+
+ if not source.match("]"):
+ raise error("missing ]", source.string, source.pos)
+ finally:
+ source.ignore_space = saved_ignore
+
+ if negate:
+ item = item.with_flags(positive=not item.positive)
+
+ item = item.with_flags(case_flags=make_case_flags(info))
+
+ return item
+
+def parse_set_union(source, info):
+ "Parses a set union ([x||y])."
+ items = [parse_set_symm_diff(source, info)]
+ while source.match("||"):
+ items.append(parse_set_symm_diff(source, info))
+
+ if len(items) == 1:
+ return items[0]
+ return SetUnion(info, items)
+
+def parse_set_symm_diff(source, info):
+ "Parses a set symmetric difference ([x~~y])."
+ items = [parse_set_inter(source, info)]
+ while source.match("~~"):
+ items.append(parse_set_inter(source, info))
+
+ if len(items) == 1:
+ return items[0]
+ return SetSymDiff(info, items)
+
+def parse_set_inter(source, info):
+ "Parses a set intersection ([x&&y])."
+ items = [parse_set_diff(source, info)]
+ while source.match("&&"):
+ items.append(parse_set_diff(source, info))
+
+ if len(items) == 1:
+ return items[0]
+ return SetInter(info, items)
+
+def parse_set_diff(source, info):
+ "Parses a set difference ([x--y])."
+ items = [parse_set_imp_union(source, info)]
+ while source.match("--"):
+ items.append(parse_set_imp_union(source, info))
+
+ if len(items) == 1:
+ return items[0]
+ return SetDiff(info, items)
+
+def parse_set_imp_union(source, info):
+ "Parses a set implicit union ([xy])."
+ version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
+
+ items = [parse_set_member(source, info)]
+ while True:
+ saved_pos = source.pos
+ if source.match("]"):
+ # End of the set.
+ source.pos = saved_pos
+ break
+
+ if version == VERSION1 and any(source.match(op) for op in SET_OPS):
+ # The new behaviour has set operators.
+ source.pos = saved_pos
+ break
+
+ items.append(parse_set_member(source, info))
+
+ if len(items) == 1:
+ return items[0]
+ return SetUnion(info, items)
+
+def parse_set_member(source, info):
+ "Parses a member in a character set."
+ # Parse a set item.
+ start = parse_set_item(source, info)
+ saved_pos1 = source.pos
+ if (not isinstance(start, Character) or not start.positive or not
+ source.match("-")):
+ # It's not the start of a range.
+ return start
+
+ version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
+
+ # It looks like the start of a range of characters.
+ saved_pos2 = source.pos
+ if version == VERSION1 and source.match("-"):
+ # It's actually the set difference operator '--', so return the
+ # character.
+ source.pos = saved_pos1
+ return start
+
+ if source.match("]"):
+ # We've reached the end of the set, so return both the character and
+ # hyphen.
+ source.pos = saved_pos2
+ return SetUnion(info, [start, Character(ord("-"))])
+
+ # Parse a set item.
+ end = parse_set_item(source, info)
+ if not isinstance(end, Character) or not end.positive:
+ # It's not a range, so return the character, hyphen and property.
+ return SetUnion(info, [start, Character(ord("-")), end])
+
+ # It _is_ a range.
+ if start.value > end.value:
+ raise error("bad character range", source.string, source.pos)
+
+ if start.value == end.value:
+ return start
+
+ return Range(start.value, end.value)
+
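+# Illustrative examples of set members parsed above:
+#   [a-z]  -> Range(97, 122)
+#   [a-]   -> union of "a" and "-" (the hyphen ends the set, so no range)
+#   [a--b] -> in VERSION1, "--" is the set-difference operator, not a range
+#   [z-a]  -> error "bad character range"
+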
+def parse_set_item(source, info):
+ "Parses an item in a character set."
+ version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
+
+ if source.match("\\"):
+ # An escape sequence in a set.
+ return parse_escape(source, info, True)
+
+ saved_pos = source.pos
+ if source.match("[:"):
+ # Looks like a POSIX character class.
+ try:
+ return parse_posix_class(source, info)
+ except ParseError:
+ # Not a POSIX character class.
+ source.pos = saved_pos
+
+ if version == VERSION1 and source.match("["):
+ # It's the start of a nested set.
+
+ # Negative set?
+ negate = source.match("^")
+ item = parse_set_union(source, info)
+
+ if not source.match("]"):
+ raise error("missing ]", source.string, source.pos)
+
+ if negate:
+ item = item.with_flags(positive=not item.positive)
+
+ return item
+
+ ch = source.get()
+ if not ch:
+ raise error("unterminated character set", source.string, source.pos)
+
+ return Character(ord(ch))
+
+def parse_posix_class(source, info):
+ "Parses a POSIX character class."
+ negate = source.match("^")
+ prop_name, name = parse_property_name(source)
+ if not source.match(":]"):
+ raise ParseError()
+
+ return lookup_property(prop_name, name, not negate, source, posix=True)
+
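+# Illustrative examples: "[[:digit:]]" matches a digit and "[[:^alpha:]]"
+# its negation; if the closing ":]" is absent a ParseError is raised, and
+# the caller (parse_set_item) backtracks so "[:" is treated literally.
+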
+def float_to_rational(flt):
+ "Converts a float to a rational pair."
+ int_part = int(flt)
+ error = flt - int_part
+ if abs(error) < 0.0001:
+ return int_part, 1
+
+ den, num = float_to_rational(1.0 / error)
+
+ return int_part * den + num, den
+
+def numeric_to_rational(numeric):
+ "Converts a numeric string to a rational string, if possible."
+ if numeric[ : 1] == "-":
+ sign, numeric = numeric[0], numeric[1 : ]
+ else:
+ sign = ""
+
+ parts = numeric.split("/")
+ if len(parts) == 2:
+ num, den = float_to_rational(float(parts[0]) / float(parts[1]))
+ elif len(parts) == 1:
+ num, den = float_to_rational(float(parts[0]))
+ else:
+ raise ValueError()
+
+ result = "{}{}/{}".format(sign, num, den)
+ if result.endswith("/1"):
+ return result[ : -2]
+
+ return result
+
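+# Worked examples (illustrative) for the two conversions above:
+#   float_to_rational(0.5)     -> (1, 2)
+#   numeric_to_rational("0.5") -> "1/2"
+#   numeric_to_rational("2/4") -> "1/2"
+#   numeric_to_rational("-3")  -> "-3"   (the "/1" suffix is dropped)
+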
+def standardise_name(name):
+ "Standardises a property or value name."
+ try:
+ return numeric_to_rational("".join(name))
+ except (ValueError, ZeroDivisionError):
+ return "".join(ch for ch in name if ch not in "_- ").upper()
+
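+# Illustrative examples: standardise_name("Latin-1 Supplement") returns
+# "LATIN1SUPPLEMENT" (spaces, hyphens and underscores removed, uppercased),
+# while a numeric name such as "0.5" is normalised to the rational "1/2".
+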
+_POSIX_CLASSES = set('ALNUM DIGIT PUNCT XDIGIT'.split())
+
+_BINARY_VALUES = set('YES Y NO N TRUE T FALSE F'.split())
+
+def lookup_property(property, value, positive, source=None, posix=False):
+ "Looks up a property."
+ # Normalise the names (which may still be lists).
+ property = standardise_name(property) if property else None
+ value = standardise_name(value)
+
+ if (property, value) == ("GENERALCATEGORY", "ASSIGNED"):
+ property, value, positive = "GENERALCATEGORY", "UNASSIGNED", not positive
+
+ if posix and not property and value.upper() in _POSIX_CLASSES:
+ value = 'POSIX' + value
+
+ if property:
+ # Both the property and the value are provided.
+ prop = PROPERTIES.get(property)
+ if not prop:
+ if not source:
+ raise error("unknown property")
+
+ raise error("unknown property", source.string, source.pos)
+
+ prop_id, value_dict = prop
+ val_id = value_dict.get(value)
+ if val_id is None:
+ if not source:
+ raise error("unknown property value")
+
+ raise error("unknown property value", source.string, source.pos)
+
+ return Property((prop_id << 16) | val_id, positive)
+
+ # Only the value is provided.
+ # It might be the name of a GC, script or block value.
+ for property in ("GC", "SCRIPT", "BLOCK"):
+ prop_id, value_dict = PROPERTIES.get(property)
+ val_id = value_dict.get(value)
+ if val_id is not None:
+ return Property((prop_id << 16) | val_id, positive)
+
+ # It might be the name of a binary property.
+ prop = PROPERTIES.get(value)
+ if prop:
+ prop_id, value_dict = prop
+ if set(value_dict) == _BINARY_VALUES:
+ return Property((prop_id << 16) | 1, positive)
+
+ return Property(prop_id << 16, not positive)
+
+ # It might be the name of a binary property starting with a prefix.
+ if value.startswith("IS"):
+ prop = PROPERTIES.get(value[2 : ])
+ if prop:
+ prop_id, value_dict = prop
+ if "YES" in value_dict:
+ return Property((prop_id << 16) | 1, positive)
+
+ # It might be the name of a script or block starting with a prefix.
+ for prefix, property in (("IS", "SCRIPT"), ("IN", "BLOCK")):
+ if value.startswith(prefix):
+ prop_id, value_dict = PROPERTIES.get(property)
+ val_id = value_dict.get(value[2 : ])
+ if val_id is not None:
+ return Property((prop_id << 16) | val_id, positive)
+
+ # Unknown property.
+ if not source:
+ raise error("unknown property")
+
+ raise error("unknown property", source.string, source.pos)
+
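+# Illustrative examples of lookups handled above (names are normalised
+# first by standardise_name):
+#   \p{Lu}           -> General_Category value "Lu" (uppercase letter)
+#   \p{IsGreek}      -> "IS" prefix resolved as the script "Greek"
+#   \p{InBasicLatin} -> "IN" prefix resolved as the block "Basic Latin"
+#   \p{Alpha}        -> a binary property, matched positively
+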
+def _compile_replacement(source, pattern, is_unicode):
+ "Compiles a replacement template escape sequence."
+ ch = source.get()
+ if ch in ALPHA:
+ # An alphabetic escape sequence.
+ value = CHARACTER_ESCAPES.get(ch)
+ if value:
+ return False, [ord(value)]
+
+ if ch in HEX_ESCAPES and (ch == "x" or is_unicode):
+ # A hexadecimal escape sequence.
+ return False, [parse_repl_hex_escape(source, HEX_ESCAPES[ch], ch)]
+
+ if ch == "g":
+ # A group reference.
+ return True, [compile_repl_group(source, pattern)]
+
+ if ch == "N" and is_unicode:
+ # A named character.
+ value = parse_repl_named_char(source)
+ if value is not None:
+ return False, [value]
+
+ raise error("bad escape \\%s" % ch, source.string, source.pos)
+
+ if isinstance(source.sep, bytes):
+ octal_mask = 0xFF
+ else:
+ octal_mask = 0x1FF
+
+ if ch == "0":
+ # An octal escape sequence.
+ digits = ch
+ while len(digits) < 3:
+ saved_pos = source.pos
+ ch = source.get()
+ if ch not in OCT_DIGITS:
+ source.pos = saved_pos
+ break
+ digits += ch
+
+ return False, [int(digits, 8) & octal_mask]
+
+ if ch in DIGITS:
+ # Either an octal escape sequence (3 digits) or a group reference (max
+ # 2 digits).
+ digits = ch
+ saved_pos = source.pos
+ ch = source.get()
+ if ch in DIGITS:
+ digits += ch
+ saved_pos = source.pos
+ ch = source.get()
+ if ch and is_octal(digits + ch):
+ # An octal escape sequence.
+ return False, [int(digits + ch, 8) & octal_mask]
+
+ # A group reference.
+ source.pos = saved_pos
+ return True, [int(digits)]
+
+ if ch == "\\":
+ # An escaped backslash is a backslash.
+ return False, [ord("\\")]
+
+ if not ch:
+ # A trailing backslash.
+ raise error("bad escape (end of pattern)", source.string, source.pos)
+
+ # An escaped non-backslash is a backslash followed by the literal.
+ return False, [ord("\\"), ord(ch)]
+
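+# Illustrative examples of replacement-template escapes compiled above:
+#   \n       -> a newline literal        \x41 -> "A"
+#   \g<name> -> the named group's match  \1   -> group 1's match
+#   \101     -> octal escape for "A"     \\   -> a literal backslash
+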
+def parse_repl_hex_escape(source, expected_len, type):
+ "Parses a hex escape sequence in a replacement string."
+ digits = []
+ for i in range(expected_len):
+ ch = source.get()
+ if ch not in HEX_DIGITS:
+ raise error("incomplete escape \\%s%s" % (type, ''.join(digits)),
+ source.string, source.pos)
+ digits.append(ch)
+
+ return int("".join(digits), 16)
+
+def parse_repl_named_char(source):
+ "Parses a named character in a replacement string."
+ saved_pos = source.pos
+ if source.match("{"):
+ name = source.get_while(ALPHA | set(" "))
+
+ if source.match("}"):
+ try:
+ value = unicodedata.lookup(name)
+ return ord(value)
+ except KeyError:
+ raise error("undefined character name", source.string,
+ source.pos)
+
+ source.pos = saved_pos
+ return None
+
+def compile_repl_group(source, pattern):
+ "Compiles a replacement template group reference."
+ source.expect("<")
+ name = parse_name(source, True, True)
+
+ source.expect(">")
+ if name.isdigit():
+ index = int(name)
+ if not 0 <= index <= pattern.groups:
+ raise error("invalid group reference", source.string, source.pos)
+
+ return index
+
+ try:
+ return pattern.groupindex[name]
+ except KeyError:
+ raise IndexError("unknown group")
+
+# The regular expression is parsed into a syntax tree. The different types of
+# node are defined below.
+
+INDENT = " "
+POSITIVE_OP = 0x1
+ZEROWIDTH_OP = 0x2
+FUZZY_OP = 0x4
+REVERSE_OP = 0x8
+REQUIRED_OP = 0x10
+
+POS_TEXT = {False: "NON-MATCH", True: "MATCH"}
+CASE_TEXT = {NOCASE: "", IGNORECASE: " SIMPLE_IGNORE_CASE", FULLCASE: "",
+ FULLIGNORECASE: " FULL_IGNORE_CASE"}
+
+def make_sequence(items):
+ if len(items) == 1:
+ return items[0]
+ return Sequence(items)
+
+# Common base class for all nodes.
+class RegexBase:
+ def __init__(self):
+ self._key = self.__class__
+
+ def with_flags(self, positive=None, case_flags=None, zerowidth=None):
+ if positive is None:
+ positive = self.positive
+ else:
+ positive = bool(positive)
+ if case_flags is None:
+ case_flags = self.case_flags
+ else:
+ case_flags = CASE_FLAGS_COMBINATIONS[case_flags & CASE_FLAGS]
+ if zerowidth is None:
+ zerowidth = self.zerowidth
+ else:
+ zerowidth = bool(zerowidth)
+
+ if (positive == self.positive and case_flags == self.case_flags and
+ zerowidth == self.zerowidth):
+ return self
+
+ return self.rebuild(positive, case_flags, zerowidth)
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ pass
+
+ def optimise(self, info, reverse):
+ return self
+
+ def pack_characters(self, info):
+ return self
+
+ def remove_captures(self):
+ return self
+
+ def is_atomic(self):
+ return True
+
+ def can_be_affix(self):
+ return True
+
+ def contains_group(self):
+ return False
+
+ def get_firstset(self, reverse):
+ raise _FirstSetError()
+
+ def has_simple_start(self):
+ return False
+
+ def compile(self, reverse=False, fuzzy=False):
+ return self._compile(reverse, fuzzy)
+
+ def is_empty(self):
+ return False
+
+ def __hash__(self):
+ return hash(self._key)
+
+ def __eq__(self, other):
+ return type(self) is type(other) and self._key == other._key
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def get_required_string(self, reverse):
+ return self.max_width(), None
+
+# Base class for zero-width nodes.
+class ZeroWidthBase(RegexBase):
+ def __init__(self, positive=True):
+ RegexBase.__init__(self)
+ self.positive = bool(positive)
+
+ self._key = self.__class__, self.positive
+
+ def get_firstset(self, reverse):
+ return set([None])
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if self.positive:
+ flags |= POSITIVE_OP
+ if fuzzy:
+ flags |= FUZZY_OP
+ if reverse:
+ flags |= REVERSE_OP
+ return [(self._opcode, flags)]
+
+ def dump(self, indent, reverse):
+ print("{}{} {}".format(INDENT * indent, self._op_name,
+ POS_TEXT[self.positive]))
+
+ def max_width(self):
+ return 0
+
+class Any(RegexBase):
+ _opcode = {False: OP.ANY, True: OP.ANY_REV}
+ _op_name = "ANY"
+
+ def has_simple_start(self):
+ return True
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if fuzzy:
+ flags |= FUZZY_OP
+ return [(self._opcode[reverse], flags)]
+
+ def dump(self, indent, reverse):
+ print("{}{}".format(INDENT * indent, self._op_name))
+
+ def max_width(self):
+ return 1
+
+class AnyAll(Any):
+ _opcode = {False: OP.ANY_ALL, True: OP.ANY_ALL_REV}
+ _op_name = "ANY_ALL"
+
+class AnyU(Any):
+ _opcode = {False: OP.ANY_U, True: OP.ANY_U_REV}
+ _op_name = "ANY_U"
+
+class Atomic(RegexBase):
+ def __init__(self, subpattern):
+ RegexBase.__init__(self)
+ self.subpattern = subpattern
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ self.subpattern.fix_groups(pattern, reverse, fuzzy)
+
+ def optimise(self, info, reverse):
+ self.subpattern = self.subpattern.optimise(info, reverse)
+
+ if self.subpattern.is_empty():
+ return self.subpattern
+ return self
+
+ def pack_characters(self, info):
+ self.subpattern = self.subpattern.pack_characters(info)
+ return self
+
+ def remove_captures(self):
+ self.subpattern = self.subpattern.remove_captures()
+ return self
+
+ def can_be_affix(self):
+ return self.subpattern.can_be_affix()
+
+ def contains_group(self):
+ return self.subpattern.contains_group()
+
+ def get_firstset(self, reverse):
+ return self.subpattern.get_firstset(reverse)
+
+ def has_simple_start(self):
+ return self.subpattern.has_simple_start()
+
+ def _compile(self, reverse, fuzzy):
+ return ([(OP.ATOMIC, )] + self.subpattern.compile(reverse, fuzzy) +
+ [(OP.END, )])
+
+ def dump(self, indent, reverse):
+ print("{}ATOMIC".format(INDENT * indent))
+ self.subpattern.dump(indent + 1, reverse)
+
+ def is_empty(self):
+ return self.subpattern.is_empty()
+
+ def __eq__(self, other):
+ return (type(self) is type(other) and self.subpattern ==
+ other.subpattern)
+
+ def max_width(self):
+ return self.subpattern.max_width()
+
+ def get_required_string(self, reverse):
+ return self.subpattern.get_required_string(reverse)
+
+class Boundary(ZeroWidthBase):
+ _opcode = OP.BOUNDARY
+ _op_name = "BOUNDARY"
+
+class Branch(RegexBase):
+ def __init__(self, branches):
+ RegexBase.__init__(self)
+ self.branches = branches
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ for b in self.branches:
+ b.fix_groups(pattern, reverse, fuzzy)
+
+ def optimise(self, info, reverse):
+ if not self.branches:
+ return Sequence([])
+
+ # Flatten branches within branches.
+ branches = Branch._flatten_branches(info, reverse, self.branches)
+
+ # Move any common prefix or suffix out of the branches.
+ if reverse:
+ suffix, branches = Branch._split_common_suffix(info, branches)
+ prefix = []
+ else:
+ prefix, branches = Branch._split_common_prefix(info, branches)
+ suffix = []
+
+ # Try to reduce adjacent single-character branches to sets.
+ branches = Branch._reduce_to_set(info, reverse, branches)
+
+ if len(branches) > 1:
+ sequence = [Branch(branches)]
+
+ if not prefix or not suffix:
+ # We might be able to add a quick precheck before the branches.
+ firstset = self._add_precheck(info, reverse, branches)
+
+ if firstset:
+ if reverse:
+ sequence.append(firstset)
+ else:
+ sequence.insert(0, firstset)
+ else:
+ sequence = branches
+
+ return make_sequence(prefix + sequence + suffix)
+
+ def _add_precheck(self, info, reverse, branches):
+ charset = set()
+ pos = -1 if reverse else 0
+
+ for branch in branches:
+ if type(branch) is Literal and branch.case_flags == NOCASE:
+ charset.add(branch.characters[pos])
+ else:
+ return
+
+ if not charset:
+ return None
+
+ return _check_firstset(info, reverse, [Character(c) for c in charset])
+
+ def pack_characters(self, info):
+ self.branches = [b.pack_characters(info) for b in self.branches]
+ return self
+
+ def remove_captures(self):
+ self.branches = [b.remove_captures() for b in self.branches]
+ return self
+
+ def is_atomic(self):
+ return all(b.is_atomic() for b in self.branches)
+
+ def can_be_affix(self):
+ return all(b.can_be_affix() for b in self.branches)
+
+ def contains_group(self):
+ return any(b.contains_group() for b in self.branches)
+
+ def get_firstset(self, reverse):
+ fs = set()
+ for b in self.branches:
+ fs |= b.get_firstset(reverse)
+
+ return fs or set([None])
+
+ def _compile(self, reverse, fuzzy):
+ code = [(OP.BRANCH, )]
+ for b in self.branches:
+ code.extend(b.compile(reverse, fuzzy))
+ code.append((OP.NEXT, ))
+
+ code[-1] = (OP.END, )
+
+ return code
+
+ def dump(self, indent, reverse):
+ print("{}BRANCH".format(INDENT * indent))
+ self.branches[0].dump(indent + 1, reverse)
+ for b in self.branches[1 : ]:
+ print("{}OR".format(INDENT * indent))
+ b.dump(indent + 1, reverse)
+
+ @staticmethod
+ def _flatten_branches(info, reverse, branches):
+ # Flatten the branches so that there aren't branches of branches.
+ new_branches = []
+ for b in branches:
+ b = b.optimise(info, reverse)
+ if isinstance(b, Branch):
+ new_branches.extend(b.branches)
+ else:
+ new_branches.append(b)
+
+ return new_branches
+
+ @staticmethod
+ def _split_common_prefix(info, branches):
+ # Common leading items can be moved out of the branches.
+ # Get the items in the branches.
+ alternatives = []
+ for b in branches:
+ if isinstance(b, Sequence):
+ alternatives.append(b.items)
+ else:
+ alternatives.append([b])
+
+ # What is the maximum possible length of the prefix?
+ max_count = min(len(a) for a in alternatives)
+
+ # What is the longest common prefix?
+ prefix = alternatives[0]
+ pos = 0
+ end_pos = max_count
+ while pos < end_pos and prefix[pos].can_be_affix() and all(a[pos] ==
+ prefix[pos] for a in alternatives):
+ pos += 1
+ count = pos
+
+ if info.flags & UNICODE:
+ # We need to check that we're not splitting a sequence of
+ # characters which could form part of full case-folding.
+ count = pos
+ while count > 0 and not all(Branch._can_split(a, count) for a in
+ alternatives):
+ count -= 1
+
+ # No common prefix is possible.
+ if count == 0:
+ return [], branches
+
+ # Rebuild the branches.
+ new_branches = []
+ for a in alternatives:
+ new_branches.append(make_sequence(a[count : ]))
+
+ return prefix[ : count], new_branches
+
+ @staticmethod
+ def _split_common_suffix(info, branches):
+ # Common trailing items can be moved out of the branches.
+ # Get the items in the branches.
+ alternatives = []
+ for b in branches:
+ if isinstance(b, Sequence):
+ alternatives.append(b.items)
+ else:
+ alternatives.append([b])
+
+ # What is the maximum possible length of the suffix?
+ max_count = min(len(a) for a in alternatives)
+
+ # What is the longest common suffix?
+ suffix = alternatives[0]
+ pos = -1
+ end_pos = -1 - max_count
+ while pos > end_pos and suffix[pos].can_be_affix() and all(a[pos] ==
+ suffix[pos] for a in alternatives):
+ pos -= 1
+ count = -1 - pos
+
+ if info.flags & UNICODE:
+ # We need to check that we're not splitting a sequence of
+ # characters which could form part of full case-folding.
+ while count > 0 and not all(Branch._can_split_rev(a, count) for a
+ in alternatives):
+ count -= 1
+
+ # No common suffix is possible.
+ if count == 0:
+ return [], branches
+
+ # Rebuild the branches.
+ new_branches = []
+ for a in alternatives:
+ new_branches.append(make_sequence(a[ : -count]))
+
+ return suffix[-count : ], new_branches
+
+ @staticmethod
+ def _can_split(items, count):
+ # Check the characters either side of the proposed split.
+ if not Branch._is_full_case(items, count - 1):
+ return True
+
+ if not Branch._is_full_case(items, count):
+ return True
+
+ # Check whether a 1-1 split would be OK.
+ if Branch._is_folded(items[count - 1 : count + 1]):
+ return False
+
+ # Check whether a 1-2 split would be OK.
+ if (Branch._is_full_case(items, count + 2) and
+ Branch._is_folded(items[count - 1 : count + 2])):
+ return False
+
+ # Check whether a 2-1 split would be OK.
+ if (Branch._is_full_case(items, count - 2) and
+ Branch._is_folded(items[count - 2 : count + 1])):
+ return False
+
+ return True
+
+ @staticmethod
+ def _can_split_rev(items, count):
+ end = len(items)
+
+ # Check the characters either side of the proposed split.
+ if not Branch._is_full_case(items, end - count):
+ return True
+
+ if not Branch._is_full_case(items, end - count - 1):
+ return True
+
+ # Check whether a 1-1 split would be OK.
+ if Branch._is_folded(items[end - count - 1 : end - count + 1]):
+ return False
+
+ # Check whether a 1-2 split would be OK.
+ if (Branch._is_full_case(items, end - count + 2) and
+ Branch._is_folded(items[end - count - 1 : end - count + 2])):
+ return False
+
+ # Check whether a 2-1 split would be OK.
+ if (Branch._is_full_case(items, end - count - 2) and
+ Branch._is_folded(items[end - count - 2 : end - count + 1])):
+ return False
+
+ return True
+
+ @staticmethod
+ def _merge_common_prefixes(info, reverse, branches):
+ # Branches with the same case-sensitive character prefix can be grouped
+ # together if they are separated only by other branches with a
+ # character prefix.
+ prefixed = defaultdict(list)
+ order = {}
+ new_branches = []
+ for b in branches:
+ if Branch._is_simple_character(b):
+ # Branch starts with a simple character.
+ prefixed[b.value].append([b])
+ order.setdefault(b.value, len(order))
+ elif (isinstance(b, Sequence) and b.items and
+ Branch._is_simple_character(b.items[0])):
+ # Branch starts with a simple character.
+ prefixed[b.items[0].value].append(b.items)
+ order.setdefault(b.items[0].value, len(order))
+ else:
+ Branch._flush_char_prefix(info, reverse, prefixed, order,
+ new_branches)
+
+ new_branches.append(b)
+
+ Branch._flush_char_prefix(info, reverse, prefixed, order, new_branches)
+
+ return new_branches
+
+ @staticmethod
+ def _is_simple_character(c):
+ return isinstance(c, Character) and c.positive and not c.case_flags
+
+ @staticmethod
+ def _reduce_to_set(info, reverse, branches):
+ # Can the branches be reduced to a set?
+ new_branches = []
+ items = set()
+ case_flags = NOCASE
+ for b in branches:
+ if isinstance(b, (Character, Property, SetBase)):
+ # Branch starts with a single character.
+ if b.case_flags != case_flags:
+ # Different case sensitivity, so flush.
+ Branch._flush_set_members(info, reverse, items, case_flags,
+ new_branches)
+
+ case_flags = b.case_flags
+
+ items.add(b.with_flags(case_flags=NOCASE))
+ else:
+ Branch._flush_set_members(info, reverse, items, case_flags,
+ new_branches)
+
+ new_branches.append(b)
+
+ Branch._flush_set_members(info, reverse, items, case_flags,
+ new_branches)
+
+ return new_branches
+
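+ # Example (illustrative): the branches of "a|b|c" each consist of a
+ # single positive character with the same case flags, so _reduce_to_set
+ # collapses them into the equivalent of the set [abc].
+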
+ @staticmethod
+ def _flush_char_prefix(info, reverse, prefixed, order, new_branches):
+ # Flush the prefixed branches.
+ if not prefixed:
+ return
+
+ for value, branches in sorted(prefixed.items(), key=lambda pair:
+ order[pair[0]]):
+ if len(branches) == 1:
+ new_branches.append(make_sequence(branches[0]))
+ else:
+ subbranches = []
+ optional = False
+ for b in branches:
+ if len(b) > 1:
+ subbranches.append(make_sequence(b[1 : ]))
+ elif not optional:
+ subbranches.append(Sequence())
+ optional = True
+
+ sequence = Sequence([Character(value), Branch(subbranches)])
+ new_branches.append(sequence.optimise(info, reverse))
+
+ prefixed.clear()
+ order.clear()
+
+ @staticmethod
+ def _flush_set_members(info, reverse, items, case_flags, new_branches):
+ # Flush the set members.
+ if not items:
+ return
+
+ if len(items) == 1:
+ item = list(items)[0]
+ else:
+ item = SetUnion(info, list(items)).optimise(info, reverse)
+
+ new_branches.append(item.with_flags(case_flags=case_flags))
+
+ items.clear()
+
+ @staticmethod
+ def _is_full_case(items, i):
+ if not 0 <= i < len(items):
+ return False
+
+ item = items[i]
+ return (isinstance(item, Character) and item.positive and
+ (item.case_flags & FULLIGNORECASE) == FULLIGNORECASE)
+
+ @staticmethod
+ def _is_folded(items):
+ if len(items) < 2:
+ return False
+
+ for i in items:
+ if (not isinstance(i, Character) or not i.positive or not
+ i.case_flags):
+ return False
+
+ folded = "".join(chr(i.value) for i in items)
+ folded = _regex.fold_case(FULL_CASE_FOLDING, folded)
+
+ # Get the characters which expand to multiple codepoints on folding.
+ expanding_chars = _regex.get_expand_on_folding()
+
+ for c in expanding_chars:
+ if folded == _regex.fold_case(FULL_CASE_FOLDING, c):
+ return True
+
+ return False
+
+ def is_empty(self):
+ return all(b.is_empty() for b in self.branches)
+
+ def __eq__(self, other):
+ return type(self) is type(other) and self.branches == other.branches
+
+ def max_width(self):
+ return max(b.max_width() for b in self.branches)
+
+class CallGroup(RegexBase):
+ def __init__(self, info, group, position):
+ RegexBase.__init__(self)
+ self.info = info
+ self.group = group
+ self.position = position
+
+ self._key = self.__class__, self.group
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ try:
+ self.group = int(self.group)
+ except ValueError:
+ try:
+ self.group = self.info.group_index[self.group]
+ except KeyError:
+ raise error("invalid group reference", pattern, self.position)
+
+ if not 0 <= self.group <= self.info.group_count:
+ raise error("unknown group", pattern, self.position)
+
+ if self.group > 0 and self.info.open_group_count[self.group] > 1:
+ raise error("ambiguous group reference", pattern, self.position)
+
+ self.info.group_calls.append((self, reverse, fuzzy))
+
+ self._key = self.__class__, self.group
+
+ def remove_captures(self):
+ raise error("group reference not allowed", None, self.position)
+
+ def _compile(self, reverse, fuzzy):
+ return [(OP.GROUP_CALL, self.call_ref)]
+
+ def dump(self, indent, reverse):
+ print("{}GROUP_CALL {}".format(INDENT * indent, self.group))
+
+ def __eq__(self, other):
+ return type(self) is type(other) and self.group == other.group
+
+ def max_width(self):
+ return UNLIMITED
+
+ def __del__(self):
+ self.info = None
+
+class CallRef(RegexBase):
+ def __init__(self, ref, parsed):
+ self.ref = ref
+ self.parsed = parsed
+
+ def _compile(self, reverse, fuzzy):
+ return ([(OP.CALL_REF, self.ref)] + self.parsed._compile(reverse,
+ fuzzy) + [(OP.END, )])
+
+class Character(RegexBase):
+ _opcode = {(NOCASE, False): OP.CHARACTER, (IGNORECASE, False):
+ OP.CHARACTER_IGN, (FULLCASE, False): OP.CHARACTER, (FULLIGNORECASE,
+ False): OP.CHARACTER_IGN, (NOCASE, True): OP.CHARACTER_REV, (IGNORECASE,
+ True): OP.CHARACTER_IGN_REV, (FULLCASE, True): OP.CHARACTER_REV,
+ (FULLIGNORECASE, True): OP.CHARACTER_IGN_REV}
+
+ def __init__(self, value, positive=True, case_flags=NOCASE,
+ zerowidth=False):
+ RegexBase.__init__(self)
+ self.value = value
+ self.positive = bool(positive)
+ self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags]
+ self.zerowidth = bool(zerowidth)
+
+ if (self.positive and (self.case_flags & FULLIGNORECASE) ==
+ FULLIGNORECASE):
+ self.folded = _regex.fold_case(FULL_CASE_FOLDING, chr(self.value))
+ else:
+ self.folded = chr(self.value)
+
+ self._key = (self.__class__, self.value, self.positive,
+ self.case_flags, self.zerowidth)
+
+ def rebuild(self, positive, case_flags, zerowidth):
+ return Character(self.value, positive, case_flags, zerowidth)
+
+ def optimise(self, info, reverse, in_set=False):
+ return self
+
+ def get_firstset(self, reverse):
+ return set([self])
+
+ def has_simple_start(self):
+ return True
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if self.positive:
+ flags |= POSITIVE_OP
+ if self.zerowidth:
+ flags |= ZEROWIDTH_OP
+ if fuzzy:
+ flags |= FUZZY_OP
+
+ code = PrecompiledCode([self._opcode[self.case_flags, reverse], flags,
+ self.value])
+
+ if len(self.folded) > 1:
+ # The character expands on full case-folding.
+ code = Branch([code, String([ord(c) for c in self.folded],
+ case_flags=self.case_flags)])
+
+ return code.compile(reverse, fuzzy)
+
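+ # Example (illustrative): under FULLIGNORECASE the character "ß"
+ # (U+00DF) case-folds to "ss", so _compile above emits a branch that
+ # matches either the single codepoint or its folded expansion.
+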
+ def dump(self, indent, reverse):
+ display = ascii(chr(self.value)).lstrip("bu")
+ print("{}CHARACTER {} {}{}".format(INDENT * indent,
+ POS_TEXT[self.positive], display, CASE_TEXT[self.case_flags]))
+
+ def matches(self, ch):
+ return (ch == self.value) == self.positive
+
+ def max_width(self):
+ return len(self.folded)
+
+ def get_required_string(self, reverse):
+ if not self.positive:
+ return 1, None
+
+ self.folded_characters = tuple(ord(c) for c in self.folded)
+
+ return 0, self
+
+class Conditional(RegexBase):
+ def __init__(self, info, group, yes_item, no_item, position):
+ RegexBase.__init__(self)
+ self.info = info
+ self.group = group
+ self.yes_item = yes_item
+ self.no_item = no_item
+ self.position = position
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ try:
+ self.group = int(self.group)
+ except ValueError:
+ try:
+ self.group = self.info.group_index[self.group]
+ except KeyError:
+ if self.group == 'DEFINE':
+ # 'DEFINE' is a special name unless there's a group with
+ # that name.
+ self.group = 0
+ else:
+ raise error("unknown group", pattern, self.position)
+
+ if not 0 <= self.group <= self.info.group_count:
+ raise error("invalid group reference", pattern, self.position)
+
+ self.yes_item.fix_groups(pattern, reverse, fuzzy)
+ self.no_item.fix_groups(pattern, reverse, fuzzy)
+
+ def optimise(self, info, reverse):
+ yes_item = self.yes_item.optimise(info, reverse)
+ no_item = self.no_item.optimise(info, reverse)
+
+ return Conditional(info, self.group, yes_item, no_item, self.position)
+
+ def pack_characters(self, info):
+ self.yes_item = self.yes_item.pack_characters(info)
+ self.no_item = self.no_item.pack_characters(info)
+ return self
+
+ def remove_captures(self):
+ self.yes_item = self.yes_item.remove_captures()
+ self.no_item = self.no_item.remove_captures()
+ return self
+
+ def is_atomic(self):
+ return self.yes_item.is_atomic() and self.no_item.is_atomic()
+
+ def can_be_affix(self):
+ return self.yes_item.can_be_affix() and self.no_item.can_be_affix()
+
+ def contains_group(self):
+ return self.yes_item.contains_group() or self.no_item.contains_group()
+
+ def get_firstset(self, reverse):
+ return (self.yes_item.get_firstset(reverse) |
+ self.no_item.get_firstset(reverse))
+
+ def _compile(self, reverse, fuzzy):
+ code = [(OP.GROUP_EXISTS, self.group)]
+ code.extend(self.yes_item.compile(reverse, fuzzy))
+ add_code = self.no_item.compile(reverse, fuzzy)
+ if add_code:
+ code.append((OP.NEXT, ))
+ code.extend(add_code)
+
+ code.append((OP.END, ))
+
+ return code
+
+ def dump(self, indent, reverse):
+ print("{}GROUP_EXISTS {}".format(INDENT * indent, self.group))
+ self.yes_item.dump(indent + 1, reverse)
+ if not self.no_item.is_empty():
+ print("{}OR".format(INDENT * indent))
+ self.no_item.dump(indent + 1, reverse)
+
+ def is_empty(self):
+ return self.yes_item.is_empty() and self.no_item.is_empty()
+
+ def __eq__(self, other):
+ return type(self) is type(other) and (self.group, self.yes_item,
+ self.no_item) == (other.group, other.yes_item, other.no_item)
+
+ def max_width(self):
+ return max(self.yes_item.max_width(), self.no_item.max_width())
+
+ def __del__(self):
+ self.info = None
+
+class DefaultBoundary(ZeroWidthBase):
+ _opcode = OP.DEFAULT_BOUNDARY
+ _op_name = "DEFAULT_BOUNDARY"
+
+class DefaultEndOfWord(ZeroWidthBase):
+ _opcode = OP.DEFAULT_END_OF_WORD
+ _op_name = "DEFAULT_END_OF_WORD"
+
+class DefaultStartOfWord(ZeroWidthBase):
+ _opcode = OP.DEFAULT_START_OF_WORD
+ _op_name = "DEFAULT_START_OF_WORD"
+
+class EndOfLine(ZeroWidthBase):
+ _opcode = OP.END_OF_LINE
+ _op_name = "END_OF_LINE"
+
+class EndOfLineU(EndOfLine):
+ _opcode = OP.END_OF_LINE_U
+ _op_name = "END_OF_LINE_U"
+
+class EndOfString(ZeroWidthBase):
+ _opcode = OP.END_OF_STRING
+ _op_name = "END_OF_STRING"
+
+class EndOfStringLine(ZeroWidthBase):
+ _opcode = OP.END_OF_STRING_LINE
+ _op_name = "END_OF_STRING_LINE"
+
+class EndOfStringLineU(EndOfStringLine):
+ _opcode = OP.END_OF_STRING_LINE_U
+ _op_name = "END_OF_STRING_LINE_U"
+
+class EndOfWord(ZeroWidthBase):
+ _opcode = OP.END_OF_WORD
+ _op_name = "END_OF_WORD"
+
+class Failure(ZeroWidthBase):
+ _op_name = "FAILURE"
+
+ def _compile(self, reverse, fuzzy):
+ return [(OP.FAILURE, )]
+
+class Fuzzy(RegexBase):
+ def __init__(self, subpattern, constraints=None):
+ RegexBase.__init__(self)
+ if constraints is None:
+ constraints = {}
+ self.subpattern = subpattern
+ self.constraints = constraints
+
+ # If an error type is mentioned in the cost equation, then its maximum
+ # defaults to unlimited.
+ if "cost" in constraints:
+ for e in "dis":
+ if e in constraints["cost"]:
+ constraints.setdefault(e, (0, None))
+
+ # If any error type is mentioned, then all the error maxima default to
+ # 0, otherwise they default to unlimited.
+ if set(constraints) & set("dis"):
+ for e in "dis":
+ constraints.setdefault(e, (0, 0))
+ else:
+ for e in "dis":
+ constraints.setdefault(e, (0, None))
+
+ # The maximum of the generic error type defaults to unlimited.
+ constraints.setdefault("e", (0, None))
+
+ # The cost equation defaults to equal costs. Also, the cost of any
+ # error type not mentioned in the cost equation defaults to 0.
+ if "cost" in constraints:
+ for e in "dis":
+ constraints["cost"].setdefault(e, 0)
+ else:
+ constraints["cost"] = {"d": 1, "i": 1, "s": 1, "max":
+ constraints["e"][1]}
+
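+ # Example (illustrative): for "(?:cat){e<=2}" the parsed constraints are
+ # {"e": (0, 2)}; __init__ then fills in the defaults, giving d/i/s limits
+ # of (0, None) and the cost equation {"d": 1, "i": 1, "s": 1, "max": 2}.
+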
+ def fix_groups(self, pattern, reverse, fuzzy):
+ self.subpattern.fix_groups(pattern, reverse, True)
+
+ def pack_characters(self, info):
+ self.subpattern = self.subpattern.pack_characters(info)
+ return self
+
+ def remove_captures(self):
+ self.subpattern = self.subpattern.remove_captures()
+ return self
+
+ def is_atomic(self):
+ return self.subpattern.is_atomic()
+
+ def contains_group(self):
+ return self.subpattern.contains_group()
+
+ def _compile(self, reverse, fuzzy):
+ # The individual limits.
+ arguments = []
+ for e in "dise":
+ v = self.constraints[e]
+ arguments.append(v[0])
+ arguments.append(UNLIMITED if v[1] is None else v[1])
+
+ # The coeffs of the cost equation.
+ for e in "dis":
+ arguments.append(self.constraints["cost"][e])
+
+ # The maximum of the cost equation.
+ v = self.constraints["cost"]["max"]
+ arguments.append(UNLIMITED if v is None else v)
+
+ flags = 0
+ if reverse:
+ flags |= REVERSE_OP
+
+ test = self.constraints.get("test")
+
+ if test:
+ return ([(OP.FUZZY_EXT, flags) + tuple(arguments)] +
+ test.compile(reverse, True) + [(OP.NEXT,)] +
+ self.subpattern.compile(reverse, True) + [(OP.END,)])
+
+ return ([(OP.FUZZY, flags) + tuple(arguments)] +
+ self.subpattern.compile(reverse, True) + [(OP.END,)])
+
+ def dump(self, indent, reverse):
+ constraints = self._constraints_to_string()
+ if constraints:
+ constraints = " " + constraints
+ print("{}FUZZY{}".format(INDENT * indent, constraints))
+ self.subpattern.dump(indent + 1, reverse)
+
+ def is_empty(self):
+ return self.subpattern.is_empty()
+
+ def __eq__(self, other):
+ return (type(self) is type(other) and self.subpattern ==
+ other.subpattern and self.constraints == other.constraints)
+
+ def max_width(self):
+ return UNLIMITED
+
+ def _constraints_to_string(self):
+ constraints = []
+
+ for name in "ids":
+ min, max = self.constraints[name]
+ if max == 0:
+ continue
+
+ con = ""
+
+ if min > 0:
+ con = "{}<=".format(min)
+
+ con += name
+
+ if max is not None:
+ con += "<={}".format(max)
+
+ constraints.append(con)
+
+ cost = []
+ for name in "ids":
+ coeff = self.constraints["cost"][name]
+ if coeff > 0:
+ cost.append("{}{}".format(coeff, name))
+
+ limit = self.constraints["cost"]["max"]
+ if limit is not None and limit > 0:
+ cost = "{}<={}".format("+".join(cost), limit)
+ constraints.append(cost)
+
+ return ",".join(constraints)
+
+class Grapheme(RegexBase):
+ def _compile(self, reverse, fuzzy):
+ # Match at least 1 character until a grapheme boundary is reached. Note
+ # that this is the same whether matching forwards or backwards.
+ grapheme_matcher = Atomic(Sequence([LazyRepeat(AnyAll(), 1, None),
+ GraphemeBoundary()]))
+
+ return grapheme_matcher.compile(reverse, fuzzy)
+
+ def dump(self, indent, reverse):
+ print("{}GRAPHEME".format(INDENT * indent))
+
+ def max_width(self):
+ return UNLIMITED
+
+class GraphemeBoundary:
+ def compile(self, reverse, fuzzy):
+ return [(OP.GRAPHEME_BOUNDARY, 1)]
+
+class GreedyRepeat(RegexBase):
+ _opcode = OP.GREEDY_REPEAT
+ _op_name = "GREEDY_REPEAT"
+
+ def __init__(self, subpattern, min_count, max_count):
+ RegexBase.__init__(self)
+ self.subpattern = subpattern
+ self.min_count = min_count
+ self.max_count = max_count
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ self.subpattern.fix_groups(pattern, reverse, fuzzy)
+
+ def optimise(self, info, reverse):
+ subpattern = self.subpattern.optimise(info, reverse)
+
+ return type(self)(subpattern, self.min_count, self.max_count)
+
+ def pack_characters(self, info):
+ self.subpattern = self.subpattern.pack_characters(info)
+ return self
+
+ def remove_captures(self):
+ self.subpattern = self.subpattern.remove_captures()
+ return self
+
+ def is_atomic(self):
+ return self.min_count == self.max_count and self.subpattern.is_atomic()
+
+ def can_be_affix(self):
+ return False
+
+ def contains_group(self):
+ return self.subpattern.contains_group()
+
+ def get_firstset(self, reverse):
+ fs = self.subpattern.get_firstset(reverse)
+ if self.min_count == 0:
+ fs.add(None)
+
+ return fs
+
+ def _compile(self, reverse, fuzzy):
+ repeat = [self._opcode, self.min_count]
+ if self.max_count is None:
+ repeat.append(UNLIMITED)
+ else:
+ repeat.append(self.max_count)
+
+ subpattern = self.subpattern.compile(reverse, fuzzy)
+ if not subpattern:
+ return []
+
+ return ([tuple(repeat)] + subpattern + [(OP.END, )])
+
+ def dump(self, indent, reverse):
+ if self.max_count is None:
+ limit = "INF"
+ else:
+ limit = self.max_count
+ print("{}{} {} {}".format(INDENT * indent, self._op_name,
+ self.min_count, limit))
+
+ self.subpattern.dump(indent + 1, reverse)
+
+ def is_empty(self):
+ return self.subpattern.is_empty()
+
+ def __eq__(self, other):
+ return type(self) is type(other) and (self.subpattern, self.min_count,
+ self.max_count) == (other.subpattern, other.min_count,
+ other.max_count)
+
+ def max_width(self):
+ if self.max_count is None:
+ return UNLIMITED
+
+ return self.subpattern.max_width() * self.max_count
+
+ def get_required_string(self, reverse):
+ max_count = UNLIMITED if self.max_count is None else self.max_count
+ if self.min_count == 0:
+ w = self.subpattern.max_width() * max_count
+ return min(w, UNLIMITED), None
+
+ ofs, req = self.subpattern.get_required_string(reverse)
+ if req:
+ return ofs, req
+
+ w = self.subpattern.max_width() * max_count
+ return min(w, UNLIMITED), None
+
+class PossessiveRepeat(GreedyRepeat):
+ def is_atomic(self):
+ return True
+
+ def _compile(self, reverse, fuzzy):
+ subpattern = self.subpattern.compile(reverse, fuzzy)
+ if not subpattern:
+ return []
+
+ repeat = [self._opcode, self.min_count]
+ if self.max_count is None:
+ repeat.append(UNLIMITED)
+ else:
+ repeat.append(self.max_count)
+
+ return ([(OP.ATOMIC, ), tuple(repeat)] + subpattern + [(OP.END, ),
+ (OP.END, )])
+
+ def dump(self, indent, reverse):
+ print("{}ATOMIC".format(INDENT * indent))
+
+ if self.max_count is None:
+ limit = "INF"
+ else:
+ limit = self.max_count
+ print("{}{} {} {}".format(INDENT * (indent + 1), self._op_name,
+ self.min_count, limit))
+
+ self.subpattern.dump(indent + 2, reverse)
+
+class Group(RegexBase):
+ def __init__(self, info, group, subpattern):
+ RegexBase.__init__(self)
+ self.info = info
+ self.group = group
+ self.subpattern = subpattern
+
+ self.call_ref = None
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ self.info.defined_groups[self.group] = (self, reverse, fuzzy)
+ self.subpattern.fix_groups(pattern, reverse, fuzzy)
+
+ def optimise(self, info, reverse):
+ subpattern = self.subpattern.optimise(info, reverse)
+
+ return Group(self.info, self.group, subpattern)
+
+ def pack_characters(self, info):
+ self.subpattern = self.subpattern.pack_characters(info)
+ return self
+
+ def remove_captures(self):
+ return self.subpattern.remove_captures()
+
+ def is_atomic(self):
+ return self.subpattern.is_atomic()
+
+ def can_be_affix(self):
+ return False
+
+ def contains_group(self):
+ return True
+
+ def get_firstset(self, reverse):
+ return self.subpattern.get_firstset(reverse)
+
+ def has_simple_start(self):
+ return self.subpattern.has_simple_start()
+
+ def _compile(self, reverse, fuzzy):
+ code = []
+
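+        # Nested named groups were given negative "private" aliases while
+        # parsing; map the alias back to its public group number and place
+        # the private copy above group_count (see Info.open_group).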
+ public_group = private_group = self.group
+ if private_group < 0:
+ public_group = self.info.private_groups[private_group]
+ private_group = self.info.group_count - private_group
+
+ key = self.group, reverse, fuzzy
+ ref = self.info.call_refs.get(key)
+ if ref is not None:
+ code += [(OP.CALL_REF, ref)]
+
+ code += [(OP.GROUP, int(not reverse), private_group, public_group)]
+ code += self.subpattern.compile(reverse, fuzzy)
+ code += [(OP.END, )]
+
+ if ref is not None:
+ code += [(OP.END, )]
+
+ return code
+
+ def dump(self, indent, reverse):
+ group = self.group
+ if group < 0:
+            group = self.info.private_groups[group]
+ print("{}GROUP {}".format(INDENT * indent, group))
+ self.subpattern.dump(indent + 1, reverse)
+
+ def __eq__(self, other):
+ return (type(self) is type(other) and (self.group, self.subpattern) ==
+ (other.group, other.subpattern))
+
+ def max_width(self):
+ return self.subpattern.max_width()
+
+ def get_required_string(self, reverse):
+ return self.subpattern.get_required_string(reverse)
+
+ def __del__(self):
+ self.info = None
+
+class Keep(ZeroWidthBase):
+ _opcode = OP.KEEP
+ _op_name = "KEEP"
+
+class LazyRepeat(GreedyRepeat):
+ _opcode = OP.LAZY_REPEAT
+ _op_name = "LAZY_REPEAT"
+
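+# A lookaround assertion ((?=...), (?!...), (?<=...), (?<!...)): zero-width,
+# so its max_width() is 0; 'behind' selects lookbehind and flips the
+# direction in which the subpattern is matched.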
+class LookAround(RegexBase):
+ _dir_text = {False: "AHEAD", True: "BEHIND"}
+
+ def __init__(self, behind, positive, subpattern):
+ RegexBase.__init__(self)
+ self.behind = bool(behind)
+ self.positive = bool(positive)
+ self.subpattern = subpattern
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ self.subpattern.fix_groups(pattern, self.behind, fuzzy)
+
+ def optimise(self, info, reverse):
+ subpattern = self.subpattern.optimise(info, self.behind)
+ if self.positive and subpattern.is_empty():
+ return subpattern
+
+ return LookAround(self.behind, self.positive, subpattern)
+
+ def pack_characters(self, info):
+ self.subpattern = self.subpattern.pack_characters(info)
+ return self
+
+ def remove_captures(self):
+ return self.subpattern.remove_captures()
+
+ def is_atomic(self):
+ return self.subpattern.is_atomic()
+
+ def can_be_affix(self):
+ return self.subpattern.can_be_affix()
+
+ def contains_group(self):
+ return self.subpattern.contains_group()
+
+ def get_firstset(self, reverse):
+ if self.positive and self.behind == reverse:
+ return self.subpattern.get_firstset(reverse)
+
+ return set([None])
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if self.positive:
+ flags |= POSITIVE_OP
+ if fuzzy:
+ flags |= FUZZY_OP
+ if reverse:
+ flags |= REVERSE_OP
+
+ return ([(OP.LOOKAROUND, flags, int(not self.behind))] +
+ self.subpattern.compile(self.behind) + [(OP.END, )])
+
+ def dump(self, indent, reverse):
+ print("{}LOOK{} {}".format(INDENT * indent,
+ self._dir_text[self.behind], POS_TEXT[self.positive]))
+ self.subpattern.dump(indent + 1, self.behind)
+
+ def is_empty(self):
+ return self.positive and self.subpattern.is_empty()
+
+ def __eq__(self, other):
+ return type(self) is type(other) and (self.behind, self.positive,
+ self.subpattern) == (other.behind, other.positive, other.subpattern)
+
+ def max_width(self):
+ return 0
+
+class LookAroundConditional(RegexBase):
+ _dir_text = {False: "AHEAD", True: "BEHIND"}
+
+ def __init__(self, behind, positive, subpattern, yes_item, no_item):
+ RegexBase.__init__(self)
+ self.behind = bool(behind)
+ self.positive = bool(positive)
+ self.subpattern = subpattern
+ self.yes_item = yes_item
+ self.no_item = no_item
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ self.subpattern.fix_groups(pattern, reverse, fuzzy)
+ self.yes_item.fix_groups(pattern, reverse, fuzzy)
+ self.no_item.fix_groups(pattern, reverse, fuzzy)
+
+ def optimise(self, info, reverse):
+ subpattern = self.subpattern.optimise(info, self.behind)
+ yes_item = self.yes_item.optimise(info, self.behind)
+ no_item = self.no_item.optimise(info, self.behind)
+
+ return LookAroundConditional(self.behind, self.positive, subpattern,
+ yes_item, no_item)
+
+ def pack_characters(self, info):
+ self.subpattern = self.subpattern.pack_characters(info)
+ self.yes_item = self.yes_item.pack_characters(info)
+ self.no_item = self.no_item.pack_characters(info)
+ return self
+
+ def remove_captures(self):
+ self.subpattern = self.subpattern.remove_captures()
+ self.yes_item = self.yes_item.remove_captures()
+ self.no_item = self.no_item.remove_captures()
+
+ def is_atomic(self):
+ return (self.subpattern.is_atomic() and self.yes_item.is_atomic() and
+ self.no_item.is_atomic())
+
+ def can_be_affix(self):
+ return (self.subpattern.can_be_affix() and self.yes_item.can_be_affix()
+ and self.no_item.can_be_affix())
+
+ def contains_group(self):
+ return (self.subpattern.contains_group() or
+ self.yes_item.contains_group() or self.no_item.contains_group())
+
+ def _compile(self, reverse, fuzzy):
+ code = [(OP.CONDITIONAL, int(self.positive), int(not self.behind))]
+ code.extend(self.subpattern.compile(self.behind, fuzzy))
+ code.append((OP.NEXT, ))
+ code.extend(self.yes_item.compile(reverse, fuzzy))
+ add_code = self.no_item.compile(reverse, fuzzy)
+ if add_code:
+ code.append((OP.NEXT, ))
+ code.extend(add_code)
+
+ code.append((OP.END, ))
+
+ return code
+
+ def dump(self, indent, reverse):
+ print("{}CONDITIONAL {} {}".format(INDENT * indent,
+ self._dir_text[self.behind], POS_TEXT[self.positive]))
+ self.subpattern.dump(indent + 1, self.behind)
+ print("{}EITHER".format(INDENT * indent))
+ self.yes_item.dump(indent + 1, reverse)
+ if not self.no_item.is_empty():
+ print("{}OR".format(INDENT * indent))
+ self.no_item.dump(indent + 1, reverse)
+
+ def is_empty(self):
+ return (self.subpattern.is_empty() and self.yes_item.is_empty() or
+ self.no_item.is_empty())
+
+ def __eq__(self, other):
+ return type(self) is type(other) and (self.subpattern, self.yes_item,
+ self.no_item) == (other.subpattern, other.yes_item, other.no_item)
+
+ def max_width(self):
+ return max(self.yes_item.max_width(), self.no_item.max_width())
+
+ def get_required_string(self, reverse):
+ return self.max_width(), None
+
+class PrecompiledCode(RegexBase):
+ def __init__(self, code):
+ self.code = code
+
+ def _compile(self, reverse, fuzzy):
+ return [tuple(self.code)]
+
+class Property(RegexBase):
+ _opcode = {(NOCASE, False): OP.PROPERTY, (IGNORECASE, False):
+ OP.PROPERTY_IGN, (FULLCASE, False): OP.PROPERTY, (FULLIGNORECASE, False):
+ OP.PROPERTY_IGN, (NOCASE, True): OP.PROPERTY_REV, (IGNORECASE, True):
+ OP.PROPERTY_IGN_REV, (FULLCASE, True): OP.PROPERTY_REV, (FULLIGNORECASE,
+ True): OP.PROPERTY_IGN_REV}
+
+ def __init__(self, value, positive=True, case_flags=NOCASE,
+ zerowidth=False):
+ RegexBase.__init__(self)
+ self.value = value
+ self.positive = bool(positive)
+ self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags]
+ self.zerowidth = bool(zerowidth)
+
+ self._key = (self.__class__, self.value, self.positive,
+ self.case_flags, self.zerowidth)
+
+ def rebuild(self, positive, case_flags, zerowidth):
+ return Property(self.value, positive, case_flags, zerowidth)
+
+ def optimise(self, info, reverse, in_set=False):
+ return self
+
+ def get_firstset(self, reverse):
+ return set([self])
+
+ def has_simple_start(self):
+ return True
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if self.positive:
+ flags |= POSITIVE_OP
+ if self.zerowidth:
+ flags |= ZEROWIDTH_OP
+ if fuzzy:
+ flags |= FUZZY_OP
+ return [(self._opcode[self.case_flags, reverse], flags, self.value)]
+
+ def dump(self, indent, reverse):
+ prop = PROPERTY_NAMES[self.value >> 16]
+ name, value = prop[0], prop[1][self.value & 0xFFFF]
+ print("{}PROPERTY {} {}:{}{}".format(INDENT * indent,
+ POS_TEXT[self.positive], name, value, CASE_TEXT[self.case_flags]))
+
+ def matches(self, ch):
+ return _regex.has_property_value(self.value, ch) == self.positive
+
+ def max_width(self):
+ return 1
+
+class Prune(ZeroWidthBase):
+ _op_name = "PRUNE"
+
+ def _compile(self, reverse, fuzzy):
+ return [(OP.PRUNE, )]
+
+class Range(RegexBase):
+ _opcode = {(NOCASE, False): OP.RANGE, (IGNORECASE, False): OP.RANGE_IGN,
+ (FULLCASE, False): OP.RANGE, (FULLIGNORECASE, False): OP.RANGE_IGN,
+ (NOCASE, True): OP.RANGE_REV, (IGNORECASE, True): OP.RANGE_IGN_REV,
+ (FULLCASE, True): OP.RANGE_REV, (FULLIGNORECASE, True): OP.RANGE_IGN_REV}
+ _op_name = "RANGE"
+
+ def __init__(self, lower, upper, positive=True, case_flags=NOCASE,
+ zerowidth=False):
+ RegexBase.__init__(self)
+ self.lower = lower
+ self.upper = upper
+ self.positive = bool(positive)
+ self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags]
+ self.zerowidth = bool(zerowidth)
+
+ self._key = (self.__class__, self.lower, self.upper, self.positive,
+ self.case_flags, self.zerowidth)
+
+ def rebuild(self, positive, case_flags, zerowidth):
+ return Range(self.lower, self.upper, positive, case_flags, zerowidth)
+
+ def optimise(self, info, reverse, in_set=False):
+ # Is the range case-sensitive?
+ if not self.positive or not (self.case_flags & IGNORECASE) or in_set:
+ return self
+
+ # Is full case-folding possible?
+ if (not (info.flags & UNICODE) or (self.case_flags & FULLIGNORECASE) !=
+ FULLIGNORECASE):
+ return self
+
+ # Get the characters which expand to multiple codepoints on folding.
+ expanding_chars = _regex.get_expand_on_folding()
+
+ # Get the folded characters in the range.
+ items = []
+ for ch in expanding_chars:
+ if self.lower <= ord(ch) <= self.upper:
+ folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
+ items.append(String([ord(c) for c in folded],
+ case_flags=self.case_flags))
+
+ if not items:
+ # We can fall back to simple case-folding.
+ return self
+
+ if len(items) < self.upper - self.lower + 1:
+ # Not all the characters are covered by the full case-folding.
+ items.insert(0, self)
+
+ return Branch(items)
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if self.positive:
+ flags |= POSITIVE_OP
+ if self.zerowidth:
+ flags |= ZEROWIDTH_OP
+ if fuzzy:
+ flags |= FUZZY_OP
+ return [(self._opcode[self.case_flags, reverse], flags, self.lower,
+ self.upper)]
+
+ def dump(self, indent, reverse):
+ display_lower = ascii(chr(self.lower)).lstrip("bu")
+ display_upper = ascii(chr(self.upper)).lstrip("bu")
+ print("{}RANGE {} {} {}{}".format(INDENT * indent,
+ POS_TEXT[self.positive], display_lower, display_upper,
+ CASE_TEXT[self.case_flags]))
+
+ def matches(self, ch):
+ return (self.lower <= ch <= self.upper) == self.positive
+
+ def max_width(self):
+ return 1
+
+class RefGroup(RegexBase):
+ _opcode = {(NOCASE, False): OP.REF_GROUP, (IGNORECASE, False):
+ OP.REF_GROUP_IGN, (FULLCASE, False): OP.REF_GROUP, (FULLIGNORECASE,
+ False): OP.REF_GROUP_FLD, (NOCASE, True): OP.REF_GROUP_REV, (IGNORECASE,
+ True): OP.REF_GROUP_IGN_REV, (FULLCASE, True): OP.REF_GROUP_REV,
+ (FULLIGNORECASE, True): OP.REF_GROUP_FLD_REV}
+
+ def __init__(self, info, group, position, case_flags=NOCASE):
+ RegexBase.__init__(self)
+ self.info = info
+ self.group = group
+ self.position = position
+ self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags]
+
+ self._key = self.__class__, self.group, self.case_flags
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ try:
+ self.group = int(self.group)
+ except ValueError:
+ try:
+ self.group = self.info.group_index[self.group]
+ except KeyError:
+ raise error("unknown group", pattern, self.position)
+
+ if not 1 <= self.group <= self.info.group_count:
+ raise error("invalid group reference", pattern, self.position)
+
+ self._key = self.__class__, self.group, self.case_flags
+
+ def remove_captures(self):
+        # The pattern string isn't available here, so report only the
+        # position.
+        raise error("group reference not allowed", None, self.position)
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if fuzzy:
+ flags |= FUZZY_OP
+ return [(self._opcode[self.case_flags, reverse], flags, self.group)]
+
+ def dump(self, indent, reverse):
+ print("{}REF_GROUP {}{}".format(INDENT * indent, self.group,
+ CASE_TEXT[self.case_flags]))
+
+ def max_width(self):
+ return UNLIMITED
+
+ def __del__(self):
+ self.info = None
+
+class SearchAnchor(ZeroWidthBase):
+ _opcode = OP.SEARCH_ANCHOR
+ _op_name = "SEARCH_ANCHOR"
+
+class Sequence(RegexBase):
+ def __init__(self, items=None):
+ RegexBase.__init__(self)
+ if items is None:
+ items = []
+
+ self.items = items
+
+ def fix_groups(self, pattern, reverse, fuzzy):
+ for s in self.items:
+ s.fix_groups(pattern, reverse, fuzzy)
+
+ def optimise(self, info, reverse):
+ # Flatten the sequences.
+ items = []
+ for s in self.items:
+ s = s.optimise(info, reverse)
+ if isinstance(s, Sequence):
+ items.extend(s.items)
+ else:
+ items.append(s)
+
+ return make_sequence(items)
+
+ def pack_characters(self, info):
+ "Packs sequences of characters into strings."
+ items = []
+ characters = []
+ case_flags = NOCASE
+ for s in self.items:
+ if type(s) is Character and s.positive and not s.zerowidth:
+ if s.case_flags != case_flags:
+ # Different case sensitivity, so flush, unless neither the
+                    # previous nor the new character is cased.
+ if s.case_flags or is_cased_i(info, s.value):
+ Sequence._flush_characters(info, characters,
+ case_flags, items)
+
+ case_flags = s.case_flags
+
+ characters.append(s.value)
+ elif type(s) is String or type(s) is Literal:
+ if s.case_flags != case_flags:
+                    # Different case sensitivity, so flush, unless neither
+                    # the previous nor the new string is cased.
+ if s.case_flags or any(is_cased_i(info, c) for c in
+ characters):
+ Sequence._flush_characters(info, characters,
+ case_flags, items)
+
+ case_flags = s.case_flags
+
+ characters.extend(s.characters)
+ else:
+ Sequence._flush_characters(info, characters, case_flags, items)
+
+ items.append(s.pack_characters(info))
+
+ Sequence._flush_characters(info, characters, case_flags, items)
+
+ return make_sequence(items)
+
+ def remove_captures(self):
+ self.items = [s.remove_captures() for s in self.items]
+ return self
+
+ def is_atomic(self):
+ return all(s.is_atomic() for s in self.items)
+
+ def can_be_affix(self):
+ return False
+
+ def contains_group(self):
+ return any(s.contains_group() for s in self.items)
+
+ def get_firstset(self, reverse):
+ fs = set()
+ items = self.items
+ if reverse:
+ items.reverse()
+ for s in items:
+ fs |= s.get_firstset(reverse)
+ if None not in fs:
+ return fs
+ fs.discard(None)
+
+ return fs | set([None])
+
+ def has_simple_start(self):
+ return bool(self.items) and self.items[0].has_simple_start()
+
+ def _compile(self, reverse, fuzzy):
+ seq = self.items
+ if reverse:
+ seq = seq[::-1]
+
+ code = []
+ for s in seq:
+ code.extend(s.compile(reverse, fuzzy))
+
+ return code
+
+ def dump(self, indent, reverse):
+ for s in self.items:
+ s.dump(indent, reverse)
+
+ @staticmethod
+ def _flush_characters(info, characters, case_flags, items):
+ if not characters:
+ return
+
+ # Disregard case_flags if all of the characters are case-less.
+ if case_flags & IGNORECASE:
+ if not any(is_cased_i(info, c) for c in characters):
+ case_flags = NOCASE
+
+ if (case_flags & FULLIGNORECASE) == FULLIGNORECASE:
+ literals = Sequence._fix_full_casefold(characters)
+
+ for item in literals:
+ chars = item.characters
+
+ if len(chars) == 1:
+ items.append(Character(chars[0], case_flags=item.case_flags))
+ else:
+ items.append(String(chars, case_flags=item.case_flags))
+ else:
+ if len(characters) == 1:
+ items.append(Character(characters[0], case_flags=case_flags))
+ else:
+ items.append(String(characters, case_flags=case_flags))
+
+ characters[:] = []
+
+ @staticmethod
+ def _fix_full_casefold(characters):
+ # Split a literal needing full case-folding into chunks that need it
+ # and chunks that can use simple case-folding, which is faster.
+ expanded = [_regex.fold_case(FULL_CASE_FOLDING, c) for c in
+ _regex.get_expand_on_folding()]
+ string = _regex.fold_case(FULL_CASE_FOLDING, ''.join(chr(c)
+ for c in characters)).lower()
+ chunks = []
+
+ for e in expanded:
+ found = string.find(e)
+
+ while found >= 0:
+ chunks.append((found, found + len(e)))
+ found = string.find(e, found + 1)
+
+ pos = 0
+ literals = []
+
+ for start, end in Sequence._merge_chunks(chunks):
+ if pos < start:
+ literals.append(Literal(characters[pos : start],
+ case_flags=IGNORECASE))
+
+ literals.append(Literal(characters[start : end],
+ case_flags=FULLIGNORECASE))
+ pos = end
+
+ if pos < len(characters):
+ literals.append(Literal(characters[pos : ], case_flags=IGNORECASE))
+
+ return literals
+
+ @staticmethod
+ def _merge_chunks(chunks):
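+        # Merge overlapping or adjacent (start, end) spans into a sorted
+        # list of disjoint spans.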
+ if len(chunks) < 2:
+ return chunks
+
+ chunks.sort()
+
+ start, end = chunks[0]
+ new_chunks = []
+
+ for s, e in chunks[1 : ]:
+ if s <= end:
+ end = max(end, e)
+ else:
+ new_chunks.append((start, end))
+ start, end = s, e
+
+ new_chunks.append((start, end))
+
+ return new_chunks
+
+ def is_empty(self):
+ return all(i.is_empty() for i in self.items)
+
+ def __eq__(self, other):
+ return type(self) is type(other) and self.items == other.items
+
+ def max_width(self):
+ return sum(s.max_width() for s in self.items)
+
+ def get_required_string(self, reverse):
+ seq = self.items
+ if reverse:
+ seq = seq[::-1]
+
+ offset = 0
+
+ for s in seq:
+ ofs, req = s.get_required_string(reverse)
+ offset += ofs
+ if req:
+ return offset, req
+
+ return offset, None
+
+class SetBase(RegexBase):
+ def __init__(self, info, items, positive=True, case_flags=NOCASE,
+ zerowidth=False):
+ RegexBase.__init__(self)
+ self.info = info
+ self.items = tuple(items)
+ self.positive = bool(positive)
+ self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags]
+ self.zerowidth = bool(zerowidth)
+
+ self.char_width = 1
+
+ self._key = (self.__class__, self.items, self.positive,
+ self.case_flags, self.zerowidth)
+
+ def rebuild(self, positive, case_flags, zerowidth):
+ return type(self)(self.info, self.items, positive, case_flags,
+ zerowidth).optimise(self.info, False)
+
+ def get_firstset(self, reverse):
+ return set([self])
+
+ def has_simple_start(self):
+ return True
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if self.positive:
+ flags |= POSITIVE_OP
+ if self.zerowidth:
+ flags |= ZEROWIDTH_OP
+ if fuzzy:
+ flags |= FUZZY_OP
+ code = [(self._opcode[self.case_flags, reverse], flags)]
+ for m in self.items:
+ code.extend(m.compile())
+
+ code.append((OP.END, ))
+
+ return code
+
+ def dump(self, indent, reverse):
+ print("{}{} {}{}".format(INDENT * indent, self._op_name,
+ POS_TEXT[self.positive], CASE_TEXT[self.case_flags]))
+ for i in self.items:
+ i.dump(indent + 1, reverse)
+
+ def _handle_case_folding(self, info, in_set):
+ # Is the set case-sensitive?
+ if not self.positive or not (self.case_flags & IGNORECASE) or in_set:
+ return self
+
+ # Is full case-folding possible?
+ if (not (self.info.flags & UNICODE) or (self.case_flags &
+ FULLIGNORECASE) != FULLIGNORECASE):
+ return self
+
+ # Get the characters which expand to multiple codepoints on folding.
+ expanding_chars = _regex.get_expand_on_folding()
+
+ # Get the folded characters in the set.
+ items = []
+ seen = set()
+ for ch in expanding_chars:
+ if self.matches(ord(ch)):
+ folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
+ if folded not in seen:
+ items.append(String([ord(c) for c in folded],
+ case_flags=self.case_flags))
+ seen.add(folded)
+
+ if not items:
+ # We can fall back to simple case-folding.
+ return self
+
+ return Branch([self] + items)
+
+ def max_width(self):
+ # Is the set case-sensitive?
+ if not self.positive or not (self.case_flags & IGNORECASE):
+ return 1
+
+ # Is full case-folding possible?
+ if (not (self.info.flags & UNICODE) or (self.case_flags &
+ FULLIGNORECASE) != FULLIGNORECASE):
+ return 1
+
+ # Get the characters which expand to multiple codepoints on folding.
+ expanding_chars = _regex.get_expand_on_folding()
+
+ # Get the folded characters in the set.
+ seen = set()
+ for ch in expanding_chars:
+ if self.matches(ord(ch)):
+ folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
+ seen.add(folded)
+
+ if not seen:
+ return 1
+
+ return max(len(folded) for folded in seen)
+
+ def __del__(self):
+ self.info = None
+
+class SetDiff(SetBase):
+ _opcode = {(NOCASE, False): OP.SET_DIFF, (IGNORECASE, False):
+ OP.SET_DIFF_IGN, (FULLCASE, False): OP.SET_DIFF, (FULLIGNORECASE, False):
+ OP.SET_DIFF_IGN, (NOCASE, True): OP.SET_DIFF_REV, (IGNORECASE, True):
+ OP.SET_DIFF_IGN_REV, (FULLCASE, True): OP.SET_DIFF_REV, (FULLIGNORECASE,
+ True): OP.SET_DIFF_IGN_REV}
+ _op_name = "SET_DIFF"
+
+ def optimise(self, info, reverse, in_set=False):
+ items = self.items
+ if len(items) > 2:
+ items = [items[0], SetUnion(info, items[1 : ])]
+
+ if len(items) == 1:
+ return items[0].with_flags(case_flags=self.case_flags,
+ zerowidth=self.zerowidth).optimise(info, reverse, in_set)
+
+ self.items = tuple(m.optimise(info, reverse, in_set=True) for m in
+ items)
+
+ return self._handle_case_folding(info, in_set)
+
+ def matches(self, ch):
+ m = self.items[0].matches(ch) and not self.items[1].matches(ch)
+ return m == self.positive
+
+class SetInter(SetBase):
+ _opcode = {(NOCASE, False): OP.SET_INTER, (IGNORECASE, False):
+ OP.SET_INTER_IGN, (FULLCASE, False): OP.SET_INTER, (FULLIGNORECASE,
+ False): OP.SET_INTER_IGN, (NOCASE, True): OP.SET_INTER_REV, (IGNORECASE,
+ True): OP.SET_INTER_IGN_REV, (FULLCASE, True): OP.SET_INTER_REV,
+ (FULLIGNORECASE, True): OP.SET_INTER_IGN_REV}
+ _op_name = "SET_INTER"
+
+ def optimise(self, info, reverse, in_set=False):
+ items = []
+ for m in self.items:
+ m = m.optimise(info, reverse, in_set=True)
+ if isinstance(m, SetInter) and m.positive:
+ # Intersection in intersection.
+ items.extend(m.items)
+ else:
+ items.append(m)
+
+ if len(items) == 1:
+ return items[0].with_flags(case_flags=self.case_flags,
+ zerowidth=self.zerowidth).optimise(info, reverse, in_set)
+
+ self.items = tuple(items)
+
+ return self._handle_case_folding(info, in_set)
+
+ def matches(self, ch):
+ m = all(i.matches(ch) for i in self.items)
+ return m == self.positive
+
+class SetSymDiff(SetBase):
+ _opcode = {(NOCASE, False): OP.SET_SYM_DIFF, (IGNORECASE, False):
+ OP.SET_SYM_DIFF_IGN, (FULLCASE, False): OP.SET_SYM_DIFF, (FULLIGNORECASE,
+ False): OP.SET_SYM_DIFF_IGN, (NOCASE, True): OP.SET_SYM_DIFF_REV,
+ (IGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV, (FULLCASE, True):
+ OP.SET_SYM_DIFF_REV, (FULLIGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV}
+ _op_name = "SET_SYM_DIFF"
+
+ def optimise(self, info, reverse, in_set=False):
+ items = []
+ for m in self.items:
+ m = m.optimise(info, reverse, in_set=True)
+ if isinstance(m, SetSymDiff) and m.positive:
+ # Symmetric difference in symmetric difference.
+ items.extend(m.items)
+ else:
+ items.append(m)
+
+ if len(items) == 1:
+ return items[0].with_flags(case_flags=self.case_flags,
+ zerowidth=self.zerowidth).optimise(info, reverse, in_set)
+
+ self.items = tuple(items)
+
+ return self._handle_case_folding(info, in_set)
+
+ def matches(self, ch):
+ m = False
+ for i in self.items:
+ m = m != i.matches(ch)
+
+ return m == self.positive
+
+class SetUnion(SetBase):
+ _opcode = {(NOCASE, False): OP.SET_UNION, (IGNORECASE, False):
+ OP.SET_UNION_IGN, (FULLCASE, False): OP.SET_UNION, (FULLIGNORECASE,
+ False): OP.SET_UNION_IGN, (NOCASE, True): OP.SET_UNION_REV, (IGNORECASE,
+ True): OP.SET_UNION_IGN_REV, (FULLCASE, True): OP.SET_UNION_REV,
+ (FULLIGNORECASE, True): OP.SET_UNION_IGN_REV}
+ _op_name = "SET_UNION"
+
+ def optimise(self, info, reverse, in_set=False):
+ items = []
+ for m in self.items:
+ m = m.optimise(info, reverse, in_set=True)
+ if isinstance(m, SetUnion) and m.positive:
+ # Union in union.
+ items.extend(m.items)
+ else:
+ items.append(m)
+
+ if len(items) == 1:
+ i = items[0]
+ return i.with_flags(positive=i.positive == self.positive,
+ case_flags=self.case_flags,
+ zerowidth=self.zerowidth).optimise(info, reverse, in_set)
+
+ self.items = tuple(items)
+
+ return self._handle_case_folding(info, in_set)
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if self.positive:
+ flags |= POSITIVE_OP
+ if self.zerowidth:
+ flags |= ZEROWIDTH_OP
+ if fuzzy:
+ flags |= FUZZY_OP
+
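+        # Separate the members into literal characters (batched by polarity
+        # into a single CHARACTER/STRING member op) and everything else,
+        # which is compiled individually.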
+ characters, others = defaultdict(list), []
+ for m in self.items:
+ if isinstance(m, Character):
+ characters[m.positive].append(m.value)
+ else:
+ others.append(m)
+
+ code = [(self._opcode[self.case_flags, reverse], flags)]
+
+ for positive, values in characters.items():
+ flags = 0
+ if positive:
+ flags |= POSITIVE_OP
+ if len(values) == 1:
+ code.append((OP.CHARACTER, flags, values[0]))
+ else:
+ code.append((OP.STRING, flags, len(values)) + tuple(values))
+
+ for m in others:
+ code.extend(m.compile())
+
+ code.append((OP.END, ))
+
+ return code
+
+ def matches(self, ch):
+ m = any(i.matches(ch) for i in self.items)
+ return m == self.positive
+
+class Skip(ZeroWidthBase):
+ _op_name = "SKIP"
+ _opcode = OP.SKIP
+
+class StartOfLine(ZeroWidthBase):
+ _opcode = OP.START_OF_LINE
+ _op_name = "START_OF_LINE"
+
+class StartOfLineU(StartOfLine):
+ _opcode = OP.START_OF_LINE_U
+ _op_name = "START_OF_LINE_U"
+
+class StartOfString(ZeroWidthBase):
+ _opcode = OP.START_OF_STRING
+ _op_name = "START_OF_STRING"
+
+class StartOfWord(ZeroWidthBase):
+ _opcode = OP.START_OF_WORD
+ _op_name = "START_OF_WORD"
+
+class String(RegexBase):
+ _opcode = {(NOCASE, False): OP.STRING, (IGNORECASE, False): OP.STRING_IGN,
+ (FULLCASE, False): OP.STRING, (FULLIGNORECASE, False): OP.STRING_FLD,
+ (NOCASE, True): OP.STRING_REV, (IGNORECASE, True): OP.STRING_IGN_REV,
+ (FULLCASE, True): OP.STRING_REV, (FULLIGNORECASE, True):
+ OP.STRING_FLD_REV}
+
+ def __init__(self, characters, case_flags=NOCASE):
+ self.characters = tuple(characters)
+ self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags]
+
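+        # Under full case-folding, pre-fold the characters so the engine can
+        # compare the folded forms directly; one codepoint may expand to
+        # several (e.g. U+00DF folds to "ss").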
+ if (self.case_flags & FULLIGNORECASE) == FULLIGNORECASE:
+ folded_characters = []
+ for char in self.characters:
+ folded = _regex.fold_case(FULL_CASE_FOLDING, chr(char))
+ folded_characters.extend(ord(c) for c in folded)
+ else:
+ folded_characters = self.characters
+
+ self.folded_characters = tuple(folded_characters)
+ self.required = False
+
+ self._key = self.__class__, self.characters, self.case_flags
+
+ def get_firstset(self, reverse):
+ if reverse:
+ pos = -1
+ else:
+ pos = 0
+ return set([Character(self.characters[pos],
+ case_flags=self.case_flags)])
+
+ def has_simple_start(self):
+ return True
+
+ def _compile(self, reverse, fuzzy):
+ flags = 0
+ if fuzzy:
+ flags |= FUZZY_OP
+ if self.required:
+ flags |= REQUIRED_OP
+ return [(self._opcode[self.case_flags, reverse], flags,
+ len(self.folded_characters)) + self.folded_characters]
+
+ def dump(self, indent, reverse):
+ display = ascii("".join(chr(c) for c in self.characters)).lstrip("bu")
+ print("{}STRING {}{}".format(INDENT * indent, display,
+ CASE_TEXT[self.case_flags]))
+
+ def max_width(self):
+ return len(self.folded_characters)
+
+ def get_required_string(self, reverse):
+ return 0, self
+
+class Literal(String):
+ def dump(self, indent, reverse):
+ literal = ''.join(chr(c) for c in self.characters)
+ display = ascii(literal).lstrip("bu")
+ print("{}LITERAL MATCH {}{}".format(INDENT * indent, display,
+ CASE_TEXT[self.case_flags]))
+
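+# Matches any of the strings in a named list passed as a keyword argument to
+# the pattern (the \L escape). Compiled as a Branch over the choices, sorted
+# longest first so that the longest alternative wins.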
+class StringSet(Branch):
+ def __init__(self, info, name, case_flags=NOCASE):
+ self.info = info
+ self.name = name
+ self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags]
+
+ self._key = self.__class__, self.name, self.case_flags
+
+ self.set_key = (name, self.case_flags)
+ if self.set_key not in info.named_lists_used:
+ info.named_lists_used[self.set_key] = len(info.named_lists_used)
+
+ index = self.info.named_lists_used[self.set_key]
+ items = self.info.kwargs[self.name]
+
+ case_flags = self.case_flags
+
+ encoding = self.info.flags & _ALL_ENCODINGS
+ fold_flags = encoding | case_flags
+
+ choices = []
+
+ for string in items:
+ if isinstance(string, str):
+ string = [ord(c) for c in string]
+
+ choices.append([Character(c, case_flags=case_flags) for c in
+ string])
+
+ # Sort from longest to shortest.
+ choices.sort(key=len, reverse=True)
+
+ self.branches = [Sequence(choice) for choice in choices]
+
+ def dump(self, indent, reverse):
+ print("{}STRING_SET {}{}".format(INDENT * indent, self.name,
+ CASE_TEXT[self.case_flags]))
+
+ def __del__(self):
+ self.info = None
+
+class Source:
+ "Scanner for the regular expression source string."
+ def __init__(self, string):
+ if isinstance(string, str):
+ self.string = string
+ self.char_type = chr
+ else:
+ self.string = string.decode("latin-1")
+ self.char_type = lambda c: bytes([c])
+
+ self.pos = 0
+ self.ignore_space = False
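+        # An empty slice of the original input, preserving its type (str or
+        # bytes), used as the end-of-string sentinel.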
+ self.sep = string[ : 0]
+
+ def get(self, override_ignore=False):
+ string = self.string
+ pos = self.pos
+
+ try:
+ if self.ignore_space and not override_ignore:
+ while True:
+ if string[pos].isspace():
+ # Skip over the whitespace.
+ pos += 1
+ elif string[pos] == "#":
+ # Skip over the comment to the end of the line.
+ pos = string.index("\n", pos)
+ else:
+ break
+
+ ch = string[pos]
+ self.pos = pos + 1
+ return ch
+ except IndexError:
+ # We've reached the end of the string.
+ self.pos = pos
+ return string[ : 0]
+ except ValueError:
+ # The comment extended to the end of the string.
+ self.pos = len(string)
+ return string[ : 0]
+
+ def get_many(self, count=1):
+ string = self.string
+ pos = self.pos
+
+ try:
+ if self.ignore_space:
+ substring = []
+
+ while len(substring) < count:
+ while True:
+ if string[pos].isspace():
+ # Skip over the whitespace.
+ pos += 1
+ elif string[pos] == "#":
+ # Skip over the comment to the end of the line.
+ pos = string.index("\n", pos)
+ else:
+ break
+
+ substring.append(string[pos])
+ pos += 1
+
+ substring = "".join(substring)
+ else:
+ substring = string[pos : pos + count]
+ pos += len(substring)
+
+ self.pos = pos
+ return substring
+ except IndexError:
+ # We've reached the end of the string.
+ self.pos = len(string)
+ return "".join(substring)
+ except ValueError:
+ # The comment extended to the end of the string.
+ self.pos = len(string)
+ return "".join(substring)
+
+ def get_while(self, test_set, include=True):
+ string = self.string
+ pos = self.pos
+
+ if self.ignore_space:
+ try:
+ substring = []
+
+ while True:
+ if string[pos].isspace():
+ # Skip over the whitespace.
+ pos += 1
+ elif string[pos] == "#":
+ # Skip over the comment to the end of the line.
+ pos = string.index("\n", pos)
+ elif (string[pos] in test_set) == include:
+ substring.append(string[pos])
+ pos += 1
+ else:
+ break
+
+ self.pos = pos
+ except IndexError:
+ # We've reached the end of the string.
+ self.pos = len(string)
+ except ValueError:
+ # The comment extended to the end of the string.
+ self.pos = len(string)
+
+ return "".join(substring)
+ else:
+ try:
+ while (string[pos] in test_set) == include:
+ pos += 1
+
+ substring = string[self.pos : pos]
+
+ self.pos = pos
+
+ return substring
+ except IndexError:
+ # We've reached the end of the string.
+ substring = string[self.pos : pos]
+
+ self.pos = pos
+
+ return substring
+
+ def skip_while(self, test_set, include=True):
+ string = self.string
+ pos = self.pos
+
+ try:
+ if self.ignore_space:
+ while True:
+ if string[pos].isspace():
+ # Skip over the whitespace.
+ pos += 1
+ elif string[pos] == "#":
+ # Skip over the comment to the end of the line.
+ pos = string.index("\n", pos)
+ elif (string[pos] in test_set) == include:
+ pos += 1
+ else:
+ break
+ else:
+ while (string[pos] in test_set) == include:
+ pos += 1
+
+ self.pos = pos
+ except IndexError:
+ # We've reached the end of the string.
+ self.pos = len(string)
+ except ValueError:
+ # The comment extended to the end of the string.
+ self.pos = len(string)
+
+ def match(self, substring):
+ string = self.string
+ pos = self.pos
+
+ if self.ignore_space:
+ try:
+ for c in substring:
+ while True:
+ if string[pos].isspace():
+ # Skip over the whitespace.
+ pos += 1
+ elif string[pos] == "#":
+ # Skip over the comment to the end of the line.
+ pos = string.index("\n", pos)
+ else:
+ break
+
+ if string[pos] != c:
+ return False
+
+ pos += 1
+
+ self.pos = pos
+
+ return True
+ except IndexError:
+ # We've reached the end of the string.
+ return False
+ except ValueError:
+ # The comment extended to the end of the string.
+ return False
+ else:
+ if not string.startswith(substring, pos):
+ return False
+
+ self.pos = pos + len(substring)
+
+ return True
+
+ def expect(self, substring):
+ if not self.match(substring):
+ raise error("missing {}".format(substring), self.string, self.pos)
+
+ def at_end(self):
+ string = self.string
+ pos = self.pos
+
+ try:
+ if self.ignore_space:
+ while True:
+ if string[pos].isspace():
+ pos += 1
+ elif string[pos] == "#":
+ pos = string.index("\n", pos)
+ else:
+ break
+
+ return pos >= len(string)
+ except IndexError:
+ # We've reached the end of the string.
+ return True
+ except ValueError:
+ # The comment extended to the end of the string.
+ return True
+
+class Info:
+ "Info about the regular expression."
+
+ def __init__(self, flags=0, char_type=None, kwargs={}):
+ flags |= DEFAULT_FLAGS[(flags & _ALL_VERSIONS) or DEFAULT_VERSION]
+ self.flags = flags
+ self.global_flags = flags
+ self.inline_locale = False
+
+ self.kwargs = kwargs
+
+ self.group_count = 0
+ self.group_index = {}
+ self.group_name = {}
+ self.char_type = char_type
+ self.named_lists_used = {}
+ self.open_groups = []
+ self.open_group_count = {}
+ self.defined_groups = {}
+ self.group_calls = []
+ self.private_groups = {}
+
+ def open_group(self, name=None):
+ group = self.group_index.get(name)
+ if group is None:
+ while True:
+ self.group_count += 1
+ if name is None or self.group_count not in self.group_name:
+ break
+
+ group = self.group_count
+ if name:
+ self.group_index[name] = group
+ self.group_name[group] = name
+
+ if group in self.open_groups:
+ # We have a nested named group. We'll assign it a private group
+ # number, initially negative until we can assign a proper
+ # (positive) number.
+ group_alias = -(len(self.private_groups) + 1)
+ self.private_groups[group_alias] = group
+ group = group_alias
+
+ self.open_groups.append(group)
+ self.open_group_count[group] = self.open_group_count.get(group, 0) + 1
+
+ return group
+
+ def close_group(self):
+ self.open_groups.pop()
+
+ def is_open_group(self, name):
+ # In version 1, a group reference can refer to an open group. We'll
+ # just pretend the group isn't open.
+ version = (self.flags & _ALL_VERSIONS) or DEFAULT_VERSION
+ if version == VERSION1:
+ return False
+
+ if name.isdigit():
+ group = int(name)
+ else:
+ group = self.group_index.get(name)
+
+ return group in self.open_groups
+
+def _check_group_features(info, parsed):
+ """Checks whether the reverse and fuzzy features of the group calls match
+ the groups which they call.
+ """
+ call_refs = {}
+ additional_groups = []
+ for call, reverse, fuzzy in info.group_calls:
+ # Look up the reference of this group call.
+ key = (call.group, reverse, fuzzy)
+ ref = call_refs.get(key)
+ if ref is None:
+ # This group doesn't have a reference yet, so look up its features.
+ if call.group == 0:
+ # Calling the pattern as a whole.
+ rev = bool(info.flags & REVERSE)
+ fuz = isinstance(parsed, Fuzzy)
+ if (rev, fuz) != (reverse, fuzzy):
+ # The pattern as a whole doesn't have the features we want,
+ # so we'll need to make a copy of it with the desired
+ # features.
+ additional_groups.append((CallRef(len(call_refs), parsed),
+ reverse, fuzzy))
+ else:
+ # Calling a capture group.
+ def_info = info.defined_groups[call.group]
+ group = def_info[0]
+ if def_info[1 : ] != (reverse, fuzzy):
+ # The group doesn't have the features we want, so we'll
+ # need to make a copy of it with the desired features.
+ additional_groups.append((group, reverse, fuzzy))
+
+ ref = len(call_refs)
+ call_refs[key] = ref
+
+ call.call_ref = ref
+
+ info.call_refs = call_refs
+ info.additional_groups = additional_groups
+
+def _get_required_string(parsed, flags):
+ "Gets the required string and related info of a parsed pattern."
+
+ req_offset, required = parsed.get_required_string(bool(flags & REVERSE))
+ if required:
+ required.required = True
+ if req_offset >= UNLIMITED:
+ req_offset = -1
+
+ req_flags = required.case_flags
+ if not (flags & UNICODE):
+ req_flags &= ~UNICODE
+
+ req_chars = required.folded_characters
+ else:
+ req_offset = 0
+ req_chars = ()
+ req_flags = 0
+
+ return req_offset, req_chars, req_flags
+
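+# A simple lexer built on the regex engine, in the style of sre's Scanner.
+# 'lexicon' is a list of (pattern, action) pairs; scan() repeatedly matches
+# the combined pattern and applies the action of the branch that matched.
+# A minimal sketch (the token names here are illustrative):
+#
+#     scanner = Scanner([(r"\d+", lambda s, tok: ("INT", tok)),
+#                        (r"\s+", None)])
+#     tokens, remainder = scanner.scan("12 34")
+#     # tokens == [("INT", "12"), ("INT", "34")], remainder == ""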
+class Scanner:
+ def __init__(self, lexicon, flags=0):
+ self.lexicon = lexicon
+
+ # Combine phrases into a compound pattern.
+ patterns = []
+ for phrase, action in lexicon:
+ # Parse the regular expression.
+ source = Source(phrase)
+ info = Info(flags, source.char_type)
+ source.ignore_space = bool(info.flags & VERBOSE)
+ parsed = _parse_pattern(source, info)
+ if not source.at_end():
+ raise error("unbalanced parenthesis", source.string,
+ source.pos)
+
+ # We want to forbid capture groups within each phrase.
+ patterns.append(parsed.remove_captures())
+
+ # Combine all the subpatterns into one pattern.
+ info = Info(flags)
+ patterns = [Group(info, g + 1, p) for g, p in enumerate(patterns)]
+ parsed = Branch(patterns)
+
+ # Optimise the compound pattern.
+ reverse = bool(info.flags & REVERSE)
+ parsed = parsed.optimise(info, reverse)
+ parsed = parsed.pack_characters(info)
+
+ # Get the required string.
+ req_offset, req_chars, req_flags = _get_required_string(parsed,
+ info.flags)
+
+ # Check the features of the groups.
+ _check_group_features(info, parsed)
+
+ # Complain if there are any group calls. They are not supported by the
+ # Scanner class.
+ if info.call_refs:
+ raise error("recursive regex not supported by Scanner",
+ source.string, source.pos)
+
+ reverse = bool(info.flags & REVERSE)
+
+ # Compile the compound pattern. The result is a list of tuples.
+ code = parsed.compile(reverse) + [(OP.SUCCESS, )]
+
+ # Flatten the code into a list of ints.
+ code = _flatten_code(code)
+
+ if not parsed.has_simple_start():
+ # Get the first set, if possible.
+ try:
+ fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
+ fs_code = _flatten_code(fs_code)
+ code = fs_code + code
+ except _FirstSetError:
+ pass
+
+ # Check the global flags for conflicts.
+ version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
+ if version not in (0, VERSION0, VERSION1):
+ raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")
+
+ # Create the PatternObject.
+ #
+ # Local flags like IGNORECASE affect the code generation, but aren't
+ # needed by the PatternObject itself. Conversely, global flags like
+ # LOCALE _don't_ affect the code generation but _are_ needed by the
+ # PatternObject.
+ self.scanner = _regex.compile(None, (flags & GLOBAL_FLAGS) | version,
+ code, {}, {}, {}, [], req_offset, req_chars, req_flags,
+ len(patterns))
+
+ def scan(self, string):
+ result = []
+ append = result.append
+ match = self.scanner.scanner(string).match
+ i = 0
+ while True:
+ m = match()
+ if not m:
+ break
+ j = m.end()
+ if i == j:
+ break
+ action = self.lexicon[m.lastindex - 1][1]
+ if hasattr(action, '__call__'):
+ self.match = m
+ action = action(self, m.group())
+ if action is not None:
+ append(action)
+ i = j
+
+ return result, string[i : ]
+
+# Get the known properties dict.
+PROPERTIES = _regex.get_properties()
+
+# Build the inverse of the properties dict.
+PROPERTY_NAMES = {}
+for prop_name, (prop_id, values) in PROPERTIES.items():
+ name, prop_values = PROPERTY_NAMES.get(prop_id, ("", {}))
+ name = max(name, prop_name, key=len)
+ PROPERTY_NAMES[prop_id] = name, prop_values
+
+ for val_name, val_id in values.items():
+ prop_values[val_id] = max(prop_values.get(val_id, ""), val_name,
+ key=len)
+
+# Character escape sequences.
+CHARACTER_ESCAPES = {
+ "a": "\a",
+ "b": "\b",
+ "f": "\f",
+ "n": "\n",
+ "r": "\r",
+ "t": "\t",
+ "v": "\v",
+}
+
+# Predefined character set escape sequences.
+CHARSET_ESCAPES = {
+ "d": lookup_property(None, "Digit", True),
+ "D": lookup_property(None, "Digit", False),
+ "h": lookup_property(None, "Blank", True),
+ "s": lookup_property(None, "Space", True),
+ "S": lookup_property(None, "Space", False),
+ "w": lookup_property(None, "Word", True),
+ "W": lookup_property(None, "Word", False),
+}
+
+# Positional escape sequences.
+POSITION_ESCAPES = {
+ "A": StartOfString(),
+ "b": Boundary(),
+ "B": Boundary(False),
+ "K": Keep(),
+ "m": StartOfWord(),
+ "M": EndOfWord(),
+ "Z": EndOfString(),
+}
+
+# Positional escape sequences when WORD flag set.
+WORD_POSITION_ESCAPES = dict(POSITION_ESCAPES)
+WORD_POSITION_ESCAPES.update({
+ "b": DefaultBoundary(),
+ "B": DefaultBoundary(False),
+ "m": DefaultStartOfWord(),
+ "M": DefaultEndOfWord(),
+})
+
+# Regex control verbs.
+VERBS = {
+ "FAIL": Failure(),
+ "F": Failure(),
+ "PRUNE": Prune(),
+ "SKIP": Skip(),
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/regex.py b/env-llmeval/lib/python3.10/site-packages/regex/regex.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8cc1776570fc3a0447a9a04c7063738f4ee57f3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/regex/regex.py
@@ -0,0 +1,746 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
+#
+# This version of the SRE library can be redistributed under CNRI's
+# Python 1.6 license. For any other use, please contact Secret Labs
+# AB (info@pythonware.com).
+#
+# Portions of this engine have been developed in cooperation with
+# CNRI. Hewlett-Packard provided funding for 1.6 integration and
+# other compatibility work.
+#
+# 2010-01-16 mrab Python front-end re-written and extended
+
+r"""Support for regular expressions (RE).
+
+This module provides regular expression matching operations similar to those
+found in Perl. It supports both 8-bit and Unicode strings; both the pattern and
+the strings being processed can contain null bytes and characters outside the
+US ASCII range.
+
+Regular expressions can contain both special and ordinary characters. Most
+ordinary characters, like "A", "a", or "0", are the simplest regular
+expressions; they simply match themselves. You can concatenate ordinary
+characters, so last matches the string 'last'.
+
+There are a few differences between the old (legacy) behaviour and the new
+(enhanced) behaviour, which are indicated by VERSION0 or VERSION1.
+
+The special characters are:
+ "." Matches any character except a newline.
+ "^" Matches the start of the string.
+ "$" Matches the end of the string or just before the
+ newline at the end of the string.
+ "*" Matches 0 or more (greedy) repetitions of the preceding
+ RE. Greedy means that it will match as many repetitions
+ as possible.
+ "+" Matches 1 or more (greedy) repetitions of the preceding
+ RE.
+ "?" Matches 0 or 1 (greedy) of the preceding RE.
+ *?,+?,?? Non-greedy versions of the previous three special
+ characters.
+ *+,++,?+ Possessive versions of the previous three special
+ characters.
+ {m,n} Matches from m to n repetitions of the preceding RE.
+ {m,n}? Non-greedy version of the above.
+ {m,n}+ Possessive version of the above.
+ {...} Fuzzy matching constraints.
+ "\\" Either escapes special characters or signals a special
+ sequence.
+ [...] Indicates a set of characters. A "^" as the first
+ character indicates a complementing set.
+ "|" A|B, creates an RE that will match either A or B.
+ (...) Matches the RE inside the parentheses. The contents are
+ captured and can be retrieved or matched later in the
+ string.
+ (?flags-flags) VERSION1: Sets/clears the flags for the remainder of
+ the group or pattern; VERSION0: Sets the flags for the
+ entire pattern.
+ (?:...) Non-capturing version of regular parentheses.
+ (?>...) Atomic non-capturing version of regular parentheses.
+ (?flags-flags:...) Non-capturing version of regular parentheses with local
+ flags.
+    (?P<name>...)      The substring matched by the group is accessible by
+                       name.
+    (?<name>...)       The substring matched by the group is accessible by
+                       name.
+ (?P=name) Matches the text matched earlier by the group named
+ name.
+ (?#...) A comment; ignored.
+ (?=...) Matches if ... matches next, but doesn't consume the
+ string.
+ (?!...) Matches if ... doesn't match next.
+    (?<=...)           Matches if preceded by ....
+    (?<!...)           Matches if not preceded by ....
+
+The special sequences consist of "\\" and a character from the list below.
+    \A               Matches only at the start of the string.
+    \b               Matches the empty string, but only at the start or end
+                     of a word.
+    \B               Matches the empty string, but not at the start or end
+                     of a word.
+    \d               Matches any decimal digit; equivalent to [0-9] when
+                     matching a bytestring or a Unicode string with the
+                     ASCII flag, or the whole range of Unicode digits when
+                     matching a Unicode string.
+    \D               Matches any non-digit character; equivalent to [^\d].
+    \g<name>         Matches the text matched by the group named name.
+ \G Matches the empty string, but only at the position where
+ the search started.
+ \h Matches horizontal whitespace.
+ \K Keeps only what follows for the entire match.
+ \L Named list. The list is provided as a keyword argument.
+ \m Matches the empty string, but only at the start of a word.
+ \M Matches the empty string, but only at the end of a word.
+ \n Matches the newline character.
+ \N{name} Matches the named character.
+ \p{name=value} Matches the character if its property has the specified
+ value.
+ \P{name=value} Matches the character if its property hasn't the specified
+ value.
+ \r Matches the carriage-return character.
+ \s Matches any whitespace character; equivalent to
+ [ \t\n\r\f\v].
+ \S Matches any non-whitespace character; equivalent to [^\s].
+ \t Matches the tab character.
+ \uXXXX Matches the Unicode codepoint with 4-digit hex code XXXX.
+ \UXXXXXXXX Matches the Unicode codepoint with 8-digit hex code
+ XXXXXXXX.
+ \v Matches the vertical tab character.
+ \w Matches any alphanumeric character; equivalent to
+ [a-zA-Z0-9_] when matching a bytestring or a Unicode string
+ with the ASCII flag, or the whole range of Unicode
+ alphanumeric characters (letters plus digits plus
+ underscore) when matching a Unicode string. With LOCALE, it
+ will match the set [0-9_] plus characters defined as
+ letters for the current locale.
+ \W Matches the complement of \w; equivalent to [^\w].
+ \xXX Matches the character with 2-digit hex code XX.
+ \X Matches a grapheme.
+ \Z Matches only at the end of the string.
+ \\ Matches a literal backslash.
+
+This module exports the following functions:
+ match Match a regular expression pattern at the beginning of a string.
+ fullmatch Match a regular expression pattern against all of a string.
+ search Search a string for the presence of a pattern.
+ sub Substitute occurrences of a pattern found in a string using a
+ template string.
+ subf Substitute occurrences of a pattern found in a string using a
+ format string.
+ subn Same as sub, but also return the number of substitutions made.
+ subfn Same as subf, but also return the number of substitutions made.
+ split Split a string by the occurrences of a pattern. VERSION1: will
+ split at zero-width match; VERSION0: won't split at zero-width
+ match.
+ splititer Return an iterator yielding the parts of a split string.
+ findall Find all occurrences of a pattern in a string.
+ finditer Return an iterator yielding a match object for each match.
+ compile Compile a pattern into a Pattern object.
+ purge Clear the regular expression cache.
+ escape Backslash all non-alphanumerics or special characters in a
+ string.
+
+Most of the functions support a concurrent parameter: if True, the GIL will be
+released during matching, allowing other Python threads to run concurrently. If
+the string changes during matching, the behaviour is undefined. This parameter
+is not needed when working on the builtin (immutable) string classes.
+
+Some of the functions in this module take flags as optional parameters. Most of
+these flags can also be set within an RE:
+ A a ASCII Make \w, \W, \b, \B, \d, and \D match the
+ corresponding ASCII character categories. Default
+ when matching a bytestring.
+ B b BESTMATCH Find the best fuzzy match (default is first).
+ D DEBUG Print the parsed pattern.
+ E e ENHANCEMATCH Attempt to improve the fit after finding the first
+ fuzzy match.
+ F f FULLCASE Use full case-folding when performing
+ case-insensitive matching in Unicode.
+ I i IGNORECASE Perform case-insensitive matching.
+ L L LOCALE Make \w, \W, \b, \B, \d, and \D dependent on the
+ current locale. (One byte per character only.)
+ M m MULTILINE "^" matches the beginning of lines (after a newline)
+ as well as the string. "$" matches the end of lines
+ (before a newline) as well as the end of the string.
+ P p POSIX Perform POSIX-standard matching (leftmost longest).
+ R r REVERSE Searches backwards.
+ S s DOTALL "." matches any character at all, including the
+ newline.
+ U u UNICODE Make \w, \W, \b, \B, \d, and \D dependent on the
+ Unicode locale. Default when matching a Unicode
+ string.
+ V0 V0 VERSION0 Turn on the old legacy behaviour.
+ V1 V1 VERSION1 Turn on the new enhanced behaviour. This flag
+ includes the FULLCASE flag.
+ W w WORD Make \b and \B work with default Unicode word breaks
+ and make ".", "^" and "$" work with Unicode line
+ breaks.
+ X x VERBOSE Ignore whitespace and comments for nicer looking REs.
+
+This module also defines an exception 'error'.
+
+"""
+
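+# A minimal usage sketch of the public API documented above (assuming this
+# module is importable as 'regex'; the pattern and counts are illustrative):
+#
+#     import regex
+#     m = regex.search(r"(?:colour){e<=1}", "my color", regex.BESTMATCH)
+#     # m.group() == "color"; m.fuzzy_counts == (0, 0, 1)  (subs, ins, dels)
+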
+# Public symbols.
+__all__ = ["cache_all", "compile", "DEFAULT_VERSION", "escape", "findall",
+ "finditer", "fullmatch", "match", "purge", "search", "split", "splititer",
+ "sub", "subf", "subfn", "subn", "template", "Scanner", "A", "ASCII", "B",
+ "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH", "S", "DOTALL", "F",
+ "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "P", "POSIX",
+ "R", "REVERSE", "T", "TEMPLATE", "U", "UNICODE", "V0", "VERSION0", "V1",
+ "VERSION1", "X", "VERBOSE", "W", "WORD", "error", "Regex", "__version__",
+ "__doc__", "RegexFlag"]
+
+__version__ = "2.5.140"
+
+# --------------------------------------------------------------------
+# Public interface.
+
+def match(pattern, string, flags=0, pos=None, endpos=None, partial=False,
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Try to apply the pattern at the start of the string, returning a match
+ object, or None if no match was found."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.match(string, pos, endpos, concurrent, partial, timeout)
+
+def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False,
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Try to apply the pattern against all of the string, returning a match
+ object, or None if no match was found."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.fullmatch(string, pos, endpos, concurrent, partial, timeout)
+
+def search(pattern, string, flags=0, pos=None, endpos=None, partial=False,
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Search through string looking for a match to the pattern, returning a
+ match object, or None if no match was found."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.search(string, pos, endpos, concurrent, partial, timeout)
+
+def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Return the string obtained by replacing the leftmost (or rightmost with a
+ reverse pattern) non-overlapping occurrences of the pattern in string by the
+ replacement repl. repl can be either a string or a callable; if a string,
+ backslash escapes in it are processed; if a callable, it's passed the match
+ object and must return a replacement string to be used."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.sub(repl, string, count, pos, endpos, concurrent, timeout)
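+
+# An illustrative sketch: with a string replacement, backslash escapes such
+# as \1 and \g<name> are expanded; with a callable, the match object is
+# passed in and its return value is used verbatim.
+#
+#   >>> regex.sub(r'(\w+) (\w+)', r'\2 \1', 'ada lovelace')
+#   'lovelace ada'
+#   >>> regex.sub(r'\d+', lambda m: str(int(m[0]) * 2), 'a1 b2')
+#   'a2 b4'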
+
+def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Return the string obtained by replacing the leftmost (or rightmost with a
+ reverse pattern) non-overlapping occurrences of the pattern in string by the
+ replacement format. format can be either a string or a callable; if a string,
+ it's treated as a format string; if a callable, it's passed the match object
+ and must return a replacement string to be used."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.subf(format, string, count, pos, endpos, concurrent, timeout)
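+
+# An illustrative sketch of 'subf' vs 'sub': the replacement is a
+# str.format-style template, so groups are referenced as {1} or {name}
+# rather than \1 or \g<name>.
+#
+#   >>> regex.subf(r'(?P<first>\w+) (?P<last>\w+)', '{last} {first}',
+#   ...     'ada lovelace')
+#   'lovelace ada'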
+
+def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Return a 2-tuple containing (new_string, number). new_string is the string
+ obtained by replacing the leftmost (or rightmost with a reverse pattern)
+ non-overlapping occurrences of the pattern in the source string by the
+ replacement repl. number is the number of substitutions that were made. repl
+ can be either a string or a callable; if a string, backslash escapes in it
+ are processed; if a callable, it's passed the match object and must return a
+ replacement string to be used."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.subn(repl, string, count, pos, endpos, concurrent, timeout)
+
+def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Return a 2-tuple containing (new_string, number). new_string is the string
+ obtained by replacing the leftmost (or rightmost with a reverse pattern)
+ non-overlapping occurrences of the pattern in the source string by the
+ replacement format. number is the number of substitutions that were made. format
+ can be either a string or a callable; if a string, it's treated as a format
+ string; if a callable, it's passed the match object and must return a
+ replacement string to be used."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.subfn(format, string, count, pos, endpos, concurrent, timeout)
+
+def split(pattern, string, maxsplit=0, flags=0, concurrent=None, timeout=None,
+ ignore_unused=False, **kwargs):
+ """Split the source string by the occurrences of the pattern, returning a
+ list containing the resulting substrings. If capturing parentheses are used
+ in pattern, then the text of all groups in the pattern are also returned as
+ part of the resulting list. If maxsplit is nonzero, at most maxsplit splits
+ occur, and the remainder of the string is returned as the final element of
+ the list."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.split(string, maxsplit, concurrent, timeout)
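+
+# An illustrative sketch: capture groups in the pattern are kept in the
+# result list, and maxsplit caps the number of splits.
+#
+#   >>> regex.split(r'(,)', 'a,b')
+#   ['a', ',', 'b']
+#   >>> regex.split(r',', 'a,b,c', maxsplit=1)
+#   ['a', 'b,c']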
+
+def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None,
+ timeout=None, ignore_unused=False, **kwargs):
+ "Return an iterator yielding the parts of a split string."
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.splititer(string, maxsplit, concurrent, timeout)
+
+def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
+ concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Return a list of all matches in the string. The matches may be overlapped
+ if overlapped is True. If one or more groups are present in the pattern,
+ return a list of groups; this will be a list of tuples if the pattern has
+ more than one group. Empty matches are included in the result."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.findall(string, pos, endpos, overlapped, concurrent, timeout)
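+
+# An illustrative sketch: with overlapped=True, a match may start inside an
+# earlier match, which the standard 're' module cannot do.
+#
+#   >>> regex.findall(r'..', 'abcd')
+#   ['ab', 'cd']
+#   >>> regex.findall(r'..', 'abcd', overlapped=True)
+#   ['ab', 'bc', 'cd']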
+
+def finditer(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
+ partial=False, concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+ """Return an iterator over all matches in the string. The matches may be
+ overlapped if overlapped is True. For each match, the iterator returns a
+ match object. Empty matches are included in the result."""
+ pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+ return pat.finditer(string, pos, endpos, overlapped, concurrent, partial,
+ timeout)
+
+def compile(pattern, flags=0, ignore_unused=False, cache_pattern=None, **kwargs):
+ "Compile a regular expression pattern, returning a pattern object."
+ if cache_pattern is None:
+ cache_pattern = _cache_all
+ return _compile(pattern, flags, ignore_unused, kwargs, cache_pattern)
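+
+# An illustrative sketch: compiling once avoids repeated cache lookups, and
+# cache_pattern=False keeps a one-off pattern out of the module-level cache.
+#
+#   >>> pat = regex.compile(r'\w+', cache_pattern=False)
+#   >>> pat.findall('one two')
+#   ['one', 'two']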
+
+def purge():
+ "Clear the regular expression cache"
+ _cache.clear()
+ _locale_sensitive.clear()
+
+# Whether to cache all patterns.
+_cache_all = True
+
+def cache_all(value=True):
+ """Sets whether to cache all patterns, even those are compiled explicitly.
+ Passing None has no effect, but returns the current setting."""
+ global _cache_all
+
+ if value is None:
+ return _cache_all
+
+ _cache_all = value
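+
+# An illustrative sketch: cache_all(None) queries the setting without
+# changing it, while purge() empties the cache outright.
+#
+#   >>> regex.cache_all(False)  # stop caching explicitly compiled patterns
+#   >>> regex.cache_all(None)
+#   False
+#   >>> regex.cache_all(True)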
+
+def template(pattern, flags=0):
+ "Compile a template pattern, returning a pattern object."
+ return _compile(pattern, flags | TEMPLATE, False, {}, False)
+
+def escape(pattern, special_only=True, literal_spaces=False):
+ """Escape a string for use as a literal in a pattern. If special_only is
+ True, escape only special characters, else escape all non-alphanumeric
+ characters. If literal_spaces is True, don't escape spaces."""
+ # Convert it to Unicode.
+ if isinstance(pattern, bytes):
+ p = pattern.decode("latin-1")
+ else:
+ p = pattern
+
+ s = []
+ if special_only:
+ for c in p:
+ if c == " " and literal_spaces:
+ s.append(c)
+ elif c in _METACHARS or c.isspace():
+ s.append("\\")
+ s.append(c)
+ else:
+ s.append(c)
+ else:
+ for c in p:
+ if c == " " and literal_spaces:
+ s.append(c)
+ elif c in _ALNUM:
+ s.append(c)
+ else:
+ s.append("\\")
+ s.append(c)
+
+ r = "".join(s)
+ # Convert it back to bytes if necessary.
+ if isinstance(pattern, bytes):
+ r = r.encode("latin-1")
+
+ return r
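+
+# An illustrative sketch: by default only regex metacharacters (and
+# whitespace) are escaped; special_only=False escapes every
+# non-alphanumeric character instead.
+#
+#   >>> regex.escape('file[1].txt')
+#   'file\\[1\\]\\.txt'
+#   >>> regex.escape('a b', literal_spaces=True)
+#   'a b'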
+
+# --------------------------------------------------------------------
+# Internals.
+
+import regex._regex_core as _regex_core
+import regex._regex as _regex
+from threading import RLock as _RLock
+from locale import getpreferredencoding as _getpreferredencoding
+from regex._regex_core import *
+from regex._regex_core import (_ALL_VERSIONS, _ALL_ENCODINGS, _FirstSetError,
+ _UnscopedFlagSet, _check_group_features, _compile_firstset,
+ _compile_replacement, _flatten_code, _fold_case, _get_required_string,
+ _parse_pattern, _shrink_cache)
+from regex._regex_core import (ALNUM as _ALNUM, Info as _Info, OP as _OP, Source
+ as _Source, Fuzzy as _Fuzzy)
+
+# Version 0 is the old behaviour, compatible with the original 're' module.
+# Version 1 is the new behaviour, which differs slightly.
+
+DEFAULT_VERSION = VERSION0
+
+_METACHARS = frozenset("()[]{}?*+|^$\\.-#&~")
+
+_regex_core.DEFAULT_VERSION = DEFAULT_VERSION
+
+# Caches for the patterns and replacements.
+_cache = {}
+_cache_lock = _RLock()
+_named_args = {}
+_replacement_cache = {}
+_locale_sensitive = {}
+
+# Maximum size of the cache.
+_MAXCACHE = 500
+_MAXREPCACHE = 500
+
+def _compile(pattern, flags, ignore_unused, kwargs, cache_it):
+ "Compiles a regular expression to a PatternObject."
+
+ global DEFAULT_VERSION
+ try:
+ from regex import DEFAULT_VERSION
+ except ImportError:
+ pass
+
+ # We won't bother to cache the pattern if we're debugging.
+ if (flags & DEBUG) != 0:
+ cache_it = False
+
+ # What locale is this pattern using?
+ locale_key = (type(pattern), pattern)
+ if _locale_sensitive.get(locale_key, True) or (flags & LOCALE) != 0:
+ # This pattern is, or might be, locale-sensitive.
+ pattern_locale = _getpreferredencoding()
+ else:
+ # This pattern is definitely not locale-sensitive.
+ pattern_locale = None
+
+ def complain_unused_args():
+ if ignore_unused:
+ return
+
+ # Complain about any unused keyword arguments, possibly resulting from a typo.
+ unused_kwargs = set(kwargs) - {k for k, v in args_needed}
+ if unused_kwargs:
+ any_one = next(iter(unused_kwargs))
+ raise ValueError('unused keyword argument {!a}'.format(any_one))
+
+ if cache_it:
+ try:
+ # Do we know what keyword arguments are needed?
+ args_key = pattern, type(pattern), flags
+ args_needed = _named_args[args_key]
+
+ # Are we being provided with its required keyword arguments?
+ args_supplied = set()
+ if args_needed:
+ for k, v in args_needed:
+ try:
+ args_supplied.add((k, frozenset(kwargs[k])))
+ except KeyError:
+ raise error("missing named list: {!r}".format(k))
+
+ complain_unused_args()
+
+ args_supplied = frozenset(args_supplied)
+
+ # Have we already seen this regular expression and named list?
+ pattern_key = (pattern, type(pattern), flags, args_supplied,
+ DEFAULT_VERSION, pattern_locale)
+ return _cache[pattern_key]
+ except KeyError:
+ # It's a new pattern, or new named list for a known pattern.
+ pass
+
+ # Guess the encoding from the class of the pattern string.
+ if isinstance(pattern, str):
+ guess_encoding = UNICODE
+ elif isinstance(pattern, bytes):
+ guess_encoding = ASCII
+ elif isinstance(pattern, Pattern):
+ if flags:
+ raise ValueError("cannot process flags argument with a compiled pattern")
+
+ return pattern
+ else:
+ raise TypeError("first argument must be a string or compiled pattern")
+
+ # Set the default version in the core code in case it has been changed.
+ _regex_core.DEFAULT_VERSION = DEFAULT_VERSION
+
+ global_flags = flags
+
+ while True:
+ caught_exception = None
+ try:
+ source = _Source(pattern)
+ info = _Info(global_flags, source.char_type, kwargs)
+ info.guess_encoding = guess_encoding
+ source.ignore_space = bool(info.flags & VERBOSE)
+ parsed = _parse_pattern(source, info)
+ break
+ except _UnscopedFlagSet:
+ # Remember the global flags for the next attempt.
+ global_flags = info.global_flags
+ except error as e:
+ caught_exception = e
+
+ if caught_exception:
+ raise error(caught_exception.msg, caught_exception.pattern,
+ caught_exception.pos)
+
+ if not source.at_end():
+ raise error("unbalanced parenthesis", pattern, source.pos)
+
+ # Check the global flags for conflicts.
+ version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
+ if version not in (0, VERSION0, VERSION1):
+ raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")
+
+ if (info.flags & _ALL_ENCODINGS) not in (0, ASCII, LOCALE, UNICODE):
+ raise ValueError("ASCII, LOCALE and UNICODE flags are mutually incompatible")
+
+ if isinstance(pattern, bytes) and (info.flags & UNICODE):
+ raise ValueError("cannot use UNICODE flag with a bytes pattern")
+
+ if not (info.flags & _ALL_ENCODINGS):
+ if isinstance(pattern, str):
+ info.flags |= UNICODE
+ else:
+ info.flags |= ASCII
+
+ reverse = bool(info.flags & REVERSE)
+ fuzzy = isinstance(parsed, _Fuzzy)
+
+ # Remember whether this pattern has an inline locale flag.
+ _locale_sensitive[locale_key] = info.inline_locale
+
+ # Fix the group references.
+ caught_exception = None
+ try:
+ parsed.fix_groups(pattern, reverse, False)
+ except error as e:
+ caught_exception = e
+
+ if caught_exception:
+ raise error(caught_exception.msg, caught_exception.pattern,
+ caught_exception.pos)
+
+ # Should we print the parsed pattern?
+ if flags & DEBUG:
+ parsed.dump(indent=0, reverse=reverse)
+
+ # Optimise the parsed pattern.
+ parsed = parsed.optimise(info, reverse)
+ parsed = parsed.pack_characters(info)
+
+ # Get the required string.
+ req_offset, req_chars, req_flags = _get_required_string(parsed, info.flags)
+
+ # Build the named lists.
+ named_lists = {}
+ named_list_indexes = [None] * len(info.named_lists_used)
+ args_needed = set()
+ for key, index in info.named_lists_used.items():
+ name, case_flags = key
+ values = frozenset(kwargs[name])
+ if case_flags:
+ items = frozenset(_fold_case(info, v) for v in values)
+ else:
+ items = values
+ named_lists[name] = values
+ named_list_indexes[index] = items
+ args_needed.add((name, values))
+
+ complain_unused_args()
+
+ # Check the features of the groups.
+ _check_group_features(info, parsed)
+
+ # Compile the parsed pattern. The result is a list of tuples.
+ code = parsed.compile(reverse)
+
+ # Is there a group call to the pattern as a whole?
+ key = (0, reverse, fuzzy)
+ ref = info.call_refs.get(key)
+ if ref is not None:
+ code = [(_OP.CALL_REF, ref)] + code + [(_OP.END, )]
+
+ # Add the final 'success' opcode.
+ code += [(_OP.SUCCESS, )]
+
+ # Compile the additional copies of the groups that we need.
+ for group, rev, fuz in info.additional_groups:
+ code += group.compile(rev, fuz)
+
+ # Flatten the code into a list of ints.
+ code = _flatten_code(code)
+
+ if not parsed.has_simple_start():
+ # Get the first set, if possible.
+ try:
+ fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
+ fs_code = _flatten_code(fs_code)
+ code = fs_code + code
+ except _FirstSetError:
+ pass
+
+ # The named capture groups.
+ index_group = dict((v, n) for n, v in info.group_index.items())
+
+ # Create the PatternObject.
+ #
+ # Local flags like IGNORECASE affect the code generation, but aren't needed
+ # by the PatternObject itself. Conversely, global flags like LOCALE _don't_
+ # affect the code generation but _are_ needed by the PatternObject.
+ compiled_pattern = _regex.compile(pattern, info.flags | version, code,
+ info.group_index, index_group, named_lists, named_list_indexes,
+ req_offset, req_chars, req_flags, info.group_count)
+
+ # Do we need to reduce the size of the cache?
+ if len(_cache) >= _MAXCACHE:
+ with _cache_lock:
+ _shrink_cache(_cache, _named_args, _locale_sensitive, _MAXCACHE)
+
+ if cache_it:
+ if (info.flags & LOCALE) == 0:
+ pattern_locale = None
+
+ args_needed = frozenset(args_needed)
+
+ # Store this regular expression and named list.
+ pattern_key = (pattern, type(pattern), flags, args_needed,
+ DEFAULT_VERSION, pattern_locale)
+ _cache[pattern_key] = compiled_pattern
+
+ # Store what keyword arguments are needed.
+ _named_args[args_key] = args_needed
+
+ return compiled_pattern
+
+def _compile_replacement_helper(pattern, template):
+ "Compiles a replacement template."
+ # This function is called by the _regex module.
+
+ # Have we seen this before?
+ key = pattern.pattern, pattern.flags, template
+ compiled = _replacement_cache.get(key)
+ if compiled is not None:
+ return compiled
+
+ if len(_replacement_cache) >= _MAXREPCACHE:
+ _replacement_cache.clear()
+
+ is_unicode = isinstance(template, str)
+ source = _Source(template)
+ if is_unicode:
+ def make_string(char_codes):
+ return "".join(chr(c) for c in char_codes)
+ else:
+ def make_string(char_codes):
+ return bytes(char_codes)
+
+ compiled = []
+ literal = []
+ while True:
+ ch = source.get()
+ if not ch:
+ break
+ if ch == "\\":
+ # '_compile_replacement' will return either an int group reference
+ # or a string literal. It returns items (plural) in order to handle
+ # a 2-character literal (an invalid escape sequence).
+ is_group, items = _compile_replacement(source, pattern, is_unicode)
+ if is_group:
+ # It's a group, so first flush the literal.
+ if literal:
+ compiled.append(make_string(literal))
+ literal = []
+ compiled.extend(items)
+ else:
+ literal.extend(items)
+ else:
+ literal.append(ord(ch))
+
+ # Flush the literal.
+ if literal:
+ compiled.append(make_string(literal))
+
+ _replacement_cache[key] = compiled
+
+ return compiled
+
+# We define Pattern here after all the support objects have been defined.
+_pat = _compile('', 0, False, {}, False)
+Pattern = type(_pat)
+Match = type(_pat.match(''))
+del _pat
+
+# Make Pattern public for typing annotations.
+__all__.append("Pattern")
+__all__.append("Match")
+
+# We'll define an alias for the 'compile' function so that the repr of a
+# pattern object is eval-able.
+Regex = compile
+
+# Register myself for pickling.
+import copyreg as _copy_reg
+
+def _pickle(pattern):
+ return _regex.compile, pattern._pickled_data
+
+_copy_reg.pickle(Pattern, _pickle)
diff --git a/env-llmeval/lib/python3.10/site-packages/regex/test_regex.py b/env-llmeval/lib/python3.10/site-packages/regex/test_regex.py
new file mode 100644
index 0000000000000000000000000000000000000000..21cdb8a06d727055fc1b93c2c264639ddda0cc69
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/regex/test_regex.py
@@ -0,0 +1,4458 @@
+from weakref import proxy
+import copy
+import pickle
+import regex
+import string
+import sys
+import unittest
+
+# String subclasses for issue 18468.
+class StrSubclass(str):
+ def __getitem__(self, index):
+ return StrSubclass(super().__getitem__(index))
+
+class BytesSubclass(bytes):
+ def __getitem__(self, index):
+ return BytesSubclass(super().__getitem__(index))
+
+class RegexTests(unittest.TestCase):
+ PATTERN_CLASS = ""
+ FLAGS_WITH_COMPILED_PAT = "cannot process flags argument with a compiled pattern"
+ INVALID_GROUP_REF = "invalid group reference"
+ MISSING_GT = "missing >"
+ BAD_GROUP_NAME = "bad character in group name"
+ MISSING_GROUP_NAME = "missing group name"
+ MISSING_LT = "missing <"
+ UNKNOWN_GROUP_I = "unknown group"
+ UNKNOWN_GROUP = "unknown group"
+ BAD_ESCAPE = r"bad escape \(end of pattern\)"
+ BAD_OCTAL_ESCAPE = r"bad escape \\"
+ BAD_SET = "unterminated character set"
+ STR_PAT_ON_BYTES = "cannot use a string pattern on a bytes-like object"
+ BYTES_PAT_ON_STR = "cannot use a bytes pattern on a string-like object"
+ STR_PAT_BYTES_TEMPL = "expected str instance, bytes found"
+ BYTES_PAT_STR_TEMPL = "expected a bytes-like object, str found"
+ BYTES_PAT_UNI_FLAG = "cannot use UNICODE flag with a bytes pattern"
+ MIXED_FLAGS = "ASCII, LOCALE and UNICODE flags are mutually incompatible"
+ MISSING_RPAREN = "missing \\)"
+ TRAILING_CHARS = "unbalanced parenthesis"
+ BAD_CHAR_RANGE = "bad character range"
+ NOTHING_TO_REPEAT = "nothing to repeat"
+ MULTIPLE_REPEAT = "multiple repeat"
+ OPEN_GROUP = "cannot refer to an open group"
+ DUPLICATE_GROUP = "duplicate group"
+ CANT_TURN_OFF = "bad inline flags: cannot turn flags off"
+ UNDEF_CHAR_NAME = "undefined character name"
+
+ def assertTypedEqual(self, actual, expect, msg=None):
+ self.assertEqual(actual, expect, msg)
+
+ def recurse(actual, expect):
+ if isinstance(expect, (tuple, list)):
+ for x, y in zip(actual, expect):
+ recurse(x, y)
+ else:
+ self.assertIs(type(actual), type(expect), msg)
+
+ recurse(actual, expect)
+
+ def test_weakref(self):
+ s = 'QabbbcR'
+ x = regex.compile('ab+c')
+ y = proxy(x)
+ if x.findall('QabbbcR') != y.findall('QabbbcR'):
+ self.fail()
+
+ def test_search_star_plus(self):
+ self.assertEqual(regex.search('a*', 'xxx').span(0), (0, 0))
+ self.assertEqual(regex.search('x*', 'axx').span(), (0, 0))
+ self.assertEqual(regex.search('x+', 'axx').span(0), (1, 3))
+ self.assertEqual(regex.search('x+', 'axx').span(), (1, 3))
+ self.assertEqual(regex.search('x', 'aaa'), None)
+ self.assertEqual(regex.match('a*', 'xxx').span(0), (0, 0))
+ self.assertEqual(regex.match('a*', 'xxx').span(), (0, 0))
+ self.assertEqual(regex.match('x*', 'xxxa').span(0), (0, 3))
+ self.assertEqual(regex.match('x*', 'xxxa').span(), (0, 3))
+ self.assertEqual(regex.match('a+', 'xxx'), None)
+
+ def bump_num(self, matchobj):
+ int_value = int(matchobj[0])
+ return str(int_value + 1)
+
+ def test_basic_regex_sub(self):
+ self.assertEqual(regex.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
+ self.assertEqual(regex.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
+ '9.3 -3 24x100y')
+ self.assertEqual(regex.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
+ '9.3 -3 23x99y')
+
+ self.assertEqual(regex.sub('.', lambda m: r"\n", 'x'), "\\n")
+ self.assertEqual(regex.sub('.', r"\n", 'x'), "\n")
+
+ self.assertEqual(regex.sub('(?P<a>x)', r'\g<a>\g<a>', 'xx'), 'xxxx')
+ self.assertEqual(regex.sub('(?P<a>x)', r'\g<a>\g<1>', 'xx'), 'xxxx')
+ self.assertEqual(regex.sub('(?P<unk>x)', r'\g<unk>\g<unk>', 'xx'),
+ 'xxxx')
+ self.assertEqual(regex.sub('(?P<unk>x)', r'\g<1>\g<1>', 'xx'), 'xxxx')
+
+ self.assertEqual(regex.sub('a', r'\t\n\v\r\f\a\b', 'a'), "\t\n\v\r\f\a\b")
+ self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'), "\t\n\v\r\f\a")
+ self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'), chr(9) + chr(10)
+ + chr(11) + chr(13) + chr(12) + chr(7))
+
+ self.assertEqual(regex.sub(r'^\s*', 'X', 'test'), 'Xtest')
+
+ self.assertEqual(regex.sub(r"x", r"\x0A", "x"), "\n")
+ self.assertEqual(regex.sub(r"x", r"\u000A", "x"), "\n")
+ self.assertEqual(regex.sub(r"x", r"\U0000000A", "x"), "\n")
+ self.assertEqual(regex.sub(r"x", r"\N{LATIN CAPITAL LETTER A}",
+ "x"), "A")
+
+ self.assertEqual(regex.sub(br"x", br"\x0A", b"x"), b"\n")
+
+ def test_bug_449964(self):
+ # Fails for group followed by other escape.
+ self.assertEqual(regex.sub(r'(?P<unk>x)', r'\g<1>\g<1>\b', 'xx'),
+ "xx\bxx\b")
+
+ def test_bug_449000(self):
+ # Test for sub() on escaped characters.
+ self.assertEqual(regex.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
+ "abc\ndef\n")
+ self.assertEqual(regex.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
+ "abc\ndef\n")
+ self.assertEqual(regex.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
+ "abc\ndef\n")
+ self.assertEqual(regex.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
+ "abc\ndef\n")
+
+ def test_bug_1661(self):
+ # Verify that flags do not get silently ignored with compiled patterns
+ pattern = regex.compile('.')
+ self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT,
+ lambda: regex.match(pattern, 'A', regex.I))
+ self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT,
+ lambda: regex.search(pattern, 'A', regex.I))
+ self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT,
+ lambda: regex.findall(pattern, 'A', regex.I))
+ self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT,
+ lambda: regex.compile(pattern, regex.I))
+
+ def test_bug_3629(self):
+ # A regex that triggered a bug in the sre-code validator
+ self.assertEqual(repr(type(regex.compile("(?P)(?(quote))"))),
+ self.PATTERN_CLASS)
+
+ def test_sub_template_numeric_escape(self):
+ # Bug 776311 and friends.
+ self.assertEqual(regex.sub('x', r'\0', 'x'), "\0")
+ self.assertEqual(regex.sub('x', r'\000', 'x'), "\000")
+ self.assertEqual(regex.sub('x', r'\001', 'x'), "\001")
+ self.assertEqual(regex.sub('x', r'\008', 'x'), "\0" + "8")
+ self.assertEqual(regex.sub('x', r'\009', 'x'), "\0" + "9")
+ self.assertEqual(regex.sub('x', r'\111', 'x'), "\111")
+ self.assertEqual(regex.sub('x', r'\117', 'x'), "\117")
+
+ self.assertEqual(regex.sub('x', r'\1111', 'x'), "\1111")
+ self.assertEqual(regex.sub('x', r'\1111', 'x'), "\111" + "1")
+
+ self.assertEqual(regex.sub('x', r'\00', 'x'), '\x00')
+ self.assertEqual(regex.sub('x', r'\07', 'x'), '\x07')
+ self.assertEqual(regex.sub('x', r'\08', 'x'), "\0" + "8")
+ self.assertEqual(regex.sub('x', r'\09', 'x'), "\0" + "9")
+ self.assertEqual(regex.sub('x', r'\0a', 'x'), "\0" + "a")
+
+ self.assertEqual(regex.sub('x', r'\400', 'x'), "\u0100")
+ self.assertEqual(regex.sub('x', r'\777', 'x'), "\u01FF")
+ self.assertEqual(regex.sub(b'x', br'\400', b'x'), b"\x00")
+ self.assertEqual(regex.sub(b'x', br'\777', b'x'), b"\xFF")
+
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\1', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\8', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\9', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\11', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\18', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\1a', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\90', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\99', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\118', 'x')) # r'\11' + '8'
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\11a', 'x'))
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\181', 'x')) # r'\18' + '1'
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.sub('x', r'\800', 'x')) # r'\80' + '0'
+
+ # In Python 2.3 (etc), these loop endlessly in sre_parser.py.
+ self.assertEqual(regex.sub('(((((((((((x)))))))))))', r'\11', 'x'),
+ 'x')
+ self.assertEqual(regex.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
+ 'xz8')
+ self.assertEqual(regex.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
+ 'xza')
+
+ def test_qualified_re_sub(self):
+ self.assertEqual(regex.sub('a', 'b', 'aaaaa'), 'bbbbb')
+ self.assertEqual(regex.sub('a', 'b', 'aaaaa', 1), 'baaaa')
+
+ def test_bug_114660(self):
+ self.assertEqual(regex.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
+ 'hello there')
+
+ def test_bug_462270(self):
+ # Test for empty sub() behaviour, see SF bug #462270
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.sub('(?V0)x*', '-', 'abxd'), '-a-b--d-')
+ else:
+ self.assertEqual(regex.sub('(?V0)x*', '-', 'abxd'), '-a-b-d-')
+ self.assertEqual(regex.sub('(?V1)x*', '-', 'abxd'), '-a-b--d-')
+ self.assertEqual(regex.sub('x+', '-', 'abxd'), 'ab-d')
+
+ def test_bug_14462(self):
+ # chr(255) is a valid identifier in Python 3.
+ group_name = '\xFF'
+ self.assertEqual(regex.search(r'(?P<' + group_name + '>a)',
+ 'abc').group(group_name), 'a')
+
+ def test_symbolic_refs(self):
+ self.assertRaisesRegex(regex.error, self.MISSING_GT, lambda:
+ regex.sub('(?P<a>x)', r'\g<a', 'xx'))
+ self.assertRaisesRegex(regex.error, self.MISSING_GROUP_NAME, lambda:
+ regex.sub('(?P<a>x)', r'\g<', 'xx'))
+ self.assertRaisesRegex(regex.error, self.MISSING_LT, lambda:
+ regex.sub('(?P<a>x)', r'\g', 'xx'))
+ self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+ regex.sub('(?P<a>x)', r'\g<a a>', 'xx'))
+ self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+ regex.sub('(?P<a>x)', r'\g<1a1>', 'xx'))
+ self.assertRaisesRegex(IndexError, self.UNKNOWN_GROUP_I, lambda:
+ regex.sub('(?P<a>x)', r'\g<ab>', 'xx'))
+
+ # The new behaviour of unmatched but valid groups is to treat them like
+ # empty matches in the replacement template, like in Perl.
+ self.assertEqual(regex.sub('(?P<a>x)|(?P<b>y)', r'\g<b>', 'xx'), '')
+ self.assertEqual(regex.sub('(?P<a>x)|(?P<b>y)', r'\2', 'xx'), '')
+
+ # The old behaviour was to raise it as an IndexError.
+ self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+ regex.sub('(?P<a>x)', r'\g<-1>', 'xx'))
+
+ def test_re_subn(self):
+ self.assertEqual(regex.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
+ self.assertEqual(regex.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
+ self.assertEqual(regex.subn("b+", "x", "xyz"), ('xyz', 0))
+ self.assertEqual(regex.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
+ self.assertEqual(regex.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
+
+ def test_re_split(self):
+ self.assertEqual(regex.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.split(":*", ":a:b::c"), ['', '', 'a', '',
+ 'b', '', 'c', ''])
+ self.assertEqual(regex.split("(:*)", ":a:b::c"), ['', ':', '', '',
+ 'a', ':', '', '', 'b', '::', '', '', 'c', '', ''])
+ self.assertEqual(regex.split("(?::*)", ":a:b::c"), ['', '', 'a',
+ '', 'b', '', 'c', ''])
+ self.assertEqual(regex.split("(:)*", ":a:b::c"), ['', ':', '',
+ None, 'a', ':', '', None, 'b', ':', '', None, 'c', None, ''])
+ else:
+ self.assertEqual(regex.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
+ self.assertEqual(regex.split("(:*)", ":a:b::c"), ['', ':', 'a',
+ ':', 'b', '::', 'c'])
+ self.assertEqual(regex.split("(?::*)", ":a:b::c"), ['', 'a', 'b',
+ 'c'])
+ self.assertEqual(regex.split("(:)*", ":a:b::c"), ['', ':', 'a',
+ ':', 'b', ':', 'c'])
+ self.assertEqual(regex.split("([b:]+)", ":a:b::c"), ['', ':', 'a',
+ ':b::', 'c'])
+ self.assertEqual(regex.split("(b)|(:+)", ":a:b::c"), ['', None, ':',
+ 'a', None, ':', '', 'b', None, '', None, '::', 'c'])
+ self.assertEqual(regex.split("(?:b)|(?::+)", ":a:b::c"), ['', 'a', '',
+ '', 'c'])
+
+ self.assertEqual(regex.split("x", "xaxbxc"), ['', 'a', 'b', 'c'])
+ self.assertEqual([m for m in regex.splititer("x", "xaxbxc")], ['', 'a',
+ 'b', 'c'])
+
+ self.assertEqual(regex.split("(?r)x", "xaxbxc"), ['c', 'b', 'a', ''])
+ self.assertEqual([m for m in regex.splititer("(?r)x", "xaxbxc")], ['c',
+ 'b', 'a', ''])
+
+ self.assertEqual(regex.split("(x)|(y)", "xaxbxc"), ['', 'x', None, 'a',
+ 'x', None, 'b', 'x', None, 'c'])
+ self.assertEqual([m for m in regex.splititer("(x)|(y)", "xaxbxc")],
+ ['', 'x', None, 'a', 'x', None, 'b', 'x', None, 'c'])
+
+ self.assertEqual(regex.split("(?r)(x)|(y)", "xaxbxc"), ['c', 'x', None,
+ 'b', 'x', None, 'a', 'x', None, ''])
+ self.assertEqual([m for m in regex.splititer("(?r)(x)|(y)", "xaxbxc")],
+ ['c', 'x', None, 'b', 'x', None, 'a', 'x', None, ''])
+
+ self.assertEqual(regex.split(r"(?V1)\b", "a b c"), ['', 'a', ' ', 'b',
+ ' ', 'c', ''])
+ self.assertEqual(regex.split(r"(?V1)\m", "a b c"), ['', 'a ', 'b ',
+ 'c'])
+ self.assertEqual(regex.split(r"(?V1)\M", "a b c"), ['a', ' b', ' c',
+ ''])
+
+ def test_qualified_re_split(self):
+ self.assertEqual(regex.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
+ self.assertEqual(regex.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
+ self.assertEqual(regex.split("(:)", ":a:b::c", 2), ['', ':', 'a', ':',
+ 'b::c'])
+
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.split("(:*)", ":a:b::c", 2), ['', ':', '',
+ '', 'a:b::c'])
+ else:
+ self.assertEqual(regex.split("(:*)", ":a:b::c", 2), ['', ':', 'a',
+ ':', 'b::c'])
+
+ def test_re_findall(self):
+ self.assertEqual(regex.findall(":+", "abc"), [])
+ self.assertEqual(regex.findall(":+", "a:b::c:::d"), [':', '::', ':::'])
+ self.assertEqual(regex.findall("(:+)", "a:b::c:::d"), [':', '::',
+ ':::'])
+ self.assertEqual(regex.findall("(:)(:*)", "a:b::c:::d"), [(':', ''),
+ (':', ':'), (':', '::')])
+
+ self.assertEqual(regex.findall(r"\((?P.{0,5}?TEST)\)",
+ "(MY TEST)"), ["MY TEST"])
+ self.assertEqual(regex.findall(r"\((?P.{0,3}?TEST)\)",
+ "(MY TEST)"), ["MY TEST"])
+ self.assertEqual(regex.findall(r"\((?P.{0,3}?T)\)", "(MY T)"),
+ ["MY T"])
+
+ self.assertEqual(regex.findall(r"[^a]{2}[A-Z]", "\n S"), [' S'])
+ self.assertEqual(regex.findall(r"[^a]{2,3}[A-Z]", "\n S"), ['\n S'])
+ self.assertEqual(regex.findall(r"[^a]{2,3}[A-Z]", "\n S"), [' S'])
+
+ self.assertEqual(regex.findall(r"X(Y[^Y]+?){1,2}( |Q)+DEF",
+ "XYABCYPPQ\nQ DEF"), [('YPPQ\n', ' ')])
+
+ self.assertEqual(regex.findall(r"(\nTest(\n+.+?){0,2}?)?\n+End",
+ "\nTest\nxyz\nxyz\nEnd"), [('\nTest\nxyz\nxyz', '\nxyz')])
+
+ def test_bug_117612(self):
+ self.assertEqual(regex.findall(r"(a|(b))", "aba"), [('a', ''), ('b',
+ 'b'), ('a', '')])
+
+ def test_re_match(self):
+ self.assertEqual(regex.match('a', 'a')[:], ('a',))
+ self.assertEqual(regex.match('(a)', 'a')[:], ('a', 'a'))
+ self.assertEqual(regex.match(r'(a)', 'a')[0], 'a')
+ self.assertEqual(regex.match(r'(a)', 'a')[1], 'a')
+ self.assertEqual(regex.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
+
+ pat = regex.compile('((a)|(b))(c)?')
+ self.assertEqual(pat.match('a')[:], ('a', 'a', 'a', None, None))
+ self.assertEqual(pat.match('b')[:], ('b', 'b', None, 'b', None))
+ self.assertEqual(pat.match('ac')[:], ('ac', 'a', 'a', None, 'c'))
+ self.assertEqual(pat.match('bc')[:], ('bc', 'b', None, 'b', 'c'))
+ self.assertEqual(pat.match('bc')[:], ('bc', 'b', None, 'b', 'c'))
+
+ # A single group.
+ m = regex.match('(a)', 'a')
+ self.assertEqual(m.group(), 'a')
+ self.assertEqual(m.group(0), 'a')
+ self.assertEqual(m.group(1), 'a')
+ self.assertEqual(m.group(1, 1), ('a', 'a'))
+
+ pat = regex.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
+ self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
+ self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'), (None, 'b',
+ None))
+ self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
+
+ def test_re_groupref_exists(self):
+ self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', '(a)')[:],
+ ('(a)', '(', 'a'))
+ self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', 'a')[:], ('a',
+ None, 'a'))
+ self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', 'a)'), None)
+ self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', '(a'), None)
+ self.assertEqual(regex.match('^(?:(a)|c)((?(1)b|d))$', 'ab')[:], ('ab',
+ 'a', 'b'))
+ self.assertEqual(regex.match('^(?:(a)|c)((?(1)b|d))$', 'cd')[:], ('cd',
+ None, 'd'))
+ self.assertEqual(regex.match('^(?:(a)|c)((?(1)|d))$', 'cd')[:], ('cd',
+ None, 'd'))
+ self.assertEqual(regex.match('^(?:(a)|c)((?(1)|d))$', 'a')[:], ('a',
+ 'a', ''))
+
+ # Tests for bug #1177831: exercise groups other than the first group.
+ p = regex.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
+ self.assertEqual(p.match('abc')[:], ('abc', 'a', 'b', 'c'))
+ self.assertEqual(p.match('ad')[:], ('ad', 'a', None, 'd'))
+ self.assertEqual(p.match('abd'), None)
+ self.assertEqual(p.match('ac'), None)
+
+ def test_re_groupref(self):
+ self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', '|a|')[:], ('|a|',
+ '|', 'a'))
+ self.assertEqual(regex.match(r'^(\|)?([^()]+)\1?$', 'a')[:], ('a',
+ None, 'a'))
+ self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
+ self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', '|a'), None)
+ self.assertEqual(regex.match(r'^(?:(a)|c)(\1)$', 'aa')[:], ('aa', 'a',
+ 'a'))
+ self.assertEqual(regex.match(r'^(?:(a)|c)(\1)?$', 'c')[:], ('c', None,
+ None))
+
+ self.assertEqual(regex.findall(r"(?i)(.{1,40}?),(.{1,40}?)(?:;)+(.{1,80}).{1,40}?\3(\ |;)+(.{1,80}?)\1",
+ "TEST, BEST; LEST ; Lest 123 Test, Best"), [('TEST', ' BEST',
+ ' LEST', ' ', '123 ')])
+
+ def test_groupdict(self):
+ self.assertEqual(regex.match('(?P<first>first) (?P<second>second)',
+ 'first second').groupdict(), {'first': 'first', 'second': 'second'})
+
+ def test_expand(self):
+ self.assertEqual(regex.match("(?Pfirst) (?Psecond)",
+ "first second").expand(r"\2 \1 \g \g"),
+ 'second first second first')
+
+ def test_repeat_minmax(self):
+ self.assertEqual(regex.match(r"^(\w){1}$", "abc"), None)
+ self.assertEqual(regex.match(r"^(\w){1}?$", "abc"), None)
+ self.assertEqual(regex.match(r"^(\w){1,2}$", "abc"), None)
+ self.assertEqual(regex.match(r"^(\w){1,2}?$", "abc"), None)
+
+ self.assertEqual(regex.match(r"^(\w){3}$", "abc")[1], 'c')
+ self.assertEqual(regex.match(r"^(\w){1,3}$", "abc")[1], 'c')
+ self.assertEqual(regex.match(r"^(\w){1,4}$", "abc")[1], 'c')
+ self.assertEqual(regex.match(r"^(\w){3,4}?$", "abc")[1], 'c')
+ self.assertEqual(regex.match(r"^(\w){3}?$", "abc")[1], 'c')
+ self.assertEqual(regex.match(r"^(\w){1,3}?$", "abc")[1], 'c')
+ self.assertEqual(regex.match(r"^(\w){1,4}?$", "abc")[1], 'c')
+ self.assertEqual(regex.match(r"^(\w){3,4}?$", "abc")[1], 'c')
+
+ self.assertEqual(regex.match("^x{1}$", "xxx"), None)
+ self.assertEqual(regex.match("^x{1}?$", "xxx"), None)
+ self.assertEqual(regex.match("^x{1,2}$", "xxx"), None)
+ self.assertEqual(regex.match("^x{1,2}?$", "xxx"), None)
+
+ self.assertEqual(regex.match("^x{1}", "xxx")[0], 'x')
+ self.assertEqual(regex.match("^x{1}?", "xxx")[0], 'x')
+ self.assertEqual(regex.match("^x{0,1}", "xxx")[0], 'x')
+ self.assertEqual(regex.match("^x{0,1}?", "xxx")[0], '')
+
+ self.assertEqual(bool(regex.match("^x{3}$", "xxx")), True)
+ self.assertEqual(bool(regex.match("^x{1,3}$", "xxx")), True)
+ self.assertEqual(bool(regex.match("^x{1,4}$", "xxx")), True)
+ self.assertEqual(bool(regex.match("^x{3,4}?$", "xxx")), True)
+ self.assertEqual(bool(regex.match("^x{3}?$", "xxx")), True)
+ self.assertEqual(bool(regex.match("^x{1,3}?$", "xxx")), True)
+ self.assertEqual(bool(regex.match("^x{1,4}?$", "xxx")), True)
+ self.assertEqual(bool(regex.match("^x{3,4}?$", "xxx")), True)
+
+ self.assertEqual(regex.match("^x{}$", "xxx"), None)
+ self.assertEqual(bool(regex.match("^x{}$", "x{}")), True)
+
+ def test_getattr(self):
+ self.assertEqual(regex.compile("(?i)(a)(b)").pattern, '(?i)(a)(b)')
+ self.assertEqual(regex.compile("(?i)(a)(b)").flags, regex.I | regex.U |
+ regex.DEFAULT_VERSION)
+ self.assertEqual(regex.compile(b"(?i)(a)(b)").flags, regex.A | regex.I
+ | regex.DEFAULT_VERSION)
+ self.assertEqual(regex.compile("(?i)(a)(b)").groups, 2)
+ self.assertEqual(regex.compile("(?i)(a)(b)").groupindex, {})
+
+ self.assertEqual(regex.compile("(?i)(?Pa)(?Pb)").groupindex,
+ {'first': 1, 'other': 2})
+
+ self.assertEqual(regex.match("(a)", "a").pos, 0)
+ self.assertEqual(regex.match("(a)", "a").endpos, 1)
+
+ self.assertEqual(regex.search("b(c)", "abcdef").pos, 0)
+ self.assertEqual(regex.search("b(c)", "abcdef").endpos, 6)
+ self.assertEqual(regex.search("b(c)", "abcdef").span(), (1, 3))
+ self.assertEqual(regex.search("b(c)", "abcdef").span(1), (2, 3))
+
+ self.assertEqual(regex.match("(a)", "a").string, 'a')
+ self.assertEqual(regex.match("(a)", "a").regs, ((0, 1), (0, 1)))
+ self.assertEqual(repr(type(regex.match("(a)", "a").re)),
+ self.PATTERN_CLASS)
+
+ # Issue 14260.
+ p = regex.compile(r'abc(?P<n>def)')
+ p.groupindex["n"] = 0
+ self.assertEqual(p.groupindex["n"], 1)
+
+ def test_special_escapes(self):
+ self.assertEqual(regex.search(r"\b(b.)\b", "abcd abc bcd bx")[1], 'bx')
+ self.assertEqual(regex.search(r"\B(b.)\B", "abc bcd bc abxd")[1], 'bx')
+ self.assertEqual(regex.search(br"\b(b.)\b", b"abcd abc bcd bx",
+ regex.LOCALE)[1], b'bx')
+ self.assertEqual(regex.search(br"\B(b.)\B", b"abc bcd bc abxd",
+ regex.LOCALE)[1], b'bx')
+ self.assertEqual(regex.search(r"\b(b.)\b", "abcd abc bcd bx",
+ regex.UNICODE)[1], 'bx')
+ self.assertEqual(regex.search(r"\B(b.)\B", "abc bcd bc abxd",
+ regex.UNICODE)[1], 'bx')
+
+ self.assertEqual(regex.search(r"^abc$", "\nabc\n", regex.M)[0], 'abc')
+ self.assertEqual(regex.search(r"^\Aabc\Z$", "abc", regex.M)[0], 'abc')
+ self.assertEqual(regex.search(r"^\Aabc\Z$", "\nabc\n", regex.M), None)
+
+ self.assertEqual(regex.search(br"\b(b.)\b", b"abcd abc bcd bx")[1],
+ b'bx')
+ self.assertEqual(regex.search(br"\B(b.)\B", b"abc bcd bc abxd")[1],
+ b'bx')
+ self.assertEqual(regex.search(br"^abc$", b"\nabc\n", regex.M)[0],
+ b'abc')
+ self.assertEqual(regex.search(br"^\Aabc\Z$", b"abc", regex.M)[0],
+ b'abc')
+ self.assertEqual(regex.search(br"^\Aabc\Z$", b"\nabc\n", regex.M),
+ None)
+
+ self.assertEqual(regex.search(r"\d\D\w\W\s\S", "1aa! a")[0], '1aa! a')
+ self.assertEqual(regex.search(br"\d\D\w\W\s\S", b"1aa! a",
+ regex.LOCALE)[0], b'1aa! a')
+ self.assertEqual(regex.search(r"\d\D\w\W\s\S", "1aa! a",
+ regex.UNICODE)[0], '1aa! a')
+
+ def test_bigcharset(self):
+ self.assertEqual(regex.match(r"([\u2222\u2223])", "\u2222")[1],
+ '\u2222')
+ self.assertEqual(regex.match(r"([\u2222\u2223])", "\u2222",
+ regex.UNICODE)[1], '\u2222')
+ self.assertEqual("".join(regex.findall(".",
+ "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)),
+ 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117')
+ self.assertEqual("".join(regex.findall(r"[e\xe8\xe9\xea\xeb\u0113\u011b\u0117]",
+ "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)),
+ 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117')
+ self.assertEqual("".join(regex.findall(r"e|\xe8|\xe9|\xea|\xeb|\u0113|\u011b|\u0117",
+ "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)),
+ 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117')
+
+ def test_anyall(self):
+ self.assertEqual(regex.match("a.b", "a\nb", regex.DOTALL)[0], "a\nb")
+ self.assertEqual(regex.match("a.*b", "a\n\nb", regex.DOTALL)[0],
+ "a\n\nb")
+
+ def test_non_consuming(self):
+ self.assertEqual(regex.match(r"(a(?=\s[^a]))", "a b")[1], 'a')
+ self.assertEqual(regex.match(r"(a(?=\s[^a]*))", "a b")[1], 'a')
+ self.assertEqual(regex.match(r"(a(?=\s[abc]))", "a b")[1], 'a')
+ self.assertEqual(regex.match(r"(a(?=\s[abc]*))", "a bc")[1], 'a')
+ self.assertEqual(regex.match(r"(a)(?=\s\1)", "a a")[1], 'a')
+ self.assertEqual(regex.match(r"(a)(?=\s\1*)", "a aa")[1], 'a')
+ self.assertEqual(regex.match(r"(a)(?=\s(abc|a))", "a a")[1], 'a')
+
+ self.assertEqual(regex.match(r"(a(?!\s[^a]))", "a a")[1], 'a')
+ self.assertEqual(regex.match(r"(a(?!\s[abc]))", "a d")[1], 'a')
+ self.assertEqual(regex.match(r"(a)(?!\s\1)", "a b")[1], 'a')
+ self.assertEqual(regex.match(r"(a)(?!\s(abc|a))", "a b")[1], 'a')
+
+ def test_ignore_case(self):
+ self.assertEqual(regex.match("abc", "ABC", regex.I)[0], 'ABC')
+ self.assertEqual(regex.match(b"abc", b"ABC", regex.I)[0], b'ABC')
+
+ self.assertEqual(regex.match(r"(a\s[^a]*)", "a bb", regex.I)[1],
+ 'a bb')
+ self.assertEqual(regex.match(r"(a\s[abc])", "a b", regex.I)[1], 'a b')
+ self.assertEqual(regex.match(r"(a\s[abc]*)", "a bb", regex.I)[1],
+ 'a bb')
+ self.assertEqual(regex.match(r"((a)\s\2)", "a a", regex.I)[1], 'a a')
+ self.assertEqual(regex.match(r"((a)\s\2*)", "a aa", regex.I)[1],
+ 'a aa')
+ self.assertEqual(regex.match(r"((a)\s(abc|a))", "a a", regex.I)[1],
+ 'a a')
+ self.assertEqual(regex.match(r"((a)\s(abc|a)*)", "a aa", regex.I)[1],
+ 'a aa')
+
+ # Issue 3511.
+ self.assertEqual(regex.match(r"[Z-a]", "_").span(), (0, 1))
+ self.assertEqual(regex.match(r"(?i)[Z-a]", "_").span(), (0, 1))
+
+ self.assertEqual(bool(regex.match(r"(?i)nao", "nAo")), True)
+ self.assertEqual(bool(regex.match(r"(?i)n\xE3o", "n\xC3o")), True)
+ self.assertEqual(bool(regex.match(r"(?i)n\xE3o", "N\xC3O")), True)
+ self.assertEqual(bool(regex.match(r"(?i)s", "\u017F")), True)
+
+ def test_case_folding(self):
+ self.assertEqual(regex.search(r"(?fi)ss", "SS").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?fi)SS", "ss").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?fi)SS",
+ "\N{LATIN SMALL LETTER SHARP S}").span(), (0, 1))
+ self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LETTER SHARP S}",
+ "SS").span(), (0, 2))
+
+ self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE ST}",
+ "ST").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?fi)ST",
+ "\N{LATIN SMALL LIGATURE ST}").span(), (0, 1))
+ self.assertEqual(regex.search(r"(?fi)ST",
+ "\N{LATIN SMALL LIGATURE LONG S T}").span(), (0, 1))
+
+ self.assertEqual(regex.search(r"(?fi)SST",
+ "\N{LATIN SMALL LETTER SHARP S}t").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?fi)SST",
+ "s\N{LATIN SMALL LIGATURE LONG S T}").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?fi)SST",
+ "s\N{LATIN SMALL LIGATURE ST}").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE ST}",
+ "SST").span(), (1, 3))
+ self.assertEqual(regex.search(r"(?fi)SST",
+ "s\N{LATIN SMALL LIGATURE ST}").span(), (0, 2))
+
+ self.assertEqual(regex.search(r"(?fi)FFI",
+ "\N{LATIN SMALL LIGATURE FFI}").span(), (0, 1))
+ self.assertEqual(regex.search(r"(?fi)FFI",
+ "\N{LATIN SMALL LIGATURE FF}i").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?fi)FFI",
+ "f\N{LATIN SMALL LIGATURE FI}").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE FFI}",
+ "FFI").span(), (0, 3))
+ self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE FF}i",
+ "FFI").span(), (0, 3))
+ self.assertEqual(regex.search(r"(?fi)f\N{LATIN SMALL LIGATURE FI}",
+ "FFI").span(), (0, 3))
+
+ sigma = "\u03A3\u03C3\u03C2"
+ for ch1 in sigma:
+ for ch2 in sigma:
+ if not regex.match(r"(?fi)" + ch1, ch2):
+ self.fail()
+
+ self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB00\uFB01")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB01\uFB00")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB00\uFB01")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB01\uFB00")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)fffi", "\uFB00\uFB01")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)f\uFB03",
+ "\uFB00\uFB01")), True)
+ self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB00\uFB01")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB00\uFB01")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)fffi", "\uFB00\uFB01")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)f\uFB03",
+ "\uFB00\uFB01")), True)
+ self.assertEqual(bool(regex.search(r"(?iV1)f\uFB01", "\uFB00i")),
+ True)
+ self.assertEqual(bool(regex.search(r"(?iV1)f\uFB01", "\uFB00i")),
+ True)
+
+ self.assertEqual(regex.findall(r"(?iV0)\m(?:word){e<=3}\M(?ne", "affine",
+ options=["\N{LATIN SMALL LIGATURE FFI}"]).span(), (0, 6))
+ self.assertEqual(regex.search(r"(?fi)a\Lne",
+ "a\N{LATIN SMALL LIGATURE FFI}ne", options=["ffi"]).span(), (0, 4))
+
+ def test_category(self):
+ self.assertEqual(regex.match(r"(\s)", " ")[1], ' ')
+
+ def test_not_literal(self):
+ self.assertEqual(regex.search(r"\s([^a])", " b")[1], 'b')
+ self.assertEqual(regex.search(r"\s([^a]*)", " bb")[1], 'bb')
+
+ def test_search_coverage(self):
+ self.assertEqual(regex.search(r"\s(b)", " b")[1], 'b')
+ self.assertEqual(regex.search(r"a\s", "a ")[0], 'a ')
+
+ def test_re_escape(self):
+ p = ""
+ self.assertEqual(regex.escape(p), p)
+ for i in range(0, 256):
+ p += chr(i)
+ self.assertEqual(bool(regex.match(regex.escape(chr(i)), chr(i))),
+ True)
+ self.assertEqual(regex.match(regex.escape(chr(i)), chr(i)).span(),
+ (0, 1))
+
+ pat = regex.compile(regex.escape(p))
+ self.assertEqual(pat.match(p).span(), (0, 256))
+
+ def test_re_escape_byte(self):
+ p = b""
+ self.assertEqual(regex.escape(p), p)
+ for i in range(0, 256):
+ b = bytes([i])
+ p += b
+ self.assertEqual(bool(regex.match(regex.escape(b), b)), True)
+ self.assertEqual(regex.match(regex.escape(b), b).span(), (0, 1))
+
+ pat = regex.compile(regex.escape(p))
+ self.assertEqual(pat.match(p).span(), (0, 256))
+
+ def test_constants(self):
+ if regex.I != regex.IGNORECASE:
+ self.fail()
+ if regex.L != regex.LOCALE:
+ self.fail()
+ if regex.M != regex.MULTILINE:
+ self.fail()
+ if regex.S != regex.DOTALL:
+ self.fail()
+ if regex.X != regex.VERBOSE:
+ self.fail()
+
+ def test_flags(self):
+ for flag in [regex.I, regex.M, regex.X, regex.S, regex.L]:
+ self.assertEqual(repr(type(regex.compile('^pattern$', flag))),
+ self.PATTERN_CLASS)
+
+ def test_sre_character_literals(self):
+ for i in [0, 8, 16, 32, 64, 127, 128, 255]:
+ self.assertEqual(bool(regex.match(r"\%03o" % i, chr(i))), True)
+ self.assertEqual(bool(regex.match(r"\%03o0" % i, chr(i) + "0")),
+ True)
+ self.assertEqual(bool(regex.match(r"\%03o8" % i, chr(i) + "8")),
+ True)
+ self.assertEqual(bool(regex.match(r"\x%02x" % i, chr(i))), True)
+ self.assertEqual(bool(regex.match(r"\x%02x0" % i, chr(i) + "0")),
+ True)
+ self.assertEqual(bool(regex.match(r"\x%02xz" % i, chr(i) + "z")),
+ True)
+
+ self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+ regex.match(r"\911", ""))
+
+ def test_sre_character_class_literals(self):
+ for i in [0, 8, 16, 32, 64, 127, 128, 255]:
+ self.assertEqual(bool(regex.match(r"[\%03o]" % i, chr(i))), True)
+ self.assertEqual(bool(regex.match(r"[\%03o0]" % i, chr(i))), True)
+ self.assertEqual(bool(regex.match(r"[\%03o8]" % i, chr(i))), True)
+ self.assertEqual(bool(regex.match(r"[\x%02x]" % i, chr(i))), True)
+ self.assertEqual(bool(regex.match(r"[\x%02x0]" % i, chr(i))), True)
+ self.assertEqual(bool(regex.match(r"[\x%02xz]" % i, chr(i))), True)
+
+ self.assertRaisesRegex(regex.error, self.BAD_OCTAL_ESCAPE, lambda:
+ regex.match(r"[\911]", ""))
+
+ def test_bug_113254(self):
+ self.assertEqual(regex.match(r'(a)|(b)', 'b').start(1), -1)
+ self.assertEqual(regex.match(r'(a)|(b)', 'b').end(1), -1)
+ self.assertEqual(regex.match(r'(a)|(b)', 'b').span(1), (-1, -1))
+
+ def test_bug_527371(self):
+ # Bug described in patches 527371/672491.
+ self.assertEqual(regex.match(r'(a)?a','a').lastindex, None)
+ self.assertEqual(regex.match(r'(a)(b)?b','ab').lastindex, 1)
+ self.assertEqual(regex.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup,
+ 'a')
+ self.assertEqual(regex.match("(?P<a>a(b))", "ab").lastgroup, 'a')
+ self.assertEqual(regex.match("((a))", "a").lastindex, 1)
+
+ def test_bug_545855(self):
+ # Bug 545855 -- This pattern failed to cause a compile error as it
+ # should, instead provoking a TypeError.
+ self.assertRaisesRegex(regex.error, self.BAD_SET, lambda:
+ regex.compile('foo[a-'))
+
+ def test_bug_418626(self):
+ # Bugs 418626 et al. -- Testing Greg Chapman's addition of op code
+ # SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
+ # pattern '*?' on a long string.
+ self.assertEqual(regex.match('.*?c', 10000 * 'ab' + 'cd').end(0),
+ 20001)
+ self.assertEqual(regex.match('.*?cd', 5000 * 'ab' + 'c' + 5000 * 'ab' +
+ 'cde').end(0), 20003)
+ self.assertEqual(regex.match('.*?cd', 20000 * 'abc' + 'de').end(0),
+ 60001)
+ # Non-simple '*?' still used to hit the recursion limit, before the
+ # non-recursive scheme was implemented.
+ self.assertEqual(regex.search('(a|b)*?c', 10000 * 'ab' + 'cd').end(0),
+ 20001)
+
+ def test_bug_612074(self):
+ pat = "[" + regex.escape("\u2039") + "]"
+ self.assertEqual(regex.compile(pat) and 1, 1)
+
+ def test_stack_overflow(self):
+ # Nasty cases that used to overflow the straightforward recursive
+ # implementation of repeated groups.
+ self.assertEqual(regex.match('(x)*', 50000 * 'x')[1], 'x')
+ self.assertEqual(regex.match('(x)*y', 50000 * 'x' + 'y')[1], 'x')
+ self.assertEqual(regex.match('(x)*?y', 50000 * 'x' + 'y')[1], 'x')
+
+ def test_scanner(self):
+ def s_ident(scanner, token): return token
+ def s_operator(scanner, token): return "op%s" % token
+ def s_float(scanner, token): return float(token)
+ def s_int(scanner, token): return int(token)
+
+ scanner = regex.Scanner([(r"[a-zA-Z_]\w*", s_ident), (r"\d+\.\d*",
+ s_float), (r"\d+", s_int), (r"=|\+|-|\*|/", s_operator), (r"\s+",
+ None), ])
+
+ self.assertEqual(repr(type(scanner.scanner.scanner("").pattern)),
+ self.PATTERN_CLASS)
+
+ self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum',
+ 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar'], ''))
+
+ def test_bug_448951(self):
+ # Bug 448951 (similar to 429357, but with single char match).
+ # (Also test greedy matches.)
+ for op in '', '?', '*':
+ self.assertEqual(regex.match(r'((.%s):)?z' % op, 'z')[:], ('z',
+ None, None))
+ self.assertEqual(regex.match(r'((.%s):)?z' % op, 'a:z')[:], ('a:z',
+ 'a:', 'a'))
+
+ def test_bug_725106(self):
+ # Capturing groups in alternatives in repeats.
+ self.assertEqual(regex.match('^((a)|b)*', 'abc')[:], ('ab', 'b', 'a'))
+ self.assertEqual(regex.match('^(([ab])|c)*', 'abc')[:], ('abc', 'c',
+ 'b'))
+ self.assertEqual(regex.match('^((d)|[ab])*', 'abc')[:], ('ab', 'b',
+ None))
+ self.assertEqual(regex.match('^((a)c|[ab])*', 'abc')[:], ('ab', 'b',
+ None))
+ self.assertEqual(regex.match('^((a)|b)*?c', 'abc')[:], ('abc', 'b',
+ 'a'))
+ self.assertEqual(regex.match('^(([ab])|c)*?d', 'abcd')[:], ('abcd',
+ 'c', 'b'))
+ self.assertEqual(regex.match('^((d)|[ab])*?c', 'abc')[:], ('abc', 'b',
+ None))
+ self.assertEqual(regex.match('^((a)c|[ab])*?c', 'abc')[:], ('abc', 'b',
+ None))
+
+ def test_bug_725149(self):
+ # Mark_stack_base restoring before restoring marks.
+ self.assertEqual(regex.match('(a)(?:(?=(b)*)c)*', 'abb')[:], ('a', 'a',
+ None))
+ self.assertEqual(regex.match('(a)((?!(b)*))*', 'abb')[:], ('a', 'a',
+ None, None))
+
+ def test_bug_764548(self):
+ # Bug 764548, regex.compile() barfs on str/unicode subclasses.
+ class my_unicode(str): pass
+ pat = regex.compile(my_unicode("abc"))
+ self.assertEqual(pat.match("xyz"), None)
+
+ def test_finditer(self):
+ it = regex.finditer(r":+", "a:b::c:::d")
+ self.assertEqual([item[0] for item in it], [':', '::', ':::'])
+
+ def test_bug_926075(self):
+ if regex.compile('bug_926075') is regex.compile(b'bug_926075'):
+ self.fail()
+
+ def test_bug_931848(self):
+ pattern = "[\u002E\u3002\uFF0E\uFF61]"
+ self.assertEqual(regex.compile(pattern).split("a.b.c"), ['a', 'b',
+ 'c'])
+
+ def test_bug_581080(self):
+ it = regex.finditer(r"\s", "a b")
+ self.assertEqual(next(it).span(), (1, 2))
+ self.assertRaises(StopIteration, lambda: next(it))
+
+ scanner = regex.compile(r"\s").scanner("a b")
+ self.assertEqual(scanner.search().span(), (1, 2))
+ self.assertEqual(scanner.search(), None)
+
+ def test_bug_817234(self):
+ it = regex.finditer(r".*", "asdf")
+ self.assertEqual(next(it).span(), (0, 4))
+ self.assertEqual(next(it).span(), (4, 4))
+ self.assertRaises(StopIteration, lambda: next(it))
+
+ def test_empty_array(self):
+ # SF buf 1647541.
+ import array
+ for typecode in 'bBuhHiIlLfd':
+ a = array.array(typecode)
+ self.assertEqual(regex.compile(b"bla").match(a), None)
+ self.assertEqual(regex.compile(b"").match(a)[1 : ], ())
+
+ def test_inline_flags(self):
+ # Bug #1700.
+ upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Below
+ lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Below
+
+ p = regex.compile(upper_char, regex.I | regex.U)
+ self.assertEqual(bool(p.match(lower_char)), True)
+
+ p = regex.compile(lower_char, regex.I | regex.U)
+ self.assertEqual(bool(p.match(upper_char)), True)
+
+ p = regex.compile('(?i)' + upper_char, regex.U)
+ self.assertEqual(bool(p.match(lower_char)), True)
+
+ p = regex.compile('(?i)' + lower_char, regex.U)
+ self.assertEqual(bool(p.match(upper_char)), True)
+
+ p = regex.compile('(?iu)' + upper_char)
+ self.assertEqual(bool(p.match(lower_char)), True)
+
+ p = regex.compile('(?iu)' + lower_char)
+ self.assertEqual(bool(p.match(upper_char)), True)
+
+ # Changed to positional flags in regex 2023.12.23.
+ self.assertEqual(bool(regex.match(r"(?i)a", "A")), True)
+ self.assertEqual(regex.match(r"a(?i)", "A"), None)
+
+ def test_dollar_matches_twice(self):
+ # $ matches the end of string, and just before the terminating \n.
+ pattern = regex.compile('$')
+ self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
+ self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
+ self.assertEqual(pattern.sub('#', '\n'), '#\n#')
+
+ pattern = regex.compile('$', regex.MULTILINE)
+ self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#')
+ self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
+ self.assertEqual(pattern.sub('#', '\n'), '#\n#')
+
+ def test_bytes_str_mixing(self):
+ # Mixing str and bytes is disallowed.
+ pat = regex.compile('.')
+ bpat = regex.compile(b'.')
+ self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda:
+ pat.match(b'b'))
+ self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda:
+ bpat.match('b'))
+ self.assertRaisesRegex(TypeError, self.STR_PAT_BYTES_TEMPL, lambda:
+ pat.sub(b'b', 'c'))
+ self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda:
+ pat.sub('b', b'c'))
+ self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda:
+ pat.sub(b'b', b'c'))
+ self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda:
+ bpat.sub(b'b', 'c'))
+ self.assertRaisesRegex(TypeError, self.BYTES_PAT_STR_TEMPL, lambda:
+ bpat.sub('b', b'c'))
+ self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda:
+ bpat.sub('b', 'c'))
+
+ self.assertRaisesRegex(ValueError, self.BYTES_PAT_UNI_FLAG, lambda:
+ regex.compile(br'\w', regex.UNICODE))
+ self.assertRaisesRegex(ValueError, self.BYTES_PAT_UNI_FLAG, lambda:
+ regex.compile(br'(?u)\w'))
+ self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+ regex.compile(r'\w', regex.UNICODE | regex.ASCII))
+ self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+ regex.compile(r'(?u)\w', regex.ASCII))
+ self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+ regex.compile(r'(?a)\w', regex.UNICODE))
+ self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+ regex.compile(r'(?au)\w'))
+
+ def test_ascii_and_unicode_flag(self):
+ # String patterns.
+ for flags in (0, regex.UNICODE):
+ pat = regex.compile('\xc0', flags | regex.IGNORECASE)
+ self.assertEqual(bool(pat.match('\xe0')), True)
+ pat = regex.compile(r'\w', flags)
+ self.assertEqual(bool(pat.match('\xe0')), True)
+
+ pat = regex.compile('\xc0', regex.ASCII | regex.IGNORECASE)
+ self.assertEqual(pat.match('\xe0'), None)
+ pat = regex.compile('(?a)\xc0', regex.IGNORECASE)
+ self.assertEqual(pat.match('\xe0'), None)
+ pat = regex.compile(r'\w', regex.ASCII)
+ self.assertEqual(pat.match('\xe0'), None)
+ pat = regex.compile(r'(?a)\w')
+ self.assertEqual(pat.match('\xe0'), None)
+
+ # Bytes patterns.
+ for flags in (0, regex.ASCII):
+ pat = regex.compile(b'\xc0', flags | regex.IGNORECASE)
+ self.assertEqual(pat.match(b'\xe0'), None)
+ pat = regex.compile(br'\w')
+ self.assertEqual(pat.match(b'\xe0'), None)
+
+ self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+ regex.compile(r'(?au)\w'))
+
+ def test_subscripting_match(self):
+ m = regex.match(r'(?<a>\w)', 'xy')
+ if not m:
+ self.fail("Failed: expected match but returned None")
+ elif not m or m[0] != m.group(0) or m[1] != m.group(1):
+ self.fail("Failed")
+ if not m:
+ self.fail("Failed: expected match but returned None")
+ elif m[:] != ('x', 'x'):
+ self.fail("Failed: expected \"('x', 'x')\" but got {} instead".format(ascii(m[:])))
+
+ def test_new_named_groups(self):
+ m0 = regex.match(r'(?P<a>\w)', 'x')
+ m1 = regex.match(r'(?<a>\w)', 'x')
+ if not (m0 and m1 and m0[:] == m1[:]):
+ self.fail("Failed")
+
+ def test_properties(self):
+ self.assertEqual(regex.match(b'(?ai)\xC0', b'\xE0'), None)
+ self.assertEqual(regex.match(br'(?ai)\xC0', b'\xE0'), None)
+ self.assertEqual(regex.match(br'(?a)\w', b'\xE0'), None)
+ self.assertEqual(bool(regex.match(r'\w', '\xE0')), True)
+
+ # Dropped the following test. It's not possible to determine what the
+ # correct result should be in the general case.
+# self.assertEqual(bool(regex.match(br'(?L)\w', b'\xE0')),
+# b'\xE0'.isalnum())
+
+ self.assertEqual(bool(regex.match(br'(?L)\d', b'0')), True)
+ self.assertEqual(bool(regex.match(br'(?L)\s', b' ')), True)
+ self.assertEqual(bool(regex.match(br'(?L)\w', b'a')), True)
+ self.assertEqual(regex.match(br'(?L)\d', b'?'), None)
+ self.assertEqual(regex.match(br'(?L)\s', b'?'), None)
+ self.assertEqual(regex.match(br'(?L)\w', b'?'), None)
+
+ self.assertEqual(regex.match(br'(?L)\D', b'0'), None)
+ self.assertEqual(regex.match(br'(?L)\S', b' '), None)
+ self.assertEqual(regex.match(br'(?L)\W', b'a'), None)
+ self.assertEqual(bool(regex.match(br'(?L)\D', b'?')), True)
+ self.assertEqual(bool(regex.match(br'(?L)\S', b'?')), True)
+ self.assertEqual(bool(regex.match(br'(?L)\W', b'?')), True)
+
+ self.assertEqual(bool(regex.match(r'\p{Cyrillic}',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'(?i)\p{Cyrillic}',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{IsCyrillic}',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{Script=Cyrillic}',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{InCyrillic}',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{Block=Cyrillic}',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:Cyrillic:]]',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:IsCyrillic:]]',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:Script=Cyrillic:]]',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:InCyrillic:]]',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:Block=Cyrillic:]]',
+ '\N{CYRILLIC CAPITAL LETTER A}')), True)
+
+ self.assertEqual(bool(regex.match(r'\P{Cyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\P{IsCyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\P{Script=Cyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\P{InCyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\P{Block=Cyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{^Cyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{^IsCyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{^Script=Cyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{^InCyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'\p{^Block=Cyrillic}',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:^Cyrillic:]]',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:^IsCyrillic:]]',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:^Script=Cyrillic:]]',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:^InCyrillic:]]',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+ self.assertEqual(bool(regex.match(r'[[:^Block=Cyrillic:]]',
+ '\N{LATIN CAPITAL LETTER A}')), True)
+
+ self.assertEqual(bool(regex.match(r'\d', '0')), True)
+ self.assertEqual(bool(regex.match(r'\s', ' ')), True)
+ self.assertEqual(bool(regex.match(r'\w', 'A')), True)
+ self.assertEqual(regex.match(r"\d", "?"), None)
+ self.assertEqual(regex.match(r"\s", "?"), None)
+ self.assertEqual(regex.match(r"\w", "?"), None)
+ self.assertEqual(regex.match(r"\D", "0"), None)
+ self.assertEqual(regex.match(r"\S", " "), None)
+ self.assertEqual(regex.match(r"\W", "A"), None)
+ self.assertEqual(bool(regex.match(r'\D', '?')), True)
+ self.assertEqual(bool(regex.match(r'\S', '?')), True)
+ self.assertEqual(bool(regex.match(r'\W', '?')), True)
+
+ self.assertEqual(bool(regex.match(r'\p{L}', 'A')), True)
+ self.assertEqual(bool(regex.match(r'\p{L}', 'a')), True)
+ self.assertEqual(bool(regex.match(r'\p{Lu}', 'A')), True)
+ self.assertEqual(bool(regex.match(r'\p{Ll}', 'a')), True)
+
+ self.assertEqual(bool(regex.match(r'(?i)a', 'a')), True)
+ self.assertEqual(bool(regex.match(r'(?i)a', 'A')), True)
+
+ self.assertEqual(bool(regex.match(r'\w', '0')), True)
+ self.assertEqual(bool(regex.match(r'\w', 'a')), True)
+ self.assertEqual(bool(regex.match(r'\w', '_')), True)
+
+ self.assertEqual(regex.match(r"\X", "\xE0").span(), (0, 1))
+ self.assertEqual(regex.match(r"\X", "a\u0300").span(), (0, 2))
+ self.assertEqual(regex.findall(r"\X",
+ "a\xE0a\u0300e\xE9e\u0301"), ['a', '\xe0', 'a\u0300', 'e',
+ '\xe9', 'e\u0301'])
+ self.assertEqual(regex.findall(r"\X{3}",
+ "a\xE0a\u0300e\xE9e\u0301"), ['a\xe0a\u0300', 'e\xe9e\u0301'])
+ self.assertEqual(regex.findall(r"\X", "\r\r\n\u0301A\u0301"),
+ ['\r', '\r\n', '\u0301', 'A\u0301'])
+
+ self.assertEqual(bool(regex.match(r'\p{Ll}', 'a')), True)
+
+ chars_u = "-09AZaz_\u0393\u03b3"
+ chars_b = b"-09AZaz_"
+ word_set = set("Ll Lm Lo Lt Lu Mc Me Mn Nd Nl No Pc".split())
+
+ tests = [
+ (r"\w", chars_u, "09AZaz_\u0393\u03b3"),
+ (r"[[:word:]]", chars_u, "09AZaz_\u0393\u03b3"),
+ (r"\W", chars_u, "-"),
+ (r"[[:^word:]]", chars_u, "-"),
+ (r"\d", chars_u, "09"),
+ (r"[[:digit:]]", chars_u, "09"),
+ (r"\D", chars_u, "-AZaz_\u0393\u03b3"),
+ (r"[[:^digit:]]", chars_u, "-AZaz_\u0393\u03b3"),
+ (r"[[:alpha:]]", chars_u, "AZaz\u0393\u03b3"),
+ (r"[[:^alpha:]]", chars_u, "-09_"),
+ (r"[[:alnum:]]", chars_u, "09AZaz\u0393\u03b3"),
+ (r"[[:^alnum:]]", chars_u, "-_"),
+ (r"[[:xdigit:]]", chars_u, "09Aa"),
+ (r"[[:^xdigit:]]", chars_u, "-Zz_\u0393\u03b3"),
+ (r"\p{InBasicLatin}", "a\xE1", "a"),
+ (r"\P{InBasicLatin}", "a\xE1", "\xE1"),
+ (r"(?i)\p{InBasicLatin}", "a\xE1", "a"),
+ (r"(?i)\P{InBasicLatin}", "a\xE1", "\xE1"),
+
+ (br"(?L)\w", chars_b, b"09AZaz_"),
+ (br"(?L)[[:word:]]", chars_b, b"09AZaz_"),
+ (br"(?L)\W", chars_b, b"-"),
+ (br"(?L)[[:^word:]]", chars_b, b"-"),
+ (br"(?L)\d", chars_b, b"09"),
+ (br"(?L)[[:digit:]]", chars_b, b"09"),
+ (br"(?L)\D", chars_b, b"-AZaz_"),
+ (br"(?L)[[:^digit:]]", chars_b, b"-AZaz_"),
+ (br"(?L)[[:alpha:]]", chars_b, b"AZaz"),
+ (br"(?L)[[:^alpha:]]", chars_b, b"-09_"),
+ (br"(?L)[[:alnum:]]", chars_b, b"09AZaz"),
+ (br"(?L)[[:^alnum:]]", chars_b, b"-_"),
+ (br"(?L)[[:xdigit:]]", chars_b, b"09Aa"),
+ (br"(?L)[[:^xdigit:]]", chars_b, b"-Zz_"),
+
+ (br"(?a)\w", chars_b, b"09AZaz_"),
+ (br"(?a)[[:word:]]", chars_b, b"09AZaz_"),
+ (br"(?a)\W", chars_b, b"-"),
+ (br"(?a)[[:^word:]]", chars_b, b"-"),
+ (br"(?a)\d", chars_b, b"09"),
+ (br"(?a)[[:digit:]]", chars_b, b"09"),
+ (br"(?a)\D", chars_b, b"-AZaz_"),
+ (br"(?a)[[:^digit:]]", chars_b, b"-AZaz_"),
+ (br"(?a)[[:alpha:]]", chars_b, b"AZaz"),
+ (br"(?a)[[:^alpha:]]", chars_b, b"-09_"),
+ (br"(?a)[[:alnum:]]", chars_b, b"09AZaz"),
+ (br"(?a)[[:^alnum:]]", chars_b, b"-_"),
+ (br"(?a)[[:xdigit:]]", chars_b, b"09Aa"),
+ (br"(?a)[[:^xdigit:]]", chars_b, b"-Zz_"),
+ ]
+ for pattern, chars, expected in tests:
+ try:
+ if chars[ : 0].join(regex.findall(pattern, chars)) != expected:
+ self.fail("Failed: {}".format(pattern))
+ except Exception as e:
+ self.fail("Failed: {} raised {}".format(pattern, ascii(e)))
+
+ self.assertEqual(bool(regex.match(r"\p{NumericValue=0}", "0")),
+ True)
+ self.assertEqual(bool(regex.match(r"\p{NumericValue=1/2}",
+ "\N{VULGAR FRACTION ONE HALF}")), True)
+ self.assertEqual(bool(regex.match(r"\p{NumericValue=0.5}",
+ "\N{VULGAR FRACTION ONE HALF}")), True)
+
+ def test_word_class(self):
+ self.assertEqual(regex.findall(r"\w+",
+ " \u0939\u093f\u0928\u094d\u0926\u0940,"),
+ ['\u0939\u093f\u0928\u094d\u0926\u0940'])
+ self.assertEqual(regex.findall(r"\W+",
+ " \u0939\u093f\u0928\u094d\u0926\u0940,"), [' ', ','])
+ self.assertEqual(regex.split(r"(?V1)\b",
+ " \u0939\u093f\u0928\u094d\u0926\u0940,"), [' ',
+ '\u0939\u093f\u0928\u094d\u0926\u0940', ','])
+ self.assertEqual(regex.split(r"(?V1)\B",
+ " \u0939\u093f\u0928\u094d\u0926\u0940,"), ['', ' \u0939',
+ '\u093f', '\u0928', '\u094d', '\u0926', '\u0940,', ''])
+
+ def test_search_anchor(self):
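+ # \G anchors each match at the position where the previous match ended.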
+ self.assertEqual(regex.findall(r"\G\w{2}", "abcd ef"), ['ab', 'cd'])
+
+ def test_search_reverse(self):
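+ # The (?r) flag searches the string from right to left.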
+ self.assertEqual(regex.findall(r"(?r).", "abc"), ['c', 'b', 'a'])
+ self.assertEqual(regex.findall(r"(?r).", "abc", overlapped=True), ['c',
+ 'b', 'a'])
+ self.assertEqual(regex.findall(r"(?r)..", "abcde"), ['de', 'bc'])
+ self.assertEqual(regex.findall(r"(?r)..", "abcde", overlapped=True),
+ ['de', 'cd', 'bc', 'ab'])
+ self.assertEqual(regex.findall(r"(?r)(.)(-)(.)", "a-b-c",
+ overlapped=True), [("b", "-", "c"), ("a", "-", "b")])
+
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r).", "abc")], ['c',
+ 'b', 'a'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde",
+ overlapped=True)], ['de', 'cd', 'bc', 'ab'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r).", "abc")], ['c',
+ 'b', 'a'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde",
+ overlapped=True)], ['de', 'cd', 'bc', 'ab'])
+
+ self.assertEqual(regex.findall(r"^|\w+", "foo bar"), ['', 'foo',
+ 'bar'])
+ self.assertEqual(regex.findall(r"(?V1)^|\w+", "foo bar"), ['', 'foo',
+ 'bar'])
+ self.assertEqual(regex.findall(r"(?r)^|\w+", "foo bar"), ['bar', 'foo',
+ ''])
+ self.assertEqual(regex.findall(r"(?rV1)^|\w+", "foo bar"), ['bar',
+ 'foo', ''])
+
+ self.assertEqual([m[0] for m in regex.finditer(r"^|\w+", "foo bar")],
+ ['', 'foo', 'bar'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?V1)^|\w+",
+ "foo bar")], ['', 'foo', 'bar'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r)^|\w+",
+ "foo bar")], ['bar', 'foo', ''])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?rV1)^|\w+",
+ "foo bar")], ['bar', 'foo', ''])
+
+ self.assertEqual(regex.findall(r"\G\w{2}", "abcd ef"), ['ab', 'cd'])
+ self.assertEqual(regex.findall(r".{2}(?<=\G.*)", "abcd"), ['ab', 'cd'])
+ self.assertEqual(regex.findall(r"(?r)\G\w{2}", "abcd ef"), [])
+ self.assertEqual(regex.findall(r"(?r)\w{2}\G", "abcd ef"), ['ef'])
+
+ self.assertEqual(regex.findall(r"q*", "qqwe"), ['qq', '', '', ''])
+ self.assertEqual(regex.findall(r"(?V1)q*", "qqwe"), ['qq', '', '', ''])
+ self.assertEqual(regex.findall(r"(?r)q*", "qqwe"), ['', '', 'qq', ''])
+ self.assertEqual(regex.findall(r"(?rV1)q*", "qqwe"), ['', '', 'qq',
+ ''])
+
+ self.assertEqual(regex.findall(".", "abcd", pos=1, endpos=3), ['b',
+ 'c'])
+ self.assertEqual(regex.findall(".", "abcd", pos=1, endpos=-1), ['b',
+ 'c'])
+ self.assertEqual([m[0] for m in regex.finditer(".", "abcd", pos=1,
+ endpos=3)], ['b', 'c'])
+ self.assertEqual([m[0] for m in regex.finditer(".", "abcd", pos=1,
+ endpos=-1)], ['b', 'c'])
+
+ self.assertEqual([m[0] for m in regex.finditer("(?r).", "abcd", pos=1,
+ endpos=3)], ['c', 'b'])
+ self.assertEqual([m[0] for m in regex.finditer("(?r).", "abcd", pos=1,
+ endpos=-1)], ['c', 'b'])
+ self.assertEqual(regex.findall("(?r).", "abcd", pos=1, endpos=3), ['c',
+ 'b'])
+ self.assertEqual(regex.findall("(?r).", "abcd", pos=1, endpos=-1),
+ ['c', 'b'])
+
+ self.assertEqual(regex.findall(r"[ab]", "aB", regex.I), ['a', 'B'])
+ self.assertEqual(regex.findall(r"(?r)[ab]", "aB", regex.I), ['B', 'a'])
+
+ self.assertEqual(regex.findall(r"(?r).{2}", "abc"), ['bc'])
+ self.assertEqual(regex.findall(r"(?r).{2}", "abc", overlapped=True),
+ ['bc', 'ab'])
+ self.assertEqual(regex.findall(r"(\w+) (\w+)",
+ "first second third fourth fifth"), [('first', 'second'), ('third',
+ 'fourth')])
+ self.assertEqual(regex.findall(r"(?r)(\w+) (\w+)",
+ "first second third fourth fifth"), [('fourth', 'fifth'), ('second',
+ 'third')])
+
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r).{2}", "abc")],
+ ['bc'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r).{2}", "abc",
+ overlapped=True)], ['bc', 'ab'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(\w+) (\w+)",
+ "first second third fourth fifth")], ['first second',
+ 'third fourth'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r)(\w+) (\w+)",
+ "first second third fourth fifth")], ['fourth fifth',
+ 'second third'])
+
+ self.assertEqual(regex.search("abcdef", "abcdef").span(), (0, 6))
+ self.assertEqual(regex.search("(?r)abcdef", "abcdef").span(), (0, 6))
+ self.assertEqual(regex.search("(?i)abcdef", "ABCDEF").span(), (0, 6))
+ self.assertEqual(regex.search("(?ir)abcdef", "ABCDEF").span(), (0, 6))
+
+ self.assertEqual(regex.sub(r"(.)", r"\1", "abc"), 'abc')
+ self.assertEqual(regex.sub(r"(?r)(.)", r"\1", "abc"), 'abc')
+
+ def test_atomic(self):
+ # Issue 433030.
+ self.assertEqual(regex.search(r"(?>a*)a", "aa"), None)
+
+ def test_possessive(self):
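+ # Possessive quantifiers (a?+, a*+, a++, a{m,n}+) never give back what
+ # they matched, so e.g. "a*+a" can never succeed.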
+ # Single-character non-possessive.
+ self.assertEqual(regex.search(r"a?a", "a").span(), (0, 1))
+ self.assertEqual(regex.search(r"a*a", "aaa").span(), (0, 3))
+ self.assertEqual(regex.search(r"a+a", "aaa").span(), (0, 3))
+ self.assertEqual(regex.search(r"a{1,3}a", "aaa").span(), (0, 3))
+
+ # Multiple-character non-possessive.
+ self.assertEqual(regex.search(r"(?:ab)?ab", "ab").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?:ab)*ab", "ababab").span(), (0, 6))
+ self.assertEqual(regex.search(r"(?:ab)+ab", "ababab").span(), (0, 6))
+ self.assertEqual(regex.search(r"(?:ab){1,3}ab", "ababab").span(), (0,
+ 6))
+
+ # Single-character possessive.
+ self.assertEqual(regex.search(r"a?+a", "a"), None)
+ self.assertEqual(regex.search(r"a*+a", "aaa"), None)
+ self.assertEqual(regex.search(r"a++a", "aaa"), None)
+ self.assertEqual(regex.search(r"a{1,3}+a", "aaa"), None)
+
+ # Multiple-character possessive.
+ self.assertEqual(regex.search(r"(?:ab)?+ab", "ab"), None)
+ self.assertEqual(regex.search(r"(?:ab)*+ab", "ababab"), None)
+ self.assertEqual(regex.search(r"(?:ab)++ab", "ababab"), None)
+ self.assertEqual(regex.search(r"(?:ab){1,3}+ab", "ababab"), None)
+
+ def test_zerowidth(self):
+ # Issue 3262.
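+ # Python 3.7 changed re.split to split on zero-width matches; regex
+ # follows the behaviour of the running Python version.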
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.split(r"\b", "a b"), ['', 'a', ' ', 'b',
+ ''])
+ else:
+ self.assertEqual(regex.split(r"\b", "a b"), ['a b'])
+ self.assertEqual(regex.split(r"(?V1)\b", "a b"), ['', 'a', ' ', 'b',
+ ''])
+
+ # Issue 1647489.
+ self.assertEqual(regex.findall(r"^|\w+", "foo bar"), ['', 'foo',
+ 'bar'])
+ self.assertEqual([m[0] for m in regex.finditer(r"^|\w+", "foo bar")],
+ ['', 'foo', 'bar'])
+ self.assertEqual(regex.findall(r"(?r)^|\w+", "foo bar"), ['bar',
+ 'foo', ''])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r)^|\w+",
+ "foo bar")], ['bar', 'foo', ''])
+ self.assertEqual(regex.findall(r"(?V1)^|\w+", "foo bar"), ['', 'foo',
+ 'bar'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?V1)^|\w+",
+ "foo bar")], ['', 'foo', 'bar'])
+ self.assertEqual(regex.findall(r"(?rV1)^|\w+", "foo bar"), ['bar',
+ 'foo', ''])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?rV1)^|\w+",
+ "foo bar")], ['bar', 'foo', ''])
+
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.split("", "xaxbxc"), ['', 'x', 'a', 'x',
+ 'b', 'x', 'c', ''])
+ self.assertEqual([m for m in regex.splititer("", "xaxbxc")], ['',
+ 'x', 'a', 'x', 'b', 'x', 'c', ''])
+ else:
+ self.assertEqual(regex.split("", "xaxbxc"), ['xaxbxc'])
+ self.assertEqual([m for m in regex.splititer("", "xaxbxc")],
+ ['xaxbxc'])
+
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.split("(?r)", "xaxbxc"), ['', 'c', 'x', 'b',
+ 'x', 'a', 'x', ''])
+ self.assertEqual([m for m in regex.splititer("(?r)", "xaxbxc")],
+ ['', 'c', 'x', 'b', 'x', 'a', 'x', ''])
+ else:
+ self.assertEqual(regex.split("(?r)", "xaxbxc"), ['xaxbxc'])
+ self.assertEqual([m for m in regex.splititer("(?r)", "xaxbxc")],
+ ['xaxbxc'])
+
+ self.assertEqual(regex.split("(?V1)", "xaxbxc"), ['', 'x', 'a', 'x',
+ 'b', 'x', 'c', ''])
+ self.assertEqual([m for m in regex.splititer("(?V1)", "xaxbxc")], ['',
+ 'x', 'a', 'x', 'b', 'x', 'c', ''])
+
+ self.assertEqual(regex.split("(?rV1)", "xaxbxc"), ['', 'c', 'x', 'b',
+ 'x', 'a', 'x', ''])
+ self.assertEqual([m for m in regex.splititer("(?rV1)", "xaxbxc")], ['',
+ 'c', 'x', 'b', 'x', 'a', 'x', ''])
+
+ def test_scoped_and_inline_flags(self):
+ # Issues 433028, 433024, 433027.
+ self.assertEqual(regex.search(r"(?i)Ab", "ab").span(), (0, 2))
+ self.assertEqual(regex.search(r"(?i:A)b", "ab").span(), (0, 2))
+ # Changed to positional flags in regex 2023.12.23.
+ self.assertEqual(regex.search(r"A(?i)b", "ab"), None)
+
+ self.assertEqual(regex.search(r"(?V0)Ab", "ab"), None)
+ self.assertEqual(regex.search(r"(?V1)Ab", "ab"), None)
+ self.assertEqual(regex.search(r"(?-i)Ab", "ab", flags=regex.I), None)
+ self.assertEqual(regex.search(r"(?-i:A)b", "ab", flags=regex.I), None)
+ self.assertEqual(regex.search(r"A(?-i)b", "ab", flags=regex.I).span(),
+ (0, 2))
+
+ def test_repeated_repeats(self):
+ # Issue 2537.
+ self.assertEqual(regex.search(r"(?:a+)+", "aaa").span(), (0, 3))
+ self.assertEqual(regex.search(r"(?:(?:ab)+c)+", "abcabc").span(), (0,
+ 6))
+
+ # Hg issue 286.
+ self.assertEqual(regex.search(r"(?:a+){2,}", "aaa").span(), (0, 3))
+
+ def test_lookbehind(self):
+ self.assertEqual(regex.search(r"123(?<=a\d+)", "a123").span(), (1, 4))
+ self.assertEqual(regex.search(r"123(?<=a\d+)", "b123"), None)
+ self.assertEqual(regex.search(r"123(?= (3, 7, 0):
+ self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "xy"),
+ 'y-x-')
+ else:
+ self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "xy"),
+ 'y-x')
+ self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "xy"), 'y-x-')
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "x"), '-x-')
+ else:
+ self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "x"), '-x')
+ self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "x"), '-x-')
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "y"), 'y--')
+ else:
+ self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "y"), 'y-')
+ self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "y"), 'y--')
+
+ def test_bug_10328(self):
+ # Issue 10328.
+ pat = regex.compile(r'(?mV0)(?P<trailing_ws>[ \t]+\r*$)|(?P<no_final_newline>(?<=[^\n])\Z)')
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>',
+ 'foobar '), ('foobar<trailing_ws><no_final_newline>', 2))
+ else:
+ self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>',
+ 'foobar '), ('foobar<trailing_ws>', 1))
+ self.assertEqual([m.group() for m in pat.finditer('foobar ')], [' ',
+ ''])
+ pat = regex.compile(r'(?mV1)(?P<trailing_ws>[ \t]+\r*$)|(?P<no_final_newline>(?<=[^\n])\Z)')
+ self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>',
+ 'foobar '), ('foobar<trailing_ws><no_final_newline>', 2))
+ self.assertEqual([m.group() for m in pat.finditer('foobar ')], [' ',
+ ''])
+
+ def test_overlapped(self):
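+ # With overlapped=True, matches may also start inside an earlier match.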
+ self.assertEqual(regex.findall(r"..", "abcde"), ['ab', 'cd'])
+ self.assertEqual(regex.findall(r"..", "abcde", overlapped=True), ['ab',
+ 'bc', 'cd', 'de'])
+ self.assertEqual(regex.findall(r"(?r)..", "abcde"), ['de', 'bc'])
+ self.assertEqual(regex.findall(r"(?r)..", "abcde", overlapped=True),
+ ['de', 'cd', 'bc', 'ab'])
+ self.assertEqual(regex.findall(r"(.)(-)(.)", "a-b-c", overlapped=True),
+ [("a", "-", "b"), ("b", "-", "c")])
+
+ self.assertEqual([m[0] for m in regex.finditer(r"..", "abcde")], ['ab',
+ 'cd'])
+ self.assertEqual([m[0] for m in regex.finditer(r"..", "abcde",
+ overlapped=True)], ['ab', 'bc', 'cd', 'de'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde")],
+ ['de', 'bc'])
+ self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde",
+ overlapped=True)], ['de', 'cd', 'bc', 'ab'])
+
+ self.assertEqual([m.groups() for m in regex.finditer(r"(.)(-)(.)",
+ "a-b-c", overlapped=True)], [("a", "-", "b"), ("b", "-", "c")])
+ self.assertEqual([m.groups() for m in regex.finditer(r"(?r)(.)(-)(.)",
+ "a-b-c", overlapped=True)], [("b", "-", "c"), ("a", "-", "b")])
+
+ def test_splititer(self):
+ self.assertEqual(regex.split(r",", "a,b,,c,"), ['a', 'b', '', 'c', ''])
+ self.assertEqual([m for m in regex.splititer(r",", "a,b,,c,")], ['a',
+ 'b', '', 'c', ''])
+
+ def test_grapheme(self):
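+ # \X matches one grapheme cluster: a base character plus any combining
+ # marks (and \r\n as a single cluster).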
+ self.assertEqual(regex.match(r"\X", "\xE0").span(), (0, 1))
+ self.assertEqual(regex.match(r"\X", "a\u0300").span(), (0, 2))
+
+ self.assertEqual(regex.findall(r"\X",
+ "a\xE0a\u0300e\xE9e\u0301"), ['a', '\xe0', 'a\u0300', 'e',
+ '\xe9', 'e\u0301'])
+ self.assertEqual(regex.findall(r"\X{3}",
+ "a\xE0a\u0300e\xE9e\u0301"), ['a\xe0a\u0300', 'e\xe9e\u0301'])
+ self.assertEqual(regex.findall(r"\X", "\r\r\n\u0301A\u0301"),
+ ['\r', '\r\n', '\u0301', 'A\u0301'])
+
+ def test_word_boundary(self):
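+ # The (?w) flag uses the Unicode default word boundaries (UAX #29),
+ # which keep "can't" and "32.3" together as single words.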
+ text = 'The quick ("brown") fox can\'t jump 32.3 feet, right?'
+ self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'The', ' ',
+ 'quick', ' ("', 'brown', '") ', 'fox', ' ', 'can', "'", 't',
+ ' ', 'jump', ' ', '32', '.', '3', ' ', 'feet', ', ',
+ 'right', '?'])
+ self.assertEqual(regex.split(r'(?V1w)\b', text), ['', 'The', ' ',
+ 'quick', ' ', '(', '"', 'brown', '"', ')', ' ', 'fox', ' ',
+ "can't", ' ', 'jump', ' ', '32.3', ' ', 'feet', ',', ' ',
+ 'right', '?', ''])
+
+ text = "The fox"
+ self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'The', ' ',
+ 'fox', ''])
+ self.assertEqual(regex.split(r'(?V1w)\b', text), ['', 'The', ' ',
+ 'fox', ''])
+
+ text = "can't aujourd'hui l'objectif"
+ self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'can', "'",
+ 't', ' ', 'aujourd', "'", 'hui', ' ', 'l', "'", 'objectif',
+ ''])
+ self.assertEqual(regex.split(r'(?V1w)\b', text), ['', "can't", ' ',
+ "aujourd'hui", ' ', "l'objectif", ''])
+
+ def test_line_boundary(self):
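+ # Under (?w), the line boundaries used by ., ^ and $ also recognise
+ # \r and \r\n, not just \n.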
+ self.assertEqual(regex.findall(r".+", "Line 1\nLine 2\n"), ["Line 1",
+ "Line 2"])
+ self.assertEqual(regex.findall(r".+", "Line 1\rLine 2\r"),
+ ["Line 1\rLine 2\r"])
+ self.assertEqual(regex.findall(r".+", "Line 1\r\nLine 2\r\n"),
+ ["Line 1\r", "Line 2\r"])
+ self.assertEqual(regex.findall(r"(?w).+", "Line 1\nLine 2\n"),
+ ["Line 1", "Line 2"])
+ self.assertEqual(regex.findall(r"(?w).+", "Line 1\rLine 2\r"),
+ ["Line 1", "Line 2"])
+ self.assertEqual(regex.findall(r"(?w).+", "Line 1\r\nLine 2\r\n"),
+ ["Line 1", "Line 2"])
+
+ self.assertEqual(regex.search(r"^abc", "abc").start(), 0)
+ self.assertEqual(regex.search(r"^abc", "\nabc"), None)
+ self.assertEqual(regex.search(r"^abc", "\rabc"), None)
+ self.assertEqual(regex.search(r"(?w)^abc", "abc").start(), 0)
+ self.assertEqual(regex.search(r"(?w)^abc", "\nabc"), None)
+ self.assertEqual(regex.search(r"(?w)^abc", "\rabc"), None)
+
+ self.assertEqual(regex.search(r"abc$", "abc").start(), 0)
+ self.assertEqual(regex.search(r"abc$", "abc\n").start(), 0)
+ self.assertEqual(regex.search(r"abc$", "abc\r"), None)
+ self.assertEqual(regex.search(r"(?w)abc$", "abc").start(), 0)
+ self.assertEqual(regex.search(r"(?w)abc$", "abc\n").start(), 0)
+ self.assertEqual(regex.search(r"(?w)abc$", "abc\r").start(), 0)
+
+ self.assertEqual(regex.search(r"(?m)^abc", "abc").start(), 0)
+ self.assertEqual(regex.search(r"(?m)^abc", "\nabc").start(), 1)
+ self.assertEqual(regex.search(r"(?m)^abc", "\rabc"), None)
+ self.assertEqual(regex.search(r"(?mw)^abc", "abc").start(), 0)
+ self.assertEqual(regex.search(r"(?mw)^abc", "\nabc").start(), 1)
+ self.assertEqual(regex.search(r"(?mw)^abc", "\rabc").start(), 1)
+
+ self.assertEqual(regex.search(r"(?m)abc$", "abc").start(), 0)
+ self.assertEqual(regex.search(r"(?m)abc$", "abc\n").start(), 0)
+ self.assertEqual(regex.search(r"(?m)abc$", "abc\r"), None)
+ self.assertEqual(regex.search(r"(?mw)abc$", "abc").start(), 0)
+ self.assertEqual(regex.search(r"(?mw)abc$", "abc\n").start(), 0)
+ self.assertEqual(regex.search(r"(?mw)abc$", "abc\r").start(), 0)
+
+ def test_branch_reset(self):
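+ # In a branch-reset group (?|...|...), each alternative reuses the
+ # same group numbers.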
+ self.assertEqual(regex.match(r"(?:(a)|(b))(c)", "ac").groups(), ('a',
+ None, 'c'))
+ self.assertEqual(regex.match(r"(?:(a)|(b))(c)", "bc").groups(), (None,
+ 'b', 'c'))
+ self.assertEqual(regex.match(r"(?:(?a)|(?b))(?c)",
+ "ac").groups(), ('a', None, 'c'))
+ self.assertEqual(regex.match(r"(?:(?a)|(?b))(?c)",
+ "bc").groups(), (None, 'b', 'c'))
+
+ self.assertEqual(regex.match(r"(?a)(?:(?b)|(?c))(?d)",
+ "abd").groups(), ('a', 'b', None, 'd'))
+ self.assertEqual(regex.match(r"(?a)(?:(?b)|(?c))(?d)",
+ "acd").groups(), ('a', None, 'c', 'd'))
+ self.assertEqual(regex.match(r"(a)(?:(b)|(c))(d)", "abd").groups(),
+ ('a', 'b', None, 'd'))
+
+ self.assertEqual(regex.match(r"(a)(?:(b)|(c))(d)", "acd").groups(),
+ ('a', None, 'c', 'd'))
+ self.assertEqual(regex.match(r"(a)(?|(b)|(b))(d)", "abd").groups(),
+ ('a', 'b', 'd'))
+ self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "ac").groups(),
+ ('a', None, 'c'))
+ self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "bc").groups(),
+ (None, 'b', 'c'))
+ self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "ac").groups(),
+ ('a', 'c'))
+
+ self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "bc").groups(),
+ ('b', 'c'))
+
+ self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(?d))(e)",
+ "abe").groups(), ('a', 'b', 'e'))
+ self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(?d))(e)",
+ "cde").groups(), ('d', 'c', 'e'))
+ self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(d))(e)",
+ "abe").groups(), ('a', 'b', 'e'))
+ self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(d))(e)",
+ "cde").groups(), ('d', 'c', 'e'))
+ self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(d))(e)",
+ "abe").groups(), ('a', 'b', 'e'))
+ self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(d))(e)",
+ "cde").groups(), ('c', 'd', 'e'))
+
+ # Hg issue 87: allow duplicate names of groups.
+ self.assertEqual(regex.match(r"(?|(?<a>a)(?<b>b)|(c)(?<a>d))(e)",
+ "abe").groups(), ("a", "b", "e"))
+ self.assertEqual(regex.match(r"(?|(?<a>a)(?<b>b)|(c)(?<a>d))(e)",
+ "abe").capturesdict(), {"a": ["a"], "b": ["b"]})
+ self.assertEqual(regex.match(r"(?|(?<a>a)(?<b>b)|(c)(?<a>d))(e)",
+ "cde").groups(), ("d", None, "e"))
+ self.assertEqual(regex.match(r"(?|(?<a>a)(?<b>b)|(c)(?<a>d))(e)",
+ "cde").capturesdict(), {"a": ["c", "d"], "b": []})
+
+ def test_set(self):
+ self.assertEqual(regex.match(r"[a]", "a").span(), (0, 1))
+ self.assertEqual(regex.match(r"(?i)[a]", "A").span(), (0, 1))
+ self.assertEqual(regex.match(r"[a-b]", r"a").span(), (0, 1))
+ self.assertEqual(regex.match(r"(?i)[a-b]", r"A").span(), (0, 1))
+
+ self.assertEqual(regex.sub(r"(?V0)([][])", r"-", "a[b]c"), "a-b-c")
+
+ self.assertEqual(regex.findall(r"[\p{Alpha}]", "a0"), ["a"])
+ self.assertEqual(regex.findall(r"(?i)[\p{Alpha}]", "A0"), ["A"])
+
+ self.assertEqual(regex.findall(r"[a\p{Alpha}]", "ab0"), ["a", "b"])
+ self.assertEqual(regex.findall(r"[a\P{Alpha}]", "ab0"), ["a", "0"])
+ self.assertEqual(regex.findall(r"(?i)[a\p{Alpha}]", "ab0"), ["a",
+ "b"])
+ self.assertEqual(regex.findall(r"(?i)[a\P{Alpha}]", "ab0"), ["a",
+ "0"])
+
+ self.assertEqual(regex.findall(r"[a-b\p{Alpha}]", "abC0"), ["a",
+ "b", "C"])
+ self.assertEqual(regex.findall(r"(?i)[a-b\p{Alpha}]", "AbC0"), ["A",
+ "b", "C"])
+
+ self.assertEqual(regex.findall(r"[\p{Alpha}]", "a0"), ["a"])
+ self.assertEqual(regex.findall(r"[\P{Alpha}]", "a0"), ["0"])
+ self.assertEqual(regex.findall(r"[^\p{Alpha}]", "a0"), ["0"])
+ self.assertEqual(regex.findall(r"[^\P{Alpha}]", "a0"), ["a"])
+
+ self.assertEqual("".join(regex.findall(r"[^\d-h]", "a^b12c-h")),
+ 'a^bc')
+ self.assertEqual("".join(regex.findall(r"[^\dh]", "a^b12c-h")),
+ 'a^bc-')
+ self.assertEqual("".join(regex.findall(r"[^h\s\db]", "a^b 12c-h")),
+ 'a^c-')
+ self.assertEqual("".join(regex.findall(r"[^b\w]", "a b")), ' ')
+ self.assertEqual("".join(regex.findall(r"[^b\S]", "a b")), ' ')
+ self.assertEqual("".join(regex.findall(r"[^8\d]", "a 1b2")), 'a b')
+
+ all_chars = "".join(chr(c) for c in range(0x100))
+ self.assertEqual(len(regex.findall(r"\p{ASCII}", all_chars)), 128)
+ self.assertEqual(len(regex.findall(r"\p{Letter}", all_chars)),
+ 117)
+ self.assertEqual(len(regex.findall(r"\p{Digit}", all_chars)), 10)
+
+ # Set operators
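+ # (?V1) enables set operations in character classes: && intersection,
+ # -- difference, || union, ~~ symmetric difference.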
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Letter}]",
+ all_chars)), 52)
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Alnum}&&\p{Letter}]",
+ all_chars)), 52)
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Alnum}&&\p{Digit}]",
+ all_chars)), 10)
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Cc}]",
+ all_chars)), 33)
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Graph}]",
+ all_chars)), 94)
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}--\p{Cc}]",
+ all_chars)), 95)
+ self.assertEqual(len(regex.findall(r"[\p{Letter}\p{Digit}]",
+ all_chars)), 127)
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{Letter}||\p{Digit}]",
+ all_chars)), 127)
+ self.assertEqual(len(regex.findall(r"\p{HexDigit}", all_chars)),
+ 22)
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{HexDigit}~~\p{Digit}]",
+ all_chars)), 12)
+ self.assertEqual(len(regex.findall(r"(?V1)[\p{Digit}~~\p{HexDigit}]",
+ all_chars)), 12)
+
+ self.assertEqual(repr(type(regex.compile(r"(?V0)([][-])"))),
+ self.PATTERN_CLASS)
+ self.assertEqual(regex.findall(r"(?V1)[[a-z]--[aei]]", "abc"), ["b",
+ "c"])
+ self.assertEqual(regex.findall(r"(?iV1)[[a-z]--[aei]]", "abc"), ["b",
+ "c"])
+ self.assertEqual(regex.findall(r"(?V1)[\w--a]","abc"), ["b", "c"])
+ self.assertEqual(regex.findall(r"(?iV1)[\w--a]","abc"), ["b", "c"])
+
+ def test_various(self):
+ tests = [
+ # Test ?P< and ?P= extensions.
+ ('(?P<foo_123', '', '', regex.error, self.MISSING_GT), # Unterminated group identifier.
+ ('(?P<1>a)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with a digit.
+ ('(?P<!>a)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with an illegal char.
+ ('(?P<foo!>a)', '', '', regex.error, self.BAD_GROUP_NAME), # Contains an illegal char.
+
+ # Same tests, for the ?P= form.
+ ('(?P<foo_123>a)(?P=foo_123', 'aa', '', regex.error,
+ self.MISSING_RPAREN),
+ ('(?P<foo_123>a)(?P=1)', 'aa', '1', ascii('a')),
+ ('(?P<foo_123>a)(?P=0)', 'aa', '', regex.error,
+ self.BAD_GROUP_NAME),
+ ('(?P<foo_123>a)(?P=-1)', 'aa', '', regex.error,
+ self.BAD_GROUP_NAME),
+ ('(?P<foo_123>a)(?P=!)', 'aa', '', regex.error,
+ self.BAD_GROUP_NAME),
+ ('(?P<foo_123>a)(?P=foo_124)', 'aa', '', regex.error,
+ self.UNKNOWN_GROUP), # Backref to undefined group.
+
+ ('(?P<foo_123>a)', 'a', '1', ascii('a')),
+ ('(?P<foo_123>a)(?P=foo_123)', 'aa', '1', ascii('a')),
+
+ # Mal-formed \g in pattern treated as literal for compatibility.
+ (r'(?<foo_123>a)\g<foo_123', 'aa', '', ascii(None)),
+ (r'(?<foo_123>a)\g<1>', 'aa', '1', ascii('a')),
+ (r'(?<foo_123>a)\g<!>', 'aa', '', ascii(None)),
+ (r'(?<foo_123>a)\g<foo_124>', 'aa', '', regex.error,
+ self.UNKNOWN_GROUP), # Backref to undefined group.
+
+ ('(?<foo_123>a)', 'a', '1', ascii('a')),
+ (r'(?<foo_123>a)\g<foo_123>', 'aa', '1', ascii('a')),
+
+ # Test octal escapes.
+ ('\\1', 'a', '', regex.error, self.INVALID_GROUP_REF), # Backreference.
+ ('[\\1]', '\1', '0', "'\\x01'"), # Character.
+ ('\\09', chr(0) + '9', '0', ascii(chr(0) + '9')),
+ ('\\141', 'a', '0', ascii('a')),
+ ('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9',
+ '0,11', ascii(('abcdefghijklk9', 'k'))),
+
+ # Test \0 is handled everywhere.
+ (r'\0', '\0', '0', ascii('\0')),
+ (r'[\0a]', '\0', '0', ascii('\0')),
+ (r'[a\0]', '\0', '0', ascii('\0')),
+ (r'[^a\0]', '\0', '', ascii(None)),
+
+ # Test various letter escapes.
+ (r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', '0',
+ ascii('\a\b\f\n\r\t\v')),
+ (r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', '0',
+ ascii('\a\b\f\n\r\t\v')),
+ (r'\xff', '\377', '0', ascii(chr(255))),
+
+ # New \x semantics.
+ (r'\x00ffffffffffffff', '\377', '', ascii(None)),
+ (r'\x00f', '\017', '', ascii(None)),
+ (r'\x00fe', '\376', '', ascii(None)),
+
+ (r'\x00ff', '\377', '', ascii(None)),
+ (r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', '0', ascii('\t\n\v\r\f\ag')),
+ ('\t\n\v\r\f\a\\g', '\t\n\v\r\f\ag', '0', ascii('\t\n\v\r\f\ag')),
+ (r'\t\n\v\r\f\a', '\t\n\v\r\f\a', '0', ascii(chr(9) + chr(10) +
+ chr(11) + chr(13) + chr(12) + chr(7))),
+ (r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', '0',
+ ascii('\t\n\v\r\f\b')),
+
+ (r"^\w+=(\\[\000-\277]|[^\n\\])*",
+ "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c", '0',
+ ascii("SRC=eval.c g.c blah blah blah \\\\")),
+
+ # Test that . only matches \n in DOTALL mode.
+ ('a.b', 'acb', '0', ascii('acb')),
+ ('a.b', 'a\nb', '', ascii(None)),
+ ('a.*b', 'acc\nccb', '', ascii(None)),
+ ('a.{4,5}b', 'acc\nccb', '', ascii(None)),
+ ('a.b', 'a\rb', '0', ascii('a\rb')),
+ # Changed to positional flags in regex 2023.12.23.
+ ('a.b(?s)', 'a\nb', '', ascii(None)),
+ ('(?s)a.b', 'a\nb', '0', ascii('a\nb')),
+ ('a.*(?s)b', 'acc\nccb', '', ascii(None)),
+ ('(?s)a.*b', 'acc\nccb', '0', ascii('acc\nccb')),
+ ('(?s)a.{4,5}b', 'acc\nccb', '0', ascii('acc\nccb')),
+
+ (')', '', '', regex.error, self.TRAILING_CHARS), # Unmatched right bracket.
+ ('', '', '0', "''"), # Empty pattern.
+ ('abc', 'abc', '0', ascii('abc')),
+ ('abc', 'xbc', '', ascii(None)),
+ ('abc', 'axc', '', ascii(None)),
+ ('abc', 'abx', '', ascii(None)),
+ ('abc', 'xabcy', '0', ascii('abc')),
+ ('abc', 'ababc', '0', ascii('abc')),
+ ('ab*c', 'abc', '0', ascii('abc')),
+ ('ab*bc', 'abc', '0', ascii('abc')),
+
+ ('ab*bc', 'abbc', '0', ascii('abbc')),
+ ('ab*bc', 'abbbbc', '0', ascii('abbbbc')),
+ ('ab+bc', 'abbc', '0', ascii('abbc')),
+ ('ab+bc', 'abc', '', ascii(None)),
+ ('ab+bc', 'abq', '', ascii(None)),
+ ('ab+bc', 'abbbbc', '0', ascii('abbbbc')),
+ ('ab?bc', 'abbc', '0', ascii('abbc')),
+ ('ab?bc', 'abc', '0', ascii('abc')),
+ ('ab?bc', 'abbbbc', '', ascii(None)),
+ ('ab?c', 'abc', '0', ascii('abc')),
+
+ ('^abc$', 'abc', '0', ascii('abc')),
+ ('^abc$', 'abcc', '', ascii(None)),
+ ('^abc', 'abcc', '0', ascii('abc')),
+ ('^abc$', 'aabc', '', ascii(None)),
+ ('abc$', 'aabc', '0', ascii('abc')),
+ ('^', 'abc', '0', ascii('')),
+ ('$', 'abc', '0', ascii('')),
+ ('a.c', 'abc', '0', ascii('abc')),
+ ('a.c', 'axc', '0', ascii('axc')),
+ ('a.*c', 'axyzc', '0', ascii('axyzc')),
+
+ ('a.*c', 'axyzd', '', ascii(None)),
+ ('a[bc]d', 'abc', '', ascii(None)),
+ ('a[bc]d', 'abd', '0', ascii('abd')),
+ ('a[b-d]e', 'abd', '', ascii(None)),
+ ('a[b-d]e', 'ace', '0', ascii('ace')),
+ ('a[b-d]', 'aac', '0', ascii('ac')),
+ ('a[-b]', 'a-', '0', ascii('a-')),
+ ('a[\\-b]', 'a-', '0', ascii('a-')),
+ ('a[b-]', 'a-', '0', ascii('a-')),
+ ('a[]b', '-', '', regex.error, self.BAD_SET),
+
+ ('a[', '-', '', regex.error, self.BAD_SET),
+ ('a\\', '-', '', regex.error, self.BAD_ESCAPE),
+ ('abc)', '-', '', regex.error, self.TRAILING_CHARS),
+ ('(abc', '-', '', regex.error, self.MISSING_RPAREN),
+ ('a]', 'a]', '0', ascii('a]')),
+ ('a[]]b', 'a]b', '0', ascii('a]b')),
+ ('a[]]b', 'a]b', '0', ascii('a]b')),
+ ('a[^bc]d', 'aed', '0', ascii('aed')),
+ ('a[^bc]d', 'abd', '', ascii(None)),
+ ('a[^-b]c', 'adc', '0', ascii('adc')),
+
+ ('a[^-b]c', 'a-c', '', ascii(None)),
+ ('a[^]b]c', 'a]c', '', ascii(None)),
+ ('a[^]b]c', 'adc', '0', ascii('adc')),
+ ('\\ba\\b', 'a-', '0', ascii('a')),
+ ('\\ba\\b', '-a', '0', ascii('a')),
+ ('\\ba\\b', '-a-', '0', ascii('a')),
+ ('\\by\\b', 'xy', '', ascii(None)),
+ ('\\by\\b', 'yz', '', ascii(None)),
+ ('\\by\\b', 'xyz', '', ascii(None)),
+ ('x\\b', 'xyz', '', ascii(None)),
+
+ ('x\\B', 'xyz', '0', ascii('x')),
+ ('\\Bz', 'xyz', '0', ascii('z')),
+ ('z\\B', 'xyz', '', ascii(None)),
+ ('\\Bx', 'xyz', '', ascii(None)),
+ ('\\Ba\\B', 'a-', '', ascii(None)),
+ ('\\Ba\\B', '-a', '', ascii(None)),
+ ('\\Ba\\B', '-a-', '', ascii(None)),
+ ('\\By\\B', 'xy', '', ascii(None)),
+ ('\\By\\B', 'yz', '', ascii(None)),
+ ('\\By\\b', 'xy', '0', ascii('y')),
+
+ ('\\by\\B', 'yz', '0', ascii('y')),
+ ('\\By\\B', 'xyz', '0', ascii('y')),
+ ('ab|cd', 'abc', '0', ascii('ab')),
+ ('ab|cd', 'abcd', '0', ascii('ab')),
+ ('()ef', 'def', '0,1', ascii(('ef', ''))),
+ ('$b', 'b', '', ascii(None)),
+ ('a\\(b', 'a(b', '', ascii(('a(b',))),
+ ('a\\(*b', 'ab', '0', ascii('ab')),
+ ('a\\(*b', 'a((b', '0', ascii('a((b')),
+ ('a\\\\b', 'a\\b', '0', ascii('a\\b')),
+
+ ('((a))', 'abc', '0,1,2', ascii(('a', 'a', 'a'))),
+ ('(a)b(c)', 'abc', '0,1,2', ascii(('abc', 'a', 'c'))),
+ ('a+b+c', 'aabbabc', '0', ascii('abc')),
+ ('(a+|b)*', 'ab', '0,1', ascii(('ab', 'b'))),
+ ('(a+|b)+', 'ab', '0,1', ascii(('ab', 'b'))),
+ ('(a+|b)?', 'ab', '0,1', ascii(('a', 'a'))),
+ (')(', '-', '', regex.error, self.TRAILING_CHARS),
+ ('[^ab]*', 'cde', '0', ascii('cde')),
+ ('abc', '', '', ascii(None)),
+ ('a*', '', '0', ascii('')),
+
+ ('a|b|c|d|e', 'e', '0', ascii('e')),
+ ('(a|b|c|d|e)f', 'ef', '0,1', ascii(('ef', 'e'))),
+ ('abcd*efg', 'abcdefg', '0', ascii('abcdefg')),
+ ('ab*', 'xabyabbbz', '0', ascii('ab')),
+ ('ab*', 'xayabbbz', '0', ascii('a')),
+ ('(ab|cd)e', 'abcde', '0,1', ascii(('cde', 'cd'))),
+ ('[abhgefdc]ij', 'hij', '0', ascii('hij')),
+ ('^(ab|cd)e', 'abcde', '', ascii(None)),
+ ('(abc|)ef', 'abcdef', '0,1', ascii(('ef', ''))),
+ ('(a|b)c*d', 'abcd', '0,1', ascii(('bcd', 'b'))),
+
+ ('(ab|ab*)bc', 'abc', '0,1', ascii(('abc', 'a'))),
+ ('a([bc]*)c*', 'abc', '0,1', ascii(('abc', 'bc'))),
+ ('a([bc]*)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))),
+ ('a([bc]+)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))),
+ ('a([bc]*)(c+d)', 'abcd', '0,1,2', ascii(('abcd', 'b', 'cd'))),
+ ('a[bcd]*dcdcde', 'adcdcde', '0', ascii('adcdcde')),
+ ('a[bcd]+dcdcde', 'adcdcde', '', ascii(None)),
+ ('(ab|a)b*c', 'abc', '0,1', ascii(('abc', 'ab'))),
+ ('((a)(b)c)(d)', 'abcd', '1,2,3,4', ascii(('abc', 'a', 'b', 'd'))),
+ ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', '0', ascii('alpha')),
+
+ ('^a(bc+|b[eh])g|.h$', 'abh', '0,1', ascii(('bh', None))),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', '0,1,2', ascii(('effgz',
+ 'effgz', None))),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'ij', '0,1,2', ascii(('ij', 'ij',
+ 'j'))),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'effg', '', ascii(None)),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', '', ascii(None)),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', '0,1,2', ascii(('effgz',
+ 'effgz', None))),
+ ('(((((((((a)))))))))', 'a', '0', ascii('a')),
+ ('multiple words of text', 'uh-uh', '', ascii(None)),
+ ('multiple words', 'multiple words, yeah', '0',
+ ascii('multiple words')),
+ ('(.*)c(.*)', 'abcde', '0,1,2', ascii(('abcde', 'ab', 'de'))),
+
+ ('\\((.*), (.*)\\)', '(a, b)', '2,1', ascii(('b', 'a'))),
+ ('[k]', 'ab', '', ascii(None)),
+ ('a[-]?c', 'ac', '0', ascii('ac')),
+ ('(abc)\\1', 'abcabc', '1', ascii('abc')),
+ ('([a-c]*)\\1', 'abcabc', '1', ascii('abc')),
+ ('^(.+)?B', 'AB', '1', ascii('A')),
+ ('(a+).\\1$', 'aaaaa', '0,1', ascii(('aaaaa', 'aa'))),
+ ('^(a+).\\1$', 'aaaa', '', ascii(None)),
+ ('(abc)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))),
+ ('([a-c]+)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))),
+
+ ('(a)\\1', 'aa', '0,1', ascii(('aa', 'a'))),
+ ('(a+)\\1', 'aa', '0,1', ascii(('aa', 'a'))),
+ ('(a+)+\\1', 'aa', '0,1', ascii(('aa', 'a'))),
+ ('(a).+\\1', 'aba', '0,1', ascii(('aba', 'a'))),
+ ('(a)ba*\\1', 'aba', '0,1', ascii(('aba', 'a'))),
+ ('(aa|a)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))),
+ ('(a|aa)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))),
+ ('(a+)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))),
+ ('([abc]*)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))),
+ ('(a)(b)c|ab', 'ab', '0,1,2', ascii(('ab', None, None))),
+
+ ('(a)+x', 'aaax', '0,1', ascii(('aaax', 'a'))),
+ ('([ac])+x', 'aacx', '0,1', ascii(('aacx', 'c'))),
+ ('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', '0,1',
+ ascii(('d:msgs/tdir/sub1/', 'tdir/'))),
+ ('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah',
+ '0,1,2,3', ascii(('track1.title:TBlah blah blah', 'track1',
+ 'title', 'Blah blah blah'))),
+ ('([^N]*N)+', 'abNNxyzN', '0,1', ascii(('abNNxyzN', 'xyzN'))),
+ ('([^N]*N)+', 'abNNxyz', '0,1', ascii(('abNN', 'N'))),
+ ('([abc]*)x', 'abcx', '0,1', ascii(('abcx', 'abc'))),
+ ('([abc]*)x', 'abc', '', ascii(None)),
+ ('([xyz]*)x', 'abcx', '0,1', ascii(('x', ''))),
+ ('(a)+b|aac', 'aac', '0,1', ascii(('aac', None))),
+
+ # Test symbolic groups.
+ ('(?P<i d>aaa)a', 'aaaa', '', regex.error, self.BAD_GROUP_NAME),
+ ('(?P<id>aaa)a', 'aaaa', '0,id', ascii(('aaaa', 'aaa'))),
+ ('(?P<id>aa)(?P=id)', 'aaaa', '0,id', ascii(('aaaa', 'aa'))),
+ ('(?P<id>aa)(?P=xd)', 'aaaa', '', regex.error, self.UNKNOWN_GROUP),
+
+ # Character properties.
+ (r"\g", "g", '0', ascii('g')),
+ (r"\g<1>", "g", '', regex.error, self.INVALID_GROUP_REF),
+ (r"(.)\g<1>", "gg", '0', ascii('gg')),
+ (r"(.)\g<1>", "gg", '', ascii(('gg', 'g'))),
+ (r"\N", "N", '0', ascii('N')),
+ (r"\N{LATIN SMALL LETTER A}", "a", '0', ascii('a')),
+ (r"\p", "p", '0', ascii('p')),
+ (r"\p{Ll}", "a", '0', ascii('a')),
+ (r"\P", "P", '0', ascii('P')),
+ (r"\P{Lu}", "p", '0', ascii('p')),
+
+ # All tests from Perl.
+ ('abc', 'abc', '0', ascii('abc')),
+ ('abc', 'xbc', '', ascii(None)),
+ ('abc', 'axc', '', ascii(None)),
+ ('abc', 'abx', '', ascii(None)),
+ ('abc', 'xabcy', '0', ascii('abc')),
+ ('abc', 'ababc', '0', ascii('abc')),
+
+ ('ab*c', 'abc', '0', ascii('abc')),
+ ('ab*bc', 'abc', '0', ascii('abc')),
+ ('ab*bc', 'abbc', '0', ascii('abbc')),
+ ('ab*bc', 'abbbbc', '0', ascii('abbbbc')),
+ ('ab{0,}bc', 'abbbbc', '0', ascii('abbbbc')),
+ ('ab+bc', 'abbc', '0', ascii('abbc')),
+ ('ab+bc', 'abc', '', ascii(None)),
+ ('ab+bc', 'abq', '', ascii(None)),
+ ('ab{1,}bc', 'abq', '', ascii(None)),
+ ('ab+bc', 'abbbbc', '0', ascii('abbbbc')),
+
+ ('ab{1,}bc', 'abbbbc', '0', ascii('abbbbc')),
+ ('ab{1,3}bc', 'abbbbc', '0', ascii('abbbbc')),
+ ('ab{3,4}bc', 'abbbbc', '0', ascii('abbbbc')),
+ ('ab{4,5}bc', 'abbbbc', '', ascii(None)),
+ ('ab?bc', 'abbc', '0', ascii('abbc')),
+ ('ab?bc', 'abc', '0', ascii('abc')),
+ ('ab{0,1}bc', 'abc', '0', ascii('abc')),
+ ('ab?bc', 'abbbbc', '', ascii(None)),
+ ('ab?c', 'abc', '0', ascii('abc')),
+ ('ab{0,1}c', 'abc', '0', ascii('abc')),
+
+ ('^abc$', 'abc', '0', ascii('abc')),
+ ('^abc$', 'abcc', '', ascii(None)),
+ ('^abc', 'abcc', '0', ascii('abc')),
+ ('^abc$', 'aabc', '', ascii(None)),
+ ('abc$', 'aabc', '0', ascii('abc')),
+ ('^', 'abc', '0', ascii('')),
+ ('$', 'abc', '0', ascii('')),
+ ('a.c', 'abc', '0', ascii('abc')),
+ ('a.c', 'axc', '0', ascii('axc')),
+ ('a.*c', 'axyzc', '0', ascii('axyzc')),
+
+ ('a.*c', 'axyzd', '', ascii(None)),
+ ('a[bc]d', 'abc', '', ascii(None)),
+ ('a[bc]d', 'abd', '0', ascii('abd')),
+ ('a[b-d]e', 'abd', '', ascii(None)),
+ ('a[b-d]e', 'ace', '0', ascii('ace')),
+ ('a[b-d]', 'aac', '0', ascii('ac')),
+ ('a[-b]', 'a-', '0', ascii('a-')),
+ ('a[b-]', 'a-', '0', ascii('a-')),
+ ('a[b-a]', '-', '', regex.error, self.BAD_CHAR_RANGE),
+ ('a[]b', '-', '', regex.error, self.BAD_SET),
+
+ ('a[', '-', '', regex.error, self.BAD_SET),
+ ('a]', 'a]', '0', ascii('a]')),
+ ('a[]]b', 'a]b', '0', ascii('a]b')),
+ ('a[^bc]d', 'aed', '0', ascii('aed')),
+ ('a[^bc]d', 'abd', '', ascii(None)),
+ ('a[^-b]c', 'adc', '0', ascii('adc')),
+ ('a[^-b]c', 'a-c', '', ascii(None)),
+ ('a[^]b]c', 'a]c', '', ascii(None)),
+ ('a[^]b]c', 'adc', '0', ascii('adc')),
+ ('ab|cd', 'abc', '0', ascii('ab')),
+
+ ('ab|cd', 'abcd', '0', ascii('ab')),
+ ('()ef', 'def', '0,1', ascii(('ef', ''))),
+ ('*a', '-', '', regex.error, self.NOTHING_TO_REPEAT),
+ ('(*)b', '-', '', regex.error, self.NOTHING_TO_REPEAT),
+ ('$b', 'b', '', ascii(None)),
+ ('a\\', '-', '', regex.error, self.BAD_ESCAPE),
+ ('a\\(b', 'a(b', '', ascii(('a(b',))),
+ ('a\\(*b', 'ab', '0', ascii('ab')),
+ ('a\\(*b', 'a((b', '0', ascii('a((b')),
+ ('a\\\\b', 'a\\b', '0', ascii('a\\b')),
+
+ ('abc)', '-', '', regex.error, self.TRAILING_CHARS),
+ ('(abc', '-', '', regex.error, self.MISSING_RPAREN),
+ ('((a))', 'abc', '0,1,2', ascii(('a', 'a', 'a'))),
+ ('(a)b(c)', 'abc', '0,1,2', ascii(('abc', 'a', 'c'))),
+ ('a+b+c', 'aabbabc', '0', ascii('abc')),
+ ('a{1,}b{1,}c', 'aabbabc', '0', ascii('abc')),
+ ('a**', '-', '', regex.error, self.MULTIPLE_REPEAT),
+ ('a.+?c', 'abcabc', '0', ascii('abc')),
+ ('(a+|b)*', 'ab', '0,1', ascii(('ab', 'b'))),
+ ('(a+|b){0,}', 'ab', '0,1', ascii(('ab', 'b'))),
+
+ ('(a+|b)+', 'ab', '0,1', ascii(('ab', 'b'))),
+ ('(a+|b){1,}', 'ab', '0,1', ascii(('ab', 'b'))),
+ ('(a+|b)?', 'ab', '0,1', ascii(('a', 'a'))),
+ ('(a+|b){0,1}', 'ab', '0,1', ascii(('a', 'a'))),
+ (')(', '-', '', regex.error, self.TRAILING_CHARS),
+ ('[^ab]*', 'cde', '0', ascii('cde')),
+ ('abc', '', '', ascii(None)),
+ ('a*', '', '0', ascii('')),
+ ('([abc])*d', 'abbbcd', '0,1', ascii(('abbbcd', 'c'))),
+ ('([abc])*bcd', 'abcd', '0,1', ascii(('abcd', 'a'))),
+
+ ('a|b|c|d|e', 'e', '0', ascii('e')),
+ ('(a|b|c|d|e)f', 'ef', '0,1', ascii(('ef', 'e'))),
+ ('abcd*efg', 'abcdefg', '0', ascii('abcdefg')),
+ ('ab*', 'xabyabbbz', '0', ascii('ab')),
+ ('ab*', 'xayabbbz', '0', ascii('a')),
+ ('(ab|cd)e', 'abcde', '0,1', ascii(('cde', 'cd'))),
+ ('[abhgefdc]ij', 'hij', '0', ascii('hij')),
+ ('^(ab|cd)e', 'abcde', '', ascii(None)),
+ ('(abc|)ef', 'abcdef', '0,1', ascii(('ef', ''))),
+ ('(a|b)c*d', 'abcd', '0,1', ascii(('bcd', 'b'))),
+
+ ('(ab|ab*)bc', 'abc', '0,1', ascii(('abc', 'a'))),
+ ('a([bc]*)c*', 'abc', '0,1', ascii(('abc', 'bc'))),
+ ('a([bc]*)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))),
+ ('a([bc]+)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))),
+ ('a([bc]*)(c+d)', 'abcd', '0,1,2', ascii(('abcd', 'b', 'cd'))),
+ ('a[bcd]*dcdcde', 'adcdcde', '0', ascii('adcdcde')),
+ ('a[bcd]+dcdcde', 'adcdcde', '', ascii(None)),
+ ('(ab|a)b*c', 'abc', '0,1', ascii(('abc', 'ab'))),
+ ('((a)(b)c)(d)', 'abcd', '1,2,3,4', ascii(('abc', 'a', 'b', 'd'))),
+ ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', '0', ascii('alpha')),
+
+ ('^a(bc+|b[eh])g|.h$', 'abh', '0,1', ascii(('bh', None))),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', '0,1,2', ascii(('effgz',
+ 'effgz', None))),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'ij', '0,1,2', ascii(('ij', 'ij',
+ 'j'))),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'effg', '', ascii(None)),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', '', ascii(None)),
+ ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', '0,1,2', ascii(('effgz',
+ 'effgz', None))),
+ ('((((((((((a))))))))))', 'a', '10', ascii('a')),
+ ('((((((((((a))))))))))\\10', 'aa', '0', ascii('aa')),
+
+ # Python does not have the same rules for \\41, so this is a syntax error.
+ # ('((((((((((a))))))))))\\41', 'aa', '', ascii(None)),
+ # ('((((((((((a))))))))))\\41', 'a!', '0', ascii('a!')),
+ ('((((((((((a))))))))))\\41', '', '', regex.error,
+ self.INVALID_GROUP_REF),
+ ('(?i)((((((((((a))))))))))\\41', '', '', regex.error,
+ self.INVALID_GROUP_REF),
+
+ ('(((((((((a)))))))))', 'a', '0', ascii('a')),
+ ('multiple words of text', 'uh-uh', '', ascii(None)),
+ ('multiple words', 'multiple words, yeah', '0',
+ ascii('multiple words')),
+ ('(.*)c(.*)', 'abcde', '0,1,2', ascii(('abcde', 'ab', 'de'))),
+ ('\\((.*), (.*)\\)', '(a, b)', '2,1', ascii(('b', 'a'))),
+ ('[k]', 'ab', '', ascii(None)),
+ ('a[-]?c', 'ac', '0', ascii('ac')),
+ ('(abc)\\1', 'abcabc', '1', ascii('abc')),
+ ('([a-c]*)\\1', 'abcabc', '1', ascii('abc')),
+ ('(?i)abc', 'ABC', '0', ascii('ABC')),
+
+ ('(?i)abc', 'XBC', '', ascii(None)),
+ ('(?i)abc', 'AXC', '', ascii(None)),
+ ('(?i)abc', 'ABX', '', ascii(None)),
+ ('(?i)abc', 'XABCY', '0', ascii('ABC')),
+ ('(?i)abc', 'ABABC', '0', ascii('ABC')),
+ ('(?i)ab*c', 'ABC', '0', ascii('ABC')),
+ ('(?i)ab*bc', 'ABC', '0', ascii('ABC')),
+ ('(?i)ab*bc', 'ABBC', '0', ascii('ABBC')),
+ ('(?i)ab*?bc', 'ABBBBC', '0', ascii('ABBBBC')),
+ ('(?i)ab{0,}?bc', 'ABBBBC', '0', ascii('ABBBBC')),
+
+ ('(?i)ab+?bc', 'ABBC', '0', ascii('ABBC')),
+ ('(?i)ab+bc', 'ABC', '', ascii(None)),
+ ('(?i)ab+bc', 'ABQ', '', ascii(None)),
+ ('(?i)ab{1,}bc', 'ABQ', '', ascii(None)),
+ ('(?i)ab+bc', 'ABBBBC', '0', ascii('ABBBBC')),
+ ('(?i)ab{1,}?bc', 'ABBBBC', '0', ascii('ABBBBC')),
+ ('(?i)ab{1,3}?bc', 'ABBBBC', '0', ascii('ABBBBC')),
+ ('(?i)ab{3,4}?bc', 'ABBBBC', '0', ascii('ABBBBC')),
+ ('(?i)ab{4,5}?bc', 'ABBBBC', '', ascii(None)),
+ ('(?i)ab??bc', 'ABBC', '0', ascii('ABBC')),
+
+ ('(?i)ab??bc', 'ABC', '0', ascii('ABC')),
+ ('(?i)ab{0,1}?bc', 'ABC', '0', ascii('ABC')),
+ ('(?i)ab??bc', 'ABBBBC', '', ascii(None)),
+ ('(?i)ab??c', 'ABC', '0', ascii('ABC')),
+ ('(?i)ab{0,1}?c', 'ABC', '0', ascii('ABC')),
+ ('(?i)^abc$', 'ABC', '0', ascii('ABC')),
+ ('(?i)^abc$', 'ABCC', '', ascii(None)),
+ ('(?i)^abc', 'ABCC', '0', ascii('ABC')),
+ ('(?i)^abc$', 'AABC', '', ascii(None)),
+ ('(?i)abc$', 'AABC', '0', ascii('ABC')),
+
+ ('(?i)^', 'ABC', '0', ascii('')),
+ ('(?i)$', 'ABC', '0', ascii('')),
+ ('(?i)a.c', 'ABC', '0', ascii('ABC')),
+ ('(?i)a.c', 'AXC', '0', ascii('AXC')),
+ ('(?i)a.*?c', 'AXYZC', '0', ascii('AXYZC')),
+ ('(?i)a.*c', 'AXYZD', '', ascii(None)),
+ ('(?i)a[bc]d', 'ABC', '', ascii(None)),
+ ('(?i)a[bc]d', 'ABD', '0', ascii('ABD')),
+ ('(?i)a[b-d]e', 'ABD', '', ascii(None)),
+ ('(?i)a[b-d]e', 'ACE', '0', ascii('ACE')),
+
+ ('(?i)a[b-d]', 'AAC', '0', ascii('AC')),
+ ('(?i)a[-b]', 'A-', '0', ascii('A-')),
+ ('(?i)a[b-]', 'A-', '0', ascii('A-')),
+ ('(?i)a[b-a]', '-', '', regex.error, self.BAD_CHAR_RANGE),
+ ('(?i)a[]b', '-', '', regex.error, self.BAD_SET),
+ ('(?i)a[', '-', '', regex.error, self.BAD_SET),
+ ('(?i)a]', 'A]', '0', ascii('A]')),
+ ('(?i)a[]]b', 'A]B', '0', ascii('A]B')),
+ ('(?i)a[^bc]d', 'AED', '0', ascii('AED')),
+ ('(?i)a[^bc]d', 'ABD', '', ascii(None)),
+
+ ('(?i)a[^-b]c', 'ADC', '0', ascii('ADC')),
+ ('(?i)a[^-b]c', 'A-C', '', ascii(None)),
+ ('(?i)a[^]b]c', 'A]C', '', ascii(None)),
+ ('(?i)a[^]b]c', 'ADC', '0', ascii('ADC')),
+ ('(?i)ab|cd', 'ABC', '0', ascii('AB')),
+ ('(?i)ab|cd', 'ABCD', '0', ascii('AB')),
+ ('(?i)()ef', 'DEF', '0,1', ascii(('EF', ''))),
+ ('(?i)*a', '-', '', regex.error, self.NOTHING_TO_REPEAT),
+ ('(?i)(*)b', '-', '', regex.error, self.NOTHING_TO_REPEAT),
+ ('(?i)$b', 'B', '', ascii(None)),
+
+ ('(?i)a\\', '-', '', regex.error, self.BAD_ESCAPE),
+ ('(?i)a\\(b', 'A(B', '', ascii(('A(B',))),
+ ('(?i)a\\(*b', 'AB', '0', ascii('AB')),
+ ('(?i)a\\(*b', 'A((B', '0', ascii('A((B')),
+ ('(?i)a\\\\b', 'A\\B', '0', ascii('A\\B')),
+ ('(?i)abc)', '-', '', regex.error, self.TRAILING_CHARS),
+ ('(?i)(abc', '-', '', regex.error, self.MISSING_RPAREN),
+ ('(?i)((a))', 'ABC', '0,1,2', ascii(('A', 'A', 'A'))),
+ ('(?i)(a)b(c)', 'ABC', '0,1,2', ascii(('ABC', 'A', 'C'))),
+ ('(?i)a+b+c', 'AABBABC', '0', ascii('ABC')),
+
+ ('(?i)a{1,}b{1,}c', 'AABBABC', '0', ascii('ABC')),
+ ('(?i)a**', '-', '', regex.error, self.MULTIPLE_REPEAT),
+ ('(?i)a.+?c', 'ABCABC', '0', ascii('ABC')),
+ ('(?i)a.*?c', 'ABCABC', '0', ascii('ABC')),
+ ('(?i)a.{0,5}?c', 'ABCABC', '0', ascii('ABC')),
+ ('(?i)(a+|b)*', 'AB', '0,1', ascii(('AB', 'B'))),
+ ('(?i)(a+|b){0,}', 'AB', '0,1', ascii(('AB', 'B'))),
+ ('(?i)(a+|b)+', 'AB', '0,1', ascii(('AB', 'B'))),
+ ('(?i)(a+|b){1,}', 'AB', '0,1', ascii(('AB', 'B'))),
+ ('(?i)(a+|b)?', 'AB', '0,1', ascii(('A', 'A'))),
+
+ ('(?i)(a+|b){0,1}', 'AB', '0,1', ascii(('A', 'A'))),
+ ('(?i)(a+|b){0,1}?', 'AB', '0,1', ascii(('', None))),
+ ('(?i))(', '-', '', regex.error, self.TRAILING_CHARS),
+ ('(?i)[^ab]*', 'CDE', '0', ascii('CDE')),
+ ('(?i)abc', '', '', ascii(None)),
+ ('(?i)a*', '', '0', ascii('')),
+ ('(?i)([abc])*d', 'ABBBCD', '0,1', ascii(('ABBBCD', 'C'))),
+ ('(?i)([abc])*bcd', 'ABCD', '0,1', ascii(('ABCD', 'A'))),
+ ('(?i)a|b|c|d|e', 'E', '0', ascii('E')),
+ ('(?i)(a|b|c|d|e)f', 'EF', '0,1', ascii(('EF', 'E'))),
+
+ ('(?i)abcd*efg', 'ABCDEFG', '0', ascii('ABCDEFG')),
+ ('(?i)ab*', 'XABYABBBZ', '0', ascii('AB')),
+ ('(?i)ab*', 'XAYABBBZ', '0', ascii('A')),
+ ('(?i)(ab|cd)e', 'ABCDE', '0,1', ascii(('CDE', 'CD'))),
+ ('(?i)[abhgefdc]ij', 'HIJ', '0', ascii('HIJ')),
+ ('(?i)^(ab|cd)e', 'ABCDE', '', ascii(None)),
+ ('(?i)(abc|)ef', 'ABCDEF', '0,1', ascii(('EF', ''))),
+ ('(?i)(a|b)c*d', 'ABCD', '0,1', ascii(('BCD', 'B'))),
+ ('(?i)(ab|ab*)bc', 'ABC', '0,1', ascii(('ABC', 'A'))),
+ ('(?i)a([bc]*)c*', 'ABC', '0,1', ascii(('ABC', 'BC'))),
+
+ ('(?i)a([bc]*)(c*d)', 'ABCD', '0,1,2', ascii(('ABCD', 'BC', 'D'))),
+ ('(?i)a([bc]+)(c*d)', 'ABCD', '0,1,2', ascii(('ABCD', 'BC', 'D'))),
+ ('(?i)a([bc]*)(c+d)', 'ABCD', '0,1,2', ascii(('ABCD', 'B', 'CD'))),
+ ('(?i)a[bcd]*dcdcde', 'ADCDCDE', '0', ascii('ADCDCDE')),
+ ('(?i)a[bcd]+dcdcde', 'ADCDCDE', '', ascii(None)),
+ ('(?i)(ab|a)b*c', 'ABC', '0,1', ascii(('ABC', 'AB'))),
+ ('(?i)((a)(b)c)(d)', 'ABCD', '1,2,3,4', ascii(('ABC', 'A', 'B',
+ 'D'))),
+ ('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', '0', ascii('ALPHA')),
+ ('(?i)^a(bc+|b[eh])g|.h$', 'ABH', '0,1', ascii(('BH', None))),
+ ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', '0,1,2', ascii(('EFFGZ',
+ 'EFFGZ', None))),
+
+ ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', '0,1,2', ascii(('IJ', 'IJ',
+ 'J'))),
+ ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', '', ascii(None)),
+ ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', '', ascii(None)),
+ ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', '0,1,2', ascii(('EFFGZ',
+ 'EFFGZ', None))),
+ ('(?i)((((((((((a))))))))))', 'A', '10', ascii('A')),
+ ('(?i)((((((((((a))))))))))\\10', 'AA', '0', ascii('AA')),
+ #('(?i)((((((((((a))))))))))\\41', 'AA', '', ascii(None)),
+ #('(?i)((((((((((a))))))))))\\41', 'A!', '0', ascii('A!')),
+ ('(?i)(((((((((a)))))))))', 'A', '0', ascii('A')),
+ ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', '1',
+ ascii('A')),
+ ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', '1',
+ ascii('C')),
+ ('(?i)multiple words of text', 'UH-UH', '', ascii(None)),
+
+ ('(?i)multiple words', 'MULTIPLE WORDS, YEAH', '0',
+ ascii('MULTIPLE WORDS')),
+ ('(?i)(.*)c(.*)', 'ABCDE', '0,1,2', ascii(('ABCDE', 'AB', 'DE'))),
+ ('(?i)\\((.*), (.*)\\)', '(A, B)', '2,1', ascii(('B', 'A'))),
+ ('(?i)[k]', 'AB', '', ascii(None)),
+ # ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', ascii(ABCD-$&-\\ABCD)),
+ # ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', ascii(BC-$1-\\BC)),
+ ('(?i)a[-]?c', 'AC', '0', ascii('AC')),
+ ('(?i)(abc)\\1', 'ABCABC', '1', ascii('ABC')),
+ ('(?i)([a-c]*)\\1', 'ABCABC', '1', ascii('ABC')),
+ ('a(?!b).', 'abad', '0', ascii('ad')),
+ ('a(?=d).', 'abad', '0', ascii('ad')),
+ ('a(?=c|d).', 'abad', '0', ascii('ad')),
+
+ ('a(?:b|c|d)(.)', 'ace', '1', ascii('e')),
+ ('a(?:b|c|d)*(.)', 'ace', '1', ascii('e')),
+ ('a(?:b|c|d)+?(.)', 'ace', '1', ascii('e')),
+ ('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', '1,2', ascii(('c', 'e'))),
+
+ # Lookbehind: split by : but not if it is escaped by -.
+ ('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', '1', ascii('bc-:de')),
+ # Bug 483789: minimizing repeat problem.
+ ('a[^>]*?b', 'a>b', '', ascii(None)),
+ # Bug 490573: minimizing repeat problem.
+ (r'^a*?$', 'foo', '', ascii(None)),
+ # Bug 470582: nested groups problem.
+ (r'^((a)c)?(ab)$', 'ab', '1,2,3', ascii((None, None, 'ab'))),
+ # Another minimizing repeat problem (capturing groups in assertions).
+ ('^([ab]*?)(?=(b)?)c', 'abc', '1,2', ascii(('ab', None))),
+ ('^([ab]*?)(?!(b))c', 'abc', '1,2', ascii(('ab', None))),
+ ('^([ab]*?)(?<!(a))c', 'abc', '1,2', ascii(('ab', None))),
+ ]
+
+ def test_captures(self):
+ self.assertEqual(regex.search(r"(?>(.){0,2})d", "abcd").captures(1),
+ ['b', 'c'])
+ self.assertEqual(regex.search(r"(.)+", "a").captures(1), ['a'])
+
+ def test_guards(self):
+ m = regex.search(r"(X.*?Y\s*){3}(X\s*)+AB:",
+ "XY\nX Y\nX Y\nXY\nXX AB:")
+ self.assertEqual(m.span(0, 1, 2), ((3, 21), (12, 15), (16, 18)))
+
+ m = regex.search(r"(X.*?Y\s*){3,}(X\s*)+AB:",
+ "XY\nX Y\nX Y\nXY\nXX AB:")
+ self.assertEqual(m.span(0, 1, 2), ((0, 21), (12, 15), (16, 18)))
+
+ m = regex.search(r'\d{4}(\s*\w)?\W*((?!\d)\w){2}', "9999XX")
+ self.assertEqual(m.span(0, 1, 2), ((0, 6), (-1, -1), (5, 6)))
+
+ m = regex.search(r'A\s*?.*?(\n+.*?\s*?){0,2}\(X', 'A\n1\nS\n1 (X')
+ self.assertEqual(m.span(0, 1), ((0, 10), (5, 8)))
+
+ m = regex.search(r'Derde\s*:', 'aaaaaa:\nDerde:')
+ self.assertEqual(m.span(), (8, 14))
+ m = regex.search(r'Derde\s*:', 'aaaaa:\nDerde:')
+ self.assertEqual(m.span(), (7, 13))
+
+ def test_turkic(self):
+ # Turkish has dotted and dotless I/i.
+ pairs = "I=i;I=\u0131;i=\u0130"
+
+ all_chars = set()
+ matching = set()
+ for pair in pairs.split(";"):
+ ch1, ch2 = pair.split("=")
+ all_chars.update((ch1, ch2))
+ matching.add((ch1, ch1))
+ matching.add((ch1, ch2))
+ matching.add((ch2, ch1))
+ matching.add((ch2, ch2))
+
+ for ch1 in all_chars:
+ for ch2 in all_chars:
+ m = regex.match(r"(?i)\A" + ch1 + r"\Z", ch2)
+ if m:
+ if (ch1, ch2) not in matching:
+ self.fail("{} matching {}".format(ascii(ch1),
+ ascii(ch2)))
+ else:
+ if (ch1, ch2) in matching:
+ self.fail("{} not matching {}".format(ascii(ch1),
+ ascii(ch2)))
+
+ def test_named_lists(self):
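+ # \L<name> matches any literal string from the list passed as the
+ # keyword argument 'name'.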
+ options = ["one", "two", "three"]
+ self.assertEqual(regex.match(r"333\L444", "333one444",
+ bar=options).group(), "333one444")
+ self.assertEqual(regex.match(r"(?i)333\L444", "333TWO444",
+ bar=options).group(), "333TWO444")
+ self.assertEqual(regex.match(r"333\L444", "333four444",
+ bar=options), None)
+
+ options = [b"one", b"two", b"three"]
+ self.assertEqual(regex.match(br"333\L444", b"333one444",
+ bar=options).group(), b"333one444")
+ self.assertEqual(regex.match(br"(?i)333\L444", b"333TWO444",
+ bar=options).group(), b"333TWO444")
+ self.assertEqual(regex.match(br"333\L444", b"333four444",
+ bar=options), None)
+
+ self.assertEqual(repr(type(regex.compile(r"3\L<bar>4\L<bar>+5",
+ bar=["one", "two", "three"]))), self.PATTERN_CLASS)
+
+ self.assertEqual(regex.findall(r"^\L", "solid QWERT",
+ options=set(['good', 'brilliant', '+s\\ol[i}d'])), [])
+ self.assertEqual(regex.findall(r"^\L", "+solid QWERT",
+ options=set(['good', 'brilliant', '+solid'])), ['+solid'])
+
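+ # (?f) enables full case-folding, under which "SS" and the sharp s
+ # fold to the same string.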
+ options = ["STRASSE"]
+ self.assertEqual(regex.match(r"(?fi)\L",
+ "stra\N{LATIN SMALL LETTER SHARP S}e", words=options).span(), (0,
+ 6))
+
+ options = ["STRASSE", "stress"]
+ self.assertEqual(regex.match(r"(?fi)\L",
+ "stra\N{LATIN SMALL LETTER SHARP S}e", words=options).span(), (0,
+ 6))
+
+ options = ["stra\N{LATIN SMALL LETTER SHARP S}e"]
+ self.assertEqual(regex.match(r"(?fi)\L", "STRASSE",
+ words=options).span(), (0, 7))
+
+ options = ["kit"]
+ self.assertEqual(regex.search(r"(?i)\L", "SKITS",
+ words=options).span(), (1, 4))
+ self.assertEqual(regex.search(r"(?i)\L",
+ "SK\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}TS",
+ words=options).span(), (1, 4))
+
+ self.assertEqual(regex.search(r"(?fi)\b(\w+) +\1\b",
+ " stra\N{LATIN SMALL LETTER SHARP S}e STRASSE ").span(), (1, 15))
+ self.assertEqual(regex.search(r"(?fi)\b(\w+) +\1\b",
+ " STRASSE stra\N{LATIN SMALL LETTER SHARP S}e ").span(), (1, 15))
+
+ self.assertEqual(regex.search(r"^\L$", "", options=[]).span(),
+ (0, 0))
+
+ def test_fuzzy(self):
+ # Some tests borrowed from TRE library tests.
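+ # In fuzzy constraints, "i" counts insertions, "d" deletions, "s"
+ # substitutions and "e" errors of any kind; forms like {s<=1,e<=3}
+ # set limits, and 1i+1d<1 is a weighted cost bound.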
+ self.assertEqual(repr(type(regex.compile('(fou){s,e<=1}'))),
+ self.PATTERN_CLASS)
+ self.assertEqual(repr(type(regex.compile('(fuu){s}'))),
+ self.PATTERN_CLASS)
+ self.assertEqual(repr(type(regex.compile('(fuu){s,e}'))),
+ self.PATTERN_CLASS)
+ self.assertEqual(repr(type(regex.compile('(anaconda){1i+1d<1,s<=1}'))),
+ self.PATTERN_CLASS)
+ self.assertEqual(repr(type(regex.compile('(anaconda){1i+1d<1,s<=1,e<=10}'))),
+ self.PATTERN_CLASS)
+ self.assertEqual(repr(type(regex.compile('(anaconda){s<=1,e<=1,1i+1d<1}'))),
+ self.PATTERN_CLASS)
+
+ text = 'molasses anaconda foo bar baz smith anderson '
+ self.assertEqual(regex.search('(znacnda){s<=1,e<=3,1i+1d<1}', text),
+ None)
+ self.assertEqual(regex.search('(znacnda){s<=1,e<=3,1i+1d<2}',
+ text).span(0, 1), ((9, 17), (9, 17)))
+ self.assertEqual(regex.search('(ananda){1i+1d<2}', text), None)
+ self.assertEqual(regex.search(r"(?:\bznacnda){e<=2}", text)[0],
+ "anaconda")
+ self.assertEqual(regex.search(r"(?:\bnacnda){e<=2}", text)[0],
+ "anaconda")
+
+ text = 'anaconda foo bar baz smith anderson'
+ self.assertEqual(regex.search('(fuu){i<=3,d<=3,e<=5}', text).span(0,
+ 1), ((0, 0), (0, 0)))
+ self.assertEqual(regex.search('(?b)(fuu){i<=3,d<=3,e<=5}',
+ text).span(0, 1), ((9, 10), (9, 10)))
+ self.assertEqual(regex.search('(fuu){i<=2,d<=2,e<=5}', text).span(0,
+ 1), ((7, 10), (7, 10)))
+ self.assertEqual(regex.search('(?e)(fuu){i<=2,d<=2,e<=5}',
+ text).span(0, 1), ((9, 10), (9, 10)))
+ self.assertEqual(regex.search('(fuu){i<=3,d<=3,e}', text).span(0, 1),
+ ((0, 0), (0, 0)))
+ self.assertEqual(regex.search('(?b)(fuu){i<=3,d<=3,e}', text).span(0,
+ 1), ((9, 10), (9, 10)))
+
+ self.assertEqual(repr(type(regex.compile('(approximate){s<=3,1i+1d<3}'))),
+ self.PATTERN_CLASS)
+
+ # No cost limit.
+ self.assertEqual(regex.search('(foobar){e}',
+ 'xirefoabralfobarxie').span(0, 1), ((0, 6), (0, 6)))
+ self.assertEqual(regex.search('(?e)(foobar){e}',
+ 'xirefoabralfobarxie').span(0, 1), ((0, 3), (0, 3)))
+ self.assertEqual(regex.search('(?b)(foobar){e}',
+ 'xirefoabralfobarxie').span(0, 1), ((11, 16), (11, 16)))
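+ # The spans differ above because (?e) (ENHANCEMATCH) improves the fit
+ # of the first match found, while (?b) (BESTMATCH) searches for the
+ # best match overall.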
+
+ # At most two errors.
+ self.assertEqual(regex.search('(foobar){e<=2}',
+ 'xirefoabrzlfd').span(0, 1), ((4, 9), (4, 9)))
+ self.assertEqual(regex.search('(foobar){e<=2}', 'xirefoabzlfd'), None)
+
+ # At most two inserts or substitutions and max two errors total.
+ self.assertEqual(regex.search('(foobar){i<=2,s<=2,e<=2}',
+ 'oobargoobaploowap').span(0, 1), ((5, 11), (5, 11)))
+
+ # Find best whole word match for "foobar".
+ self.assertEqual(regex.search('\\b(foobar){e}\\b', 'zfoobarz').span(0,
+ 1), ((0, 8), (0, 8)))
+ self.assertEqual(regex.search('\\b(foobar){e}\\b',
+ 'boing zfoobarz goobar woop').span(0, 1), ((0, 6), (0, 6)))
+ self.assertEqual(regex.search('(?b)\\b(foobar){e}\\b',
+ 'boing zfoobarz goobar woop').span(0, 1), ((15, 21), (15, 21)))
+
+ # Match whole string, allow only 1 error.
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobar').span(0, 1),
+ ((0, 6), (0, 6)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoobar').span(0,
+ 1), ((0, 7), (0, 7)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobarx').span(0,
+ 1), ((0, 7), (0, 7)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'fooxbar').span(0,
+ 1), ((0, 7), (0, 7)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foxbar').span(0, 1),
+ ((0, 6), (0, 6)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'xoobar').span(0, 1),
+ ((0, 6), (0, 6)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobax').span(0, 1),
+ ((0, 6), (0, 6)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'oobar').span(0, 1),
+ ((0, 5), (0, 5)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'fobar').span(0, 1),
+ ((0, 5), (0, 5)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'fooba').span(0, 1),
+ ((0, 5), (0, 5)))
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoobarx'), None)
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobarxx'), None)
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'xxfoobar'), None)
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoxbar'), None)
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foxbarx'), None)
+
+ # At most one insert, two deletes, and three substitutions.
+ # Additionally, deletes cost two and substitutes one, and total
+ # cost must be less than 4.
+ self.assertEqual(regex.search('(foobar){i<=1,d<=2,s<=3,2d+1s<4}',
+ '3oifaowefbaoraofuiebofasebfaobfaorfeoaro').span(0, 1), ((6, 13), (6,
+ 13)))
+ self.assertEqual(regex.search('(?b)(foobar){i<=1,d<=2,s<=3,2d+1s<4}',
+ '3oifaowefbaoraofuiebofasebfaobfaorfeoaro').span(0, 1), ((34, 39),
+ (34, 39)))
+
+ # Partially fuzzy matches.
+ self.assertEqual(regex.search('foo(bar){e<=1}zap', 'foobarzap').span(0,
+ 1), ((0, 9), (3, 6)))
+ self.assertEqual(regex.search('foo(bar){e<=1}zap', 'fobarzap'), None)
+ self.assertEqual(regex.search('foo(bar){e<=1}zap', 'foobrzap').span(0,
+ 1), ((0, 8), (3, 5)))
+
+ text = ('www.cnn.com 64.236.16.20\nwww.slashdot.org 66.35.250.150\n'
+ 'For useful information, use www.slashdot.org\nthis is demo data!\n')
+ self.assertEqual(regex.search(r'(?s)^.*(dot.org){e}.*$', text).span(0,
+ 1), ((0, 120), (120, 120)))
+ self.assertEqual(regex.search(r'(?es)^.*(dot.org){e}.*$', text).span(0,
+ 1), ((0, 120), (93, 100)))
+ self.assertEqual(regex.search(r'^.*(dot.org){e}.*$', text).span(0, 1),
+ ((0, 119), (24, 101)))
+
+ # Behaviour is unexpected, but arguably not wrong. It first finds the
+ # best match, then the best in what follows, etc.
+ self.assertEqual(regex.findall(r"\b\L{e<=1}\b",
+ " book cot dog desk ", words="cat dog".split()), ["cot", "dog"])
+ self.assertEqual(regex.findall(r"\b\L{e<=1}\b",
+ " book dog cot desk ", words="cat dog".split()), [" dog", "cot"])
+ self.assertEqual(regex.findall(r"(?e)\b\L{e<=1}\b",
+ " book dog cot desk ", words="cat dog".split()), ["dog", "cot"])
+ self.assertEqual(regex.findall(r"(?r)\b\L{e<=1}\b",
+ " book cot dog desk ", words="cat dog".split()), ["dog ", "cot"])
+ self.assertEqual(regex.findall(r"(?er)\b\L{e<=1}\b",
+ " book cot dog desk ", words="cat dog".split()), ["dog", "cot"])
+ self.assertEqual(regex.findall(r"(?r)\b\L{e<=1}\b",
+ " book dog cot desk ", words="cat dog".split()), ["cot", "dog"])
+ self.assertEqual(regex.findall(br"\b\L{e<=1}\b",
+ b" book cot dog desk ", words=b"cat dog".split()), [b"cot", b"dog"])
+ self.assertEqual(regex.findall(br"\b\L{e<=1}\b",
+ b" book dog cot desk ", words=b"cat dog".split()), [b" dog", b"cot"])
+ self.assertEqual(regex.findall(br"(?e)\b\L{e<=1}\b",
+ b" book dog cot desk ", words=b"cat dog".split()), [b"dog", b"cot"])
+ self.assertEqual(regex.findall(br"(?r)\b\L{e<=1}\b",
+ b" book cot dog desk ", words=b"cat dog".split()), [b"dog ", b"cot"])
+ self.assertEqual(regex.findall(br"(?er)\b\L{e<=1}\b",
+ b" book cot dog desk ", words=b"cat dog".split()), [b"dog", b"cot"])
+ self.assertEqual(regex.findall(br"(?r)\b\L{e<=1}\b",
+ b" book dog cot desk ", words=b"cat dog".split()), [b"cot", b"dog"])
+
+ self.assertEqual(regex.search(r"(\w+) (\1{e<=1})", "foo fou").groups(),
+ ("foo", "fou"))
+ self.assertEqual(regex.search(r"(?r)(\2{e<=1}) (\w+)",
+ "foo fou").groups(), ("foo", "fou"))
+ self.assertEqual(regex.search(br"(\w+) (\1{e<=1})",
+ b"foo fou").groups(), (b"foo", b"fou"))
+
+ self.assertEqual(regex.findall(r"(?:(?:QR)+){e}", "abcde"), ["abcde",
+ ""])
+ self.assertEqual(regex.findall(r"(?:Q+){e}", "abc"), ["abc", ""])
+
+ # Hg issue 41: = for fuzzy matches
+ self.assertEqual(regex.match(r"(?:service detection){0[^()]+)|(?R))*\)", "(ab(cd)ef)")[
+ : ], ("(ab(cd)ef)", "ef"))
+ self.assertEqual(regex.search(r"\(((?>[^()]+)|(?R))*\)",
+ "(ab(cd)ef)").captures(1), ["ab", "cd", "(cd)", "ef"])
+
+ self.assertEqual(regex.search(r"(?r)\(((?R)|(?>[^()]+))*\)",
+ "(ab(cd)ef)")[ : ], ("(ab(cd)ef)", "ab"))
+ self.assertEqual(regex.search(r"(?r)\(((?R)|(?>[^()]+))*\)",
+ "(ab(cd)ef)").captures(1), ["ef", "cd", "(cd)", "ab"])
+
+ self.assertEqual(regex.search(r"\(([^()]+|(?R))*\)",
+ "some text (a(b(c)d)e) more text")[ : ], ("(a(b(c)d)e)", "e"))
+
+ self.assertEqual(regex.search(r"(?r)\(((?R)|[^()]+)*\)",
+ "some text (a(b(c)d)e) more text")[ : ], ("(a(b(c)d)e)", "a"))
+
+ self.assertEqual(regex.search(r"(foo(\(((?:(?>[^()]+)|(?2))*)\)))",
+ "foo(bar(baz)+baz(bop))")[ : ], ("foo(bar(baz)+baz(bop))",
+ "foo(bar(baz)+baz(bop))", "(bar(baz)+baz(bop))",
+ "bar(baz)+baz(bop)"))
+
+ self.assertEqual(regex.search(r"(?r)(foo(\(((?:(?2)|(?>[^()]+))*)\)))",
+ "foo(bar(baz)+baz(bop))")[ : ], ("foo(bar(baz)+baz(bop))",
+ "foo(bar(baz)+baz(bop))", "(bar(baz)+baz(bop))",
+ "bar(baz)+baz(bop)"))
+
+ rgx = regex.compile(r"""^\s*(<\s*([a-zA-Z:]+)(?:\s*[a-zA-Z:]*\s*=\s*(?:'[^']*'|"[^"]*"))*\s*(/\s*)?>(?:[^<>]*|(?1))*(?(3)|<\s*/\s*\2\s*>))\s*$""")
+ self.assertEqual(bool(rgx.search('<foo><bar></bar></foo>')), True)
+ self.assertEqual(bool(rgx.search('<foo><bar></foo></bar>')), False)
+ self.assertEqual(bool(rgx.search('<foo><bar/></foo>')), True)
+ self.assertEqual(bool(rgx.search('<foo><bar></foo>')), False)
+ self.assertEqual(bool(rgx.search('<foo bar=baz>')), False)
+
+ self.assertEqual(bool(rgx.search('<foo bar="baz">')), False)
+ self.assertEqual(bool(rgx.search('<foo bar="baz"/>')), True)
+ self.assertEqual(bool(rgx.search('< fooo / >')), True)
+ # The next regex should and does match. Perl 5.14 agrees.
+ #self.assertEqual(bool(rgx.search('<foo/>foo')), False)
+ self.assertEqual(bool(rgx.search('foo<foo/>')), False)
+
+ self.assertEqual(bool(rgx.search('<foo>foo</foo>')), True)
+ self.assertEqual(bool(rgx.search('<foo><bar/>foo</foo>')), True)
+ self.assertEqual(bool(rgx.search('<a><b><c></c></b></a>')), True)
+
+ def test_copy(self):
+ # PatternObjects are immutable, therefore there's no need to clone them.
+ r = regex.compile("a")
+ self.assertTrue(copy.copy(r) is r)
+ self.assertTrue(copy.deepcopy(r) is r)
+
+ # MatchObjects are normally mutable because the target string can be
+ # detached. However, after the target string has been detached, a
+ # MatchObject becomes immutable, so there's no need to clone it.
+ m = r.match("a")
+ self.assertTrue(copy.copy(m) is not m)
+ self.assertTrue(copy.deepcopy(m) is not m)
+
+ self.assertTrue(m.string is not None)
+ m2 = copy.copy(m)
+ m2.detach_string()
+ self.assertTrue(m.string is not None)
+ self.assertTrue(m2.string is None)
+
+ # The following behaviour matches that of the re module.
+ it = regex.finditer(".", "ab")
+ it2 = copy.copy(it)
+ self.assertEqual(next(it).group(), "a")
+ self.assertEqual(next(it2).group(), "b")
+
+ # The following behaviour matches that of the re module.
+ it = regex.finditer(".", "ab")
+ it2 = copy.deepcopy(it)
+ self.assertEqual(next(it).group(), "a")
+ self.assertEqual(next(it2).group(), "b")
+
+ # The following behaviour is designed to match that of copying 'finditer'.
+ it = regex.splititer(" ", "a b")
+ it2 = copy.copy(it)
+ self.assertEqual(next(it), "a")
+ self.assertEqual(next(it2), "b")
+
+ # The following behaviour is designed to match that of copying 'finditer'.
+ it = regex.splititer(" ", "a b")
+ it2 = copy.deepcopy(it)
+ self.assertEqual(next(it), "a")
+ self.assertEqual(next(it2), "b")
+
+ def test_format(self):
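+ # subf(), subfn() and expandf() use str.format-style templates, so
+ # {1} and {word1} refer to group 1 and group "word1" of each match.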
+ self.assertEqual(regex.subf(r"(\w+) (\w+)", "{0} => {2} {1}",
+ "foo bar"), "foo bar => bar foo")
+ self.assertEqual(regex.subf(r"(?\w+) (?\w+)",
+ "{word2} {word1}", "foo bar"), "bar foo")
+
+ self.assertEqual(regex.subfn(r"(\w+) (\w+)", "{0} => {2} {1}",
+ "foo bar"), ("foo bar => bar foo", 1))
+ self.assertEqual(regex.subfn(r"(?\w+) (?\w+)",
+ "{word2} {word1}", "foo bar"), ("bar foo", 1))
+
+ self.assertEqual(regex.match(r"(\w+) (\w+)",
+ "foo bar").expandf("{0} => {2} {1}"), "foo bar => bar foo")
+
+ def test_fullmatch(self):
+ self.assertEqual(bool(regex.fullmatch(r"abc", "abc")), True)
+ self.assertEqual(bool(regex.fullmatch(r"abc", "abcx")), False)
+ self.assertEqual(bool(regex.fullmatch(r"abc", "abcx", endpos=3)), True)
+
+ self.assertEqual(bool(regex.fullmatch(r"abc", "xabc", pos=1)), True)
+ self.assertEqual(bool(regex.fullmatch(r"abc", "xabcy", pos=1)), False)
+ self.assertEqual(bool(regex.fullmatch(r"abc", "xabcy", pos=1,
+ endpos=4)), True)
+
+ self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abc")), True)
+ self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abcx")), False)
+ self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abcx", endpos=3)),
+ True)
+
+ self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabc", pos=1)),
+ True)
+ self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabcy", pos=1)),
+ False)
+ self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabcy", pos=1,
+ endpos=4)), True)
+
+ def test_issue_18468(self):
+ self.assertTypedEqual(regex.sub('y', 'a', 'xyz'), 'xaz')
+ self.assertTypedEqual(regex.sub('y', StrSubclass('a'),
+ StrSubclass('xyz')), 'xaz')
+ self.assertTypedEqual(regex.sub(b'y', b'a', b'xyz'), b'xaz')
+ self.assertTypedEqual(regex.sub(b'y', BytesSubclass(b'a'),
+ BytesSubclass(b'xyz')), b'xaz')
+ self.assertTypedEqual(regex.sub(b'y', bytearray(b'a'),
+ bytearray(b'xyz')), b'xaz')
+ self.assertTypedEqual(regex.sub(b'y', memoryview(b'a'),
+ memoryview(b'xyz')), b'xaz')
+
+ for string in ":a:b::c", StrSubclass(":a:b::c"):
+ self.assertTypedEqual(regex.split(":", string), ['', 'a', 'b', '',
+ 'c'])
+ if sys.version_info >= (3, 7, 0):
+ self.assertTypedEqual(regex.split(":*", string), ['', '', 'a',
+ '', 'b', '', 'c', ''])
+ self.assertTypedEqual(regex.split("(:*)", string), ['', ':',
+ '', '', 'a', ':', '', '', 'b', '::', '', '', 'c', '', ''])
+ else:
+ self.assertTypedEqual(regex.split(":*", string), ['', 'a', 'b',
+ 'c'])
+ self.assertTypedEqual(regex.split("(:*)", string), ['', ':',
+ 'a', ':', 'b', '::', 'c'])
+
+ for string in (b":a:b::c", BytesSubclass(b":a:b::c"),
+ bytearray(b":a:b::c"), memoryview(b":a:b::c")):
+ self.assertTypedEqual(regex.split(b":", string), [b'', b'a', b'b',
+ b'', b'c'])
+ if sys.version_info >= (3, 7, 0):
+ self.assertTypedEqual(regex.split(b":*", string), [b'', b'',
+ b'a', b'', b'b', b'', b'c', b''])
+ self.assertTypedEqual(regex.split(b"(:*)", string), [b'', b':',
+ b'', b'', b'a', b':', b'', b'', b'b', b'::', b'', b'', b'c',
+ b'', b''])
+ else:
+ self.assertTypedEqual(regex.split(b":*", string), [b'', b'a',
+ b'b', b'c'])
+ self.assertTypedEqual(regex.split(b"(:*)", string), [b'', b':',
+ b'a', b':', b'b', b'::', b'c'])
+
+ for string in "a:b::c:::d", StrSubclass("a:b::c:::d"):
+ self.assertTypedEqual(regex.findall(":+", string), [":", "::",
+ ":::"])
+ self.assertTypedEqual(regex.findall("(:+)", string), [":", "::",
+ ":::"])
+ self.assertTypedEqual(regex.findall("(:)(:*)", string), [(":", ""),
+ (":", ":"), (":", "::")])
+
+ for string in (b"a:b::c:::d", BytesSubclass(b"a:b::c:::d"),
+ bytearray(b"a:b::c:::d"), memoryview(b"a:b::c:::d")):
+ self.assertTypedEqual(regex.findall(b":+", string), [b":", b"::",
+ b":::"])
+ self.assertTypedEqual(regex.findall(b"(:+)", string), [b":", b"::",
+ b":::"])
+ self.assertTypedEqual(regex.findall(b"(:)(:*)", string), [(b":",
+ b""), (b":", b":"), (b":", b"::")])
+
+ for string in 'a', StrSubclass('a'):
+ self.assertEqual(regex.match('a', string).groups(), ())
+ self.assertEqual(regex.match('(a)', string).groups(), ('a',))
+ self.assertEqual(regex.match('(a)', string).group(0), 'a')
+ self.assertEqual(regex.match('(a)', string).group(1), 'a')
+ self.assertEqual(regex.match('(a)', string).group(1, 1), ('a',
+ 'a'))
+
+ for string in (b'a', BytesSubclass(b'a'), bytearray(b'a'),
+ memoryview(b'a')):
+ self.assertEqual(regex.match(b'a', string).groups(), ())
+ self.assertEqual(regex.match(b'(a)', string).groups(), (b'a',))
+ self.assertEqual(regex.match(b'(a)', string).group(0), b'a')
+ self.assertEqual(regex.match(b'(a)', string).group(1), b'a')
+ self.assertEqual(regex.match(b'(a)', string).group(1, 1), (b'a',
+ b'a'))
+
+ def test_partial(self):
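+ # With partial=True, a match that reaches the end of the string but
+ # could be extended by more input is returned with .partial set to
+ # True.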
+ self.assertEqual(regex.match('ab', 'a', partial=True).partial, True)
+ self.assertEqual(regex.match('ab', 'a', partial=True).span(), (0, 1))
+ self.assertEqual(regex.match(r'cats', 'cat', partial=True).partial,
+ True)
+ self.assertEqual(regex.match(r'cats', 'cat', partial=True).span(), (0,
+ 3))
+ self.assertEqual(regex.match(r'cats', 'catch', partial=True), None)
+ self.assertEqual(regex.match(r'abc\w{3}', 'abcdef',
+ partial=True).partial, False)
+ self.assertEqual(regex.match(r'abc\w{3}', 'abcdef',
+ partial=True).span(), (0, 6))
+ self.assertEqual(regex.match(r'abc\w{3}', 'abcde',
+ partial=True).partial, True)
+ self.assertEqual(regex.match(r'abc\w{3}', 'abcde',
+ partial=True).span(), (0, 5))
+
+ self.assertEqual(regex.match(r'\d{4}$', '1234', partial=True).partial,
+ False)
+
+ self.assertEqual(regex.match(r'\L<words>', 'post', partial=True,
+ words=['post']).partial, False)
+ self.assertEqual(regex.match(r'\L<words>', 'post', partial=True,
+ words=['post']).span(), (0, 4))
+ self.assertEqual(regex.match(r'\L<words>', 'pos', partial=True,
+ words=['post']).partial, True)
+ self.assertEqual(regex.match(r'\L<words>', 'pos', partial=True,
+ words=['post']).span(), (0, 3))
+
+ self.assertEqual(regex.match(r'(?fi)\L<words>', 'POST', partial=True,
+ words=['po\uFB06']).partial, False)
+ self.assertEqual(regex.match(r'(?fi)\L<words>', 'POST', partial=True,
+ words=['po\uFB06']).span(), (0, 4))
+ self.assertEqual(regex.match(r'(?fi)\L<words>', 'POS', partial=True,
+ words=['po\uFB06']).partial, True)
+ self.assertEqual(regex.match(r'(?fi)\L<words>', 'POS', partial=True,
+ words=['po\uFB06']).span(), (0, 3))
+ self.assertEqual(regex.match(r'(?fi)\L<words>', 'po\uFB06',
+ partial=True, words=['POS']), None)
+
+ self.assertEqual(regex.match(r'[a-z]*4R$', 'a', partial=True).span(),
+ (0, 1))
+ self.assertEqual(regex.match(r'[a-z]*4R$', 'ab', partial=True).span(),
+ (0, 2))
+ self.assertEqual(regex.match(r'[a-z]*4R$', 'ab4', partial=True).span(),
+ (0, 3))
+ self.assertEqual(regex.match(r'[a-z]*4R$', 'a4', partial=True).span(),
+ (0, 2))
+ self.assertEqual(regex.match(r'[a-z]*4R$', 'a4R', partial=True).span(),
+ (0, 3))
+ self.assertEqual(regex.match(r'[a-z]*4R$', '4a', partial=True), None)
+ self.assertEqual(regex.match(r'[a-z]*4R$', 'a44', partial=True), None)
+
+ def test_hg_bugs(self):
+ # Hg issue 28: regex.compile("(?>b)") causes "TypeError: 'Character'
+ # object is not subscriptable"
+ self.assertEqual(bool(regex.compile("(?>b)", flags=regex.V1)), True)
+
+ # Hg issue 29: regex.compile("^((?>\w+)|(?>\s+))*$") causes
+ # "TypeError: 'GreedyRepeat' object is not iterable"
+ self.assertEqual(bool(regex.compile(r"^((?>\w+)|(?>\s+))*$",
+ flags=regex.V1)), True)
+
+ # Hg issue 31: atomic and normal groups in recursive patterns
+ self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)",
+ "a(bcd(e)f)g(h)"), ['(bcd(e)f)', '(h)'])
+ self.assertEqual(regex.findall(r"\((?:(?:[^()]+)|(?R))*\)",
+ "a(bcd(e)f)g(h)"), ['(bcd(e)f)', '(h)'])
+ self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)",
+ "a(b(cd)e)f)g)h"), ['(b(cd)e)'])
+ self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)",
+ "a(bc(d(e)f)gh"), ['(d(e)f)'])
+ self.assertEqual(regex.findall(r"(?r)\((?:(?>[^()]+)|(?R))*\)",
+ "a(bc(d(e)f)gh"), ['(d(e)f)'])
+ self.assertEqual([m.group() for m in
+ regex.finditer(r"\((?:[^()]*+|(?0))*\)", "a(b(c(de)fg)h")],
+ ['(c(de)fg)'])
+
+ # Hg issue 32: regex.search("a(bc)d", "abcd", regex.I|regex.V1) returns
+ # None
+ self.assertEqual(regex.search("a(bc)d", "abcd", regex.I |
+ regex.V1).group(0), "abcd")
+
+ # Hg issue 33: regex.search("([\da-f:]+)$", "E", regex.I|regex.V1)
+ # returns None
+ self.assertEqual(regex.search(r"([\da-f:]+)$", "E", regex.I |
+ regex.V1).group(0), "E")
+ self.assertEqual(regex.search(r"([\da-f:]+)$", "e", regex.I |
+ regex.V1).group(0), "e")
+
+ # Hg issue 34: regex.search("^(?=ab(de))(abd)(e)", "abde").groups()
+ # returns (None, 'abd', 'e') instead of ('de', 'abd', 'e')
+ self.assertEqual(regex.search("^(?=ab(de))(abd)(e)", "abde").groups(),
+ ('de', 'abd', 'e'))
+
+ # Hg issue 35: regex.compile("\ ", regex.X) causes "_regex_core.error:
+ # bad escape"
+ self.assertEqual(bool(regex.match(r"\ ", " ", flags=regex.X)), True)
+
+ # Hg issue 36: regex.search("^(a|)\1{2}b", "b") returns None
+ self.assertEqual(regex.search(r"^(a|)\1{2}b", "b").group(0, 1), ('b',
+ ''))
+
+ # Hg issue 37: regex.search("^(a){0,0}", "abc").group(0,1) returns
+ # ('a', 'a') instead of ('', None)
+ self.assertEqual(regex.search("^(a){0,0}", "abc").group(0, 1), ('',
+ None))
+
+ # Hg issue 38: regex.search("(?>.*/)b", "a/b") returns None
+ self.assertEqual(regex.search("(?>.*/)b", "a/b").group(0), "a/b")
+
+ # Hg issue 39: regex.search("((?i)blah)\\s+\\1", "blah BLAH") doesn't
+ # return None
+ # Changed to positional flags in regex 2023.12.23.
+ self.assertEqual(regex.search(r"((?i)blah)\s+\1", "blah BLAH"), None)
+
+ # Hg issue 40: regex.search("(\()?[^()]+(?(1)\)|)", "(abcd").group(0)
+ # returns "bcd" instead of "abcd"
+ self.assertEqual(regex.search(r"(\()?[^()]+(?(1)\)|)",
+ "(abcd").group(0), "abcd")
+
+ # Hg issue 42: regex.search("(a*)*", "a", flags=regex.V1).span(1)
+ # returns (0, 1) instead of (1, 1)
+ self.assertEqual(regex.search("(a*)*", "a").span(1), (1, 1))
+ self.assertEqual(regex.search("(a*)*", "aa").span(1), (2, 2))
+ self.assertEqual(regex.search("(a*)*", "aaa").span(1), (3, 3))
+
+ # Hg issue 43: regex.compile("a(?#xxx)*") causes "_regex_core.error:
+ # nothing to repeat"
+ self.assertEqual(regex.search("a(?#xxx)*", "aaa").group(), "aaa")
+
+ # Hg issue 44: regex.compile("(?=abc){3}abc") causes
+ # "_regex_core.error: nothing to repeat"
+ self.assertEqual(regex.search("(?=abc){3}abc", "abcabcabc").span(), (0,
+ 3))
+
+ # Hg issue 45: regex.compile("^(?:a(?:(?:))+)+") causes
+ # "_regex_core.error: nothing to repeat"
+ self.assertEqual(regex.search("^(?:a(?:(?:))+)+", "a").span(), (0, 1))
+ self.assertEqual(regex.search("^(?:a(?:(?:))+)+", "aa").span(), (0, 2))
+
+ # Hg issue 46: regex.compile("a(?x: b c )d") causes
+ # "_regex_core.error: missing )"
+ self.assertEqual(regex.search("a(?x: b c )d", "abcd").group(0), "abcd")
+
+ # Hg issue 47: regex.compile("a#comment\n*", flags=regex.X) causes
+ # "_regex_core.error: nothing to repeat"
+ self.assertEqual(regex.search("a#comment\n*", "aaa",
+ flags=regex.X).group(0), "aaa")
+
+ # Hg issue 48: regex.search("(a(?(1)\\1)){4}", "a"*10,
+ # flags=regex.V1).group(0,1) returns ('aaaaa', 'a') instead of ('aaaaaaaaaa', 'aaaa')
+ self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){1}",
+ "aaaaaaaaaa").span(0, 1), ((0, 1), (0, 1)))
+ self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){2}",
+ "aaaaaaaaaa").span(0, 1), ((0, 3), (1, 3)))
+ self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){3}",
+ "aaaaaaaaaa").span(0, 1), ((0, 6), (3, 6)))
+ self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){4}",
+ "aaaaaaaaaa").span(0, 1), ((0, 10), (6, 10)))
+
+ # Hg issue 49: regex.search("(a)(?<=b(?1))", "baz", regex.V1) returns
+ # None incorrectly
+ self.assertEqual(regex.search("(?V1)(a)(?<=b(?1))", "baz").group(0),
+ "a")
+
+ # Hg issue 50: not all keywords are found by named list with
+ # overlapping keywords when full Unicode casefolding is required
+ self.assertEqual(regex.findall(r'(?fi)\L<keywords>',
+ 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05',
+ keywords=['post','pos']), ['POST', 'Post', 'post', 'po\u017Ft',
+ 'po\uFB06', 'po\uFB05'])
+ self.assertEqual(regex.findall(r'(?fi)pos|post',
+ 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POS',
+ 'Pos', 'pos', 'po\u017F', 'po\uFB06', 'po\uFB05'])
+ self.assertEqual(regex.findall(r'(?fi)post|pos',
+ 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POST',
+ 'Post', 'post', 'po\u017Ft', 'po\uFB06', 'po\uFB05'])
+ self.assertEqual(regex.findall(r'(?fi)post|another',
+ 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POST',
+ 'Post', 'post', 'po\u017Ft', 'po\uFB06', 'po\uFB05'])
+
+ # Hg issue 51: regex.search("((a)(?1)|(?2))", "a", flags=regex.V1)
+ # returns None incorrectly
+ self.assertEqual(regex.search("(?V1)((a)(?1)|(?2))", "a").group(0, 1,
+ 2), ('a', 'a', None))
+
+ # Hg issue 52: regex.search("(\\1xx|){6}", "xx",
+ # flags=regex.V1).span(0,1) returns incorrect value
+ self.assertEqual(regex.search(r"(?V1)(\1xx|){6}", "xx").span(0, 1),
+ ((0, 2), (2, 2)))
+
+ # Hg issue 53: regex.search("(a|)+", "a") causes MemoryError
+ self.assertEqual(regex.search("(a|)+", "a").group(0, 1), ("a", ""))
+
+ # Hg issue 54: regex.search("(a|)*\\d", "a"*80) causes MemoryError
+ self.assertEqual(regex.search(r"(a|)*\d", "a" * 80), None)
+
+ # Hg issue 55: regex.search("^(?:a?b?)*$", "ac") takes a very long time.
+ self.assertEqual(regex.search("^(?:a?b?)*$", "ac"), None)
+
+ # Hg issue 58: bad named character escape sequences like "\\N{1}"
+ # are treated as "N"
+ self.assertRaisesRegex(regex.error, self.UNDEF_CHAR_NAME, lambda:
+ regex.compile("\\N{1}"))
+
+ # Hg issue 59: regex.search("\\Z", "a\na\n") returns None incorrectly
+ self.assertEqual(regex.search("\\Z", "a\na\n").span(0), (4, 4))
+
+ # Hg issue 60: regex.search("(q1|.)*(q2|.)*(x(a|bc)*y){2,}", "xayxay")
+ # returns None incorrectly
+ self.assertEqual(regex.search("(q1|.)*(q2|.)*(x(a|bc)*y){2,}",
+ "xayxay").group(0), "xayxay")
+
+ # Hg issue 61: regex.search("[^a]", "A", regex.I).group(0) returns ''
+ # incorrectly
+ self.assertEqual(regex.search("(?i)[^a]", "A"), None)
+
+ # Hg issue 63: regex.search("[[:ascii:]]", "\N{KELVIN SIGN}",
+ # flags=regex.I|regex.V1) doesn't return None
+ self.assertEqual(regex.search("(?i)[[:ascii:]]", "\N{KELVIN SIGN}"),
+ None)
+
+ # Hg issue 66: regex.search("((a|b(?1)c){3,5})", "baaaaca",
+ # flags=regex.V1).groups() returns ('baaaac', 'baaaac') instead of ('aaaa', 'a')
+ self.assertEqual(regex.search("((a|b(?1)c){3,5})", "baaaaca").group(0,
+ 1, 2), ('aaaa', 'aaaa', 'a'))
+
+ # Hg issue 71: non-greedy quantifier in lookbehind
+ self.assertEqual(regex.findall(r"(?<=:\S+ )\w+", ":9 abc :10 def"),
+ ['abc', 'def'])
+ self.assertEqual(regex.findall(r"(?<=:\S* )\w+", ":9 abc :10 def"),
+ ['abc', 'def'])
+ self.assertEqual(regex.findall(r"(?<=:\S+? )\w+", ":9 abc :10 def"),
+ ['abc', 'def'])
+ self.assertEqual(regex.findall(r"(?<=:\S*? )\w+", ":9 abc :10 def"),
+ ['abc', 'def'])
+
+ # Hg issue 73: conditional patterns
+ self.assertEqual(regex.search(r"(?:fe)?male", "female").group(),
+ "female")
+ self.assertEqual([m.group() for m in
+ regex.finditer(r"(fe)?male: h(?(1)(er)|(is)) (\w+)",
+ "female: her dog; male: his cat. asdsasda")], ['female: her dog',
+ 'male: his cat'])
+
+ # Hg issue 78: "Captures" doesn't work for recursive calls
+ self.assertEqual(regex.search(r'(?<rec>\((?:[^()]++|(?&rec))*\))',
+ 'aaa(((1+0)+1)+1)bbb').captures('rec'), ['(1+0)', '((1+0)+1)',
+ '(((1+0)+1)+1)'])
+
+ # Hg issue 80: Escape characters throws an exception
+ self.assertRaisesRegex(regex.error, self.BAD_ESCAPE, lambda:
+ regex.sub('x', '\\', 'x'), )
+
+ # Hg issue 82: error range does not work
+ fz = "(CAGCCTCCCATTTCAGAATATACATCC){1a(?b))', "ab").spans("x"), [(1,
+ 2), (0, 2)])
+
+ # Hg issue 91: match.expand is extremely slow
+ # Check that the replacement cache works.
+ self.assertEqual(regex.sub(r'(-)', lambda m: m.expand(r'x'), 'a-b-c'),
+ 'axbxc')
+
+ # Hg issue 94: Python crashes when executing regex updates
+ # pattern.findall
+ rx = regex.compile(r'\bt(est){i<2}', flags=regex.V1)
+ self.assertEqual(rx.search("Some text"), None)
+ self.assertEqual(rx.findall("Some text"), [])
+
+ # Hg issue 95: 'pos' for regex.error
+ self.assertRaisesRegex(regex.error, self.MULTIPLE_REPEAT, lambda:
+ regex.compile(r'.???'))
+
+ # Hg issue 97: behaviour of regex.escape's special_only is wrong
+ #
+ # Hg issue 244: Make `special_only=True` the default in
+ # `regex.escape()`
+ self.assertEqual(regex.escape('foo!?', special_only=False), 'foo\\!\\?')
+ self.assertEqual(regex.escape('foo!?', special_only=True), 'foo!\\?')
+ self.assertEqual(regex.escape('foo!?'), 'foo!\\?')
+
+ self.assertEqual(regex.escape(b'foo!?', special_only=False), b'foo\\!\\?')
+ self.assertEqual(regex.escape(b'foo!?', special_only=True),
+ b'foo!\\?')
+ self.assertEqual(regex.escape(b'foo!?'), b'foo!\\?')
+
+ # Hg issue 100: strange results from regex.search
+ self.assertEqual(regex.search('^([^z]*(?:WWWi|W))?$',
+ 'WWWi').groups(), ('WWWi', ))
+ self.assertEqual(regex.search('^([^z]*(?:WWWi|w))?$',
+ 'WWWi').groups(), ('WWWi', ))
+ self.assertEqual(regex.search('^([^z]*?(?:WWWi|W))?$',
+ 'WWWi').groups(), ('WWWi', ))
+
+ # Hg issue 101: findall() broken (seems like memory corruption)
+ pat = regex.compile(r'xxx', flags=regex.FULLCASE | regex.UNICODE)
+ self.assertEqual([x.group() for x in pat.finditer('yxxx')], ['xxx'])
+ self.assertEqual(pat.findall('yxxx'), ['xxx'])
+
+ raw = 'yxxx'
+ self.assertEqual([x.group() for x in pat.finditer(raw)], ['xxx'])
+ self.assertEqual(pat.findall(raw), ['xxx'])
+
+ pat = regex.compile(r'xxx', flags=regex.FULLCASE | regex.IGNORECASE |
+ regex.UNICODE)
+ self.assertEqual([x.group() for x in pat.finditer('yxxx')], ['xxx'])
+ self.assertEqual(pat.findall('yxxx'), ['xxx'])
+
+ raw = 'yxxx'
+ self.assertEqual([x.group() for x in pat.finditer(raw)], ['xxx'])
+ self.assertEqual(pat.findall(raw), ['xxx'])
+
+ # Hg issue 106: * operator not working correctly with sub()
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.sub('(?V0).*', 'x', 'test'), 'xx')
+ else:
+ self.assertEqual(regex.sub('(?V0).*', 'x', 'test'), 'x')
+ self.assertEqual(regex.sub('(?V1).*', 'x', 'test'), 'xx')
+
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.sub('(?V0).*?', '|', 'test'), '|||||||||')
+ else:
+ self.assertEqual(regex.sub('(?V0).*?', '|', 'test'), '|t|e|s|t|')
+ self.assertEqual(regex.sub('(?V1).*?', '|', 'test'), '|||||||||')
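+ # (?V0) requests behaviour compatible with re and (?V1) the newer
+ # behaviour; they differ in how zero-width matches are handled by
+ # sub(), and Python 3.7 changed re itself to the newer handling.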
+
+ # Hg issue 112: re: OK, but regex: SystemError
+ self.assertEqual(regex.sub(r'^(@)\n(?!.*?@)(.*)',
+ r'\1\n==========\n\2', '@\n', flags=regex.DOTALL), '@\n==========\n')
+
+ # Hg issue 109: Edit distance of fuzzy match
+ self.assertEqual(regex.match(r'(?:cats|cat){e<=1}',
+ 'caz').fuzzy_counts, (1, 0, 0))
+ self.assertEqual(regex.match(r'(?e)(?:cats|cat){e<=1}',
+ 'caz').fuzzy_counts, (1, 0, 0))
+ self.assertEqual(regex.match(r'(?b)(?:cats|cat){e<=1}',
+ 'caz').fuzzy_counts, (1, 0, 0))
+
+ self.assertEqual(regex.match(r'(?:cat){e<=1}', 'caz').fuzzy_counts,
+ (1, 0, 0))
+ self.assertEqual(regex.match(r'(?e)(?:cat){e<=1}',
+ 'caz').fuzzy_counts, (1, 0, 0))
+ self.assertEqual(regex.match(r'(?b)(?:cat){e<=1}',
+ 'caz').fuzzy_counts, (1, 0, 0))
+
+ self.assertEqual(regex.match(r'(?:cats){e<=2}', 'c ats').fuzzy_counts,
+ (1, 1, 0))
+ self.assertEqual(regex.match(r'(?e)(?:cats){e<=2}',
+ 'c ats').fuzzy_counts, (0, 1, 0))
+ self.assertEqual(regex.match(r'(?b)(?:cats){e<=2}',
+ 'c ats').fuzzy_counts, (0, 1, 0))
+
+ self.assertEqual(regex.match(r'(?:cats){e<=2}',
+ 'c a ts').fuzzy_counts, (0, 2, 0))
+ self.assertEqual(regex.match(r'(?e)(?:cats){e<=2}',
+ 'c a ts').fuzzy_counts, (0, 2, 0))
+ self.assertEqual(regex.match(r'(?b)(?:cats){e<=2}',
+ 'c a ts').fuzzy_counts, (0, 2, 0))
+
+ self.assertEqual(regex.match(r'(?:cats){e<=1}', 'c ats').fuzzy_counts,
+ (0, 1, 0))
+ self.assertEqual(regex.match(r'(?e)(?:cats){e<=1}',
+ 'c ats').fuzzy_counts, (0, 1, 0))
+ self.assertEqual(regex.match(r'(?b)(?:cats){e<=1}',
+ 'c ats').fuzzy_counts, (0, 1, 0))
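+ # fuzzy_counts is a (substitutions, insertions, deletions) tuple for
+ # the whole match.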
+
+ # Hg issue 115: Infinite loop when processing backreferences
+ self.assertEqual(regex.findall(r'\bof ([a-z]+) of \1\b',
+ 'To make use of one of these modules'), [])
+
+ # Hg issue 125: Reference to entire match (\g<0>) in
+ # Pattern.sub() doesn't work as of 2014.09.22 release.
+ self.assertEqual(regex.sub(r'x', r'\g<0>', 'x'), 'x')
+
+ # Unreported issue: no such builtin as 'ascii' in Python 2.
+ self.assertEqual(bool(regex.match(r'a', 'a', regex.DEBUG)), True)
+
+ # Hg issue 131: nested sets behaviour
+ self.assertEqual(regex.findall(r'(?V1)[[b-e]--cd]', 'abcdef'), ['b',
+ 'e'])
+ self.assertEqual(regex.findall(r'(?V1)[b-e--cd]', 'abcdef'), ['b',
+ 'e'])
+ self.assertEqual(regex.findall(r'(?V1)[[bcde]--cd]', 'abcdef'), ['b',
+ 'e'])
+ self.assertEqual(regex.findall(r'(?V1)[bcde--cd]', 'abcdef'), ['b',
+ 'e'])
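+ # Under (?V1), character sets support set operations, so [[b-e]--cd]
+ # is the range b-e with the characters c and d removed.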
+
+ # Hg issue 132: index out of range on null property \p{}
+ self.assertRaisesRegex(regex.error, '^unknown property at position 4$',
+ lambda: regex.compile(r'\p{}'))
+
+ # Issue 23692.
+ self.assertEqual(regex.match('(?:()|(?(1)()|z)){2}(?(2)a|z)',
+ 'a').group(0, 1, 2), ('a', '', ''))
+ self.assertEqual(regex.match('(?:()|(?(1)()|z)){0,2}(?(2)a|z)',
+ 'a').group(0, 1, 2), ('a', '', ''))
+
+ # Hg issue 137: Posix character class :punct: does not seem to be
+ # supported.
+
+ # Posix compatibility as recommended here:
+ # http://www.unicode.org/reports/tr18/#Compatibility_Properties
+
+ # Posix in Unicode.
+ chars = ''.join(chr(c) for c in range(0x10000))
+
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:alnum:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''[\p{Alpha}\p{PosixDigit}]+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:alpha:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''\p{Alpha}+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:ascii:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''[\p{InBasicLatin}]+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:blank:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''[\p{gc=Space_Separator}\t]+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:cntrl:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''\p{gc=Control}+''', chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:digit:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''[0-9]+''', chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:graph:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''[^\p{Space}\p{gc=Control}\p{gc=Surrogate}\p{gc=Unassigned}]+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:lower:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''\p{Lower}+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:print:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''(?V1)[\p{Graph}\p{Blank}--\p{Cntrl}]+''', chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:punct:]]+''',
+ chars))),
+ ascii(''.join(regex.findall(r'''(?V1)[\p{gc=Punctuation}\p{gc=Symbol}--\p{Alpha}]+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:space:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''\p{Whitespace}+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:upper:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''\p{Upper}+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:word:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''[\p{Alpha}\p{gc=Mark}\p{Digit}\p{gc=Connector_Punctuation}\p{Join_Control}]+''',
+ chars))))
+ self.assertEqual(ascii(''.join(regex.findall(r'''[[:xdigit:]]+''',
+ chars))), ascii(''.join(regex.findall(r'''[0-9A-Fa-f]+''',
+ chars))))
+
+ # Posix in ASCII.
+ chars = bytes(range(0x100))
+
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:alnum:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{Alpha}\p{PosixDigit}]+''',
+ chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:alpha:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Alpha}+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:ascii:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)[\x00-\x7F]+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:blank:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{gc=Space_Separator}\t]+''',
+ chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:cntrl:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)\p{gc=Control}+''',
+ chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:digit:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)[0-9]+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:graph:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)[^\p{Space}\p{gc=Control}\p{gc=Surrogate}\p{gc=Unassigned}]+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:lower:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Lower}+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:print:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?aV1)[\p{Graph}\p{Blank}--\p{Cntrl}]+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:punct:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?aV1)[\p{gc=Punctuation}\p{gc=Symbol}--\p{Alpha}]+''',
+ chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:space:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Whitespace}+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:upper:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Upper}+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:word:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{Alpha}\p{gc=Mark}\p{Digit}\p{gc=Connector_Punctuation}\p{Join_Control}]+''', chars))))
+ self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:xdigit:]]+''',
+ chars))), ascii(b''.join(regex.findall(br'''(?a)[0-9A-Fa-f]+''', chars))))
+
+ # Hg issue 138: grapheme anchored search not working properly.
+ self.assertEqual(ascii(regex.search(r'\X$', 'ab\u2103').group()),
+ ascii('\u2103'))
+
+ # Hg issue 139: Regular expression with multiple wildcards where first
+ # should match empty string does not always work.
+ self.assertEqual(regex.search("([^L]*)([^R]*R)", "LtR").groups(), ('',
+ 'LtR'))
+
+ # Hg issue 140: Replace with REVERSE and groups has unexpected
+ # behavior.
+ self.assertEqual(regex.sub(r'(.)', r'x\1y', 'ab'), 'xayxby')
+ self.assertEqual(regex.sub(r'(?r)(.)', r'x\1y', 'ab'), 'xayxby')
+ self.assertEqual(regex.subf(r'(.)', 'x{1}y', 'ab'), 'xayxby')
+ self.assertEqual(regex.subf(r'(?r)(.)', 'x{1}y', 'ab'), 'xayxby')
+
+ # Hg issue 141: Crash on a certain partial match.
+ self.assertEqual(regex.fullmatch('(a)*abc', 'ab',
+ partial=True).span(), (0, 2))
+ self.assertEqual(regex.fullmatch('(a)*abc', 'ab',
+ partial=True).partial, True)
+
+ # Hg issue 143: Partial matches have incorrect span if prefix is '.'
+ # wildcard.
+ self.assertEqual(regex.search('OXRG', 'OOGOX', partial=True).span(),
+ (3, 5))
+ self.assertEqual(regex.search('.XRG', 'OOGOX', partial=True).span(),
+ (3, 5))
+ self.assertEqual(regex.search('.{1,3}XRG', 'OOGOX',
+ partial=True).span(), (1, 5))
+
+ # Hg issue 144: Latest version problem with matching 'R|R'.
+ self.assertEqual(regex.match('R|R', 'R').span(), (0, 1))
+
+ # Hg issue 146: Forced-fail (?!) works improperly in conditional.
+ self.assertEqual(regex.match(r'(.)(?(1)(?!))', 'xy'), None)
+
+ # Groups cleared after failure.
+ self.assertEqual(regex.findall(r'(y)?(\d)(?(1)\b\B)', 'ax1y2z3b'),
+ [('', '1'), ('', '2'), ('', '3')])
+ self.assertEqual(regex.findall(r'(y)?+(\d)(?(1)\b\B)', 'ax1y2z3b'),
+ [('', '1'), ('', '2'), ('', '3')])
+
+ # Hg issue 147: Fuzzy match can return match points beyond buffer end.
+ self.assertEqual([m.span() for m in regex.finditer(r'(?i)(?:error){e}',
+ 'regex failure')], [(0, 5), (5, 10), (10, 13), (13, 13)])
+ self.assertEqual([m.span() for m in
+ regex.finditer(r'(?fi)(?:error){e}', 'regex failure')], [(0, 5), (5,
+ 10), (10, 13), (13, 13)])
+
+ # Hg issue 150: Have an option for POSIX-compatible longest match of
+ # alternates.
+ self.assertEqual(regex.search(r'(?p)\d+(\w(\d*)?|[eE]([+-]\d+))',
+ '10b12')[0], '10b12')
+ self.assertEqual(regex.search(r'(?p)\d+(\w(\d*)?|[eE]([+-]\d+))',
+ '10E+12')[0], '10E+12')
+
+ self.assertEqual(regex.search(r'(?p)(\w|ae|oe|ue|ss)', 'ae')[0], 'ae')
+ self.assertEqual(regex.search(r'(?p)one(self)?(selfsufficient)?',
+ 'oneselfsufficient')[0], 'oneselfsufficient')
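+ # (?p) selects POSIX matching, which returns the leftmost longest
+ # match rather than the first match found by backtracking.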
+
+ # Hg issue 151: Request: \K.
+ self.assertEqual(regex.search(r'(ab\Kcd)', 'abcd').group(0, 1), ('cd',
+ 'abcd'))
+ self.assertEqual(regex.findall(r'\w\w\K\w\w', 'abcdefgh'), ['cd',
+ 'gh'])
+ self.assertEqual(regex.findall(r'(\w\w\K\w\w)', 'abcdefgh'), ['abcd',
+ 'efgh'])
+
+ self.assertEqual(regex.search(r'(?r)(ab\Kcd)', 'abcd').group(0, 1),
+ ('ab', 'abcd'))
+ self.assertEqual(regex.findall(r'(?r)\w\w\K\w\w', 'abcdefgh'), ['ef',
+ 'ab'])
+ self.assertEqual(regex.findall(r'(?r)(\w\w\K\w\w)', 'abcdefgh'),
+ ['efgh', 'abcd'])
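+ # \K keeps the text matched so far out of the reported match; under
+ # (?r) the retained part lies on the other side of \K.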
+
+ # Hg issue 152: Request: (?(DEFINE)...).
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<quant>\d+)(?<item>\w+))(?&quant) (?&item)',
+ '5 elephants')[0], '5 elephants')
+
+ self.assertEqual(regex.search(r'(?&routine)(?(DEFINE)(?<routine>.))', 'a').group('routine'), None)
+ self.assertEqual(regex.search(r'(?&routine)(?(DEFINE)(?<routine>.))', 'a').captures('routine'), ['a'])
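+ # A (?(DEFINE)...) group never matches at its own position; it only
+ # defines groups for later (?&name) subroutine calls, which is why
+ # group('routine') is None while captures('routine') is not empty.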
+
+ # Hg issue 153: Request: (*SKIP).
+ self.assertEqual(regex.search(r'12(*FAIL)|3', '123')[0], '3')
+ self.assertEqual(regex.search(r'(?r)12(*FAIL)|3', '123')[0], '3')
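+ # (*FAIL) forces a failure at that point; (*PRUNE) discards the
+ # backtracking state to its left; (*SKIP) additionally moves the
+ # restart position past the text already consumed.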
+
+ self.assertEqual(regex.search(r'\d+(*PRUNE)\d', '123'), None)
+ self.assertEqual(regex.search(r'\d+(?=(*PRUNE))\d', '123')[0], '123')
+ self.assertEqual(regex.search(r'\d+(*PRUNE)bcd|[3d]', '123bcd')[0],
+ '123bcd')
+ self.assertEqual(regex.search(r'\d+(*PRUNE)bcd|[3d]', '123zzd')[0],
+ 'd')
+ self.assertEqual(regex.search(r'\d+?(*PRUNE)bcd|[3d]', '123bcd')[0],
+ '3bcd')
+ self.assertEqual(regex.search(r'\d+?(*PRUNE)bcd|[3d]', '123zzd')[0],
+ 'd')
+ self.assertEqual(regex.search(r'\d++(?<=3(*PRUNE))zzd|[4d]$',
+ '123zzd')[0], '123zzd')
+ self.assertEqual(regex.search(r'\d++(?<=3(*PRUNE))zzd|[4d]$',
+ '124zzd')[0], 'd')
+ self.assertEqual(regex.search(r'\d++(?<=(*PRUNE)3)zzd|[4d]$',
+ '124zzd')[0], 'd')
+ self.assertEqual(regex.search(r'\d++(?<=2(*PRUNE)3)zzd|[3d]$',
+ '124zzd')[0], 'd')
+
+ self.assertEqual(regex.search(r'(?r)\d(*PRUNE)\d+', '123'), None)
+ self.assertEqual(regex.search(r'(?r)\d(?<=(*PRUNE))\d+', '123')[0],
+ '123')
+ self.assertEqual(regex.search(r'(?r)\d+(*PRUNE)bcd|[3d]',
+ '123bcd')[0], '123bcd')
+ self.assertEqual(regex.search(r'(?r)\d+(*PRUNE)bcd|[3d]',
+ '123zzd')[0], 'd')
+ self.assertEqual(regex.search(r'(?r)\d++(?<=3(*PRUNE))zzd|[4d]$',
+ '123zzd')[0], '123zzd')
+ self.assertEqual(regex.search(r'(?r)\d++(?<=3(*PRUNE))zzd|[4d]$',
+ '124zzd')[0], 'd')
+ self.assertEqual(regex.search(r'(?r)\d++(?<=(*PRUNE)3)zzd|[4d]$',
+ '124zzd')[0], 'd')
+ self.assertEqual(regex.search(r'(?r)\d++(?<=2(*PRUNE)3)zzd|[3d]$',
+ '124zzd')[0], 'd')
+
+ self.assertEqual(regex.search(r'\d+(*SKIP)bcd|[3d]', '123bcd')[0],
+ '123bcd')
+ self.assertEqual(regex.search(r'\d+(*SKIP)bcd|[3d]', '123zzd')[0],
+ 'd')
+ self.assertEqual(regex.search(r'\d+?(*SKIP)bcd|[3d]', '123bcd')[0],
+ '3bcd')
+ self.assertEqual(regex.search(r'\d+?(*SKIP)bcd|[3d]', '123zzd')[0],
+ 'd')
+ self.assertEqual(regex.search(r'\d++(?<=3(*SKIP))zzd|[4d]$',
+ '123zzd')[0], '123zzd')
+ self.assertEqual(regex.search(r'\d++(?<=3(*SKIP))zzd|[4d]$',
+ '124zzd')[0], 'd')
+ self.assertEqual(regex.search(r'\d++(?<=(*SKIP)3)zzd|[4d]$',
+ '124zzd')[0], 'd')
+ self.assertEqual(regex.search(r'\d++(?<=2(*SKIP)3)zzd|[3d]$',
+ '124zzd')[0], 'd')
+
+ self.assertEqual(regex.search(r'(?r)\d+(*SKIP)bcd|[3d]', '123bcd')[0],
+ '123bcd')
+ self.assertEqual(regex.search(r'(?r)\d+(*SKIP)bcd|[3d]', '123zzd')[0],
+ 'd')
+ self.assertEqual(regex.search(r'(?r)\d++(?<=3(*SKIP))zzd|[4d]$',
+ '123zzd')[0], '123zzd')
+ self.assertEqual(regex.search(r'(?r)\d++(?<=3(*SKIP))zzd|[4d]$',
+ '124zzd')[0], 'd')
+ self.assertEqual(regex.search(r'(?r)\d++(?<=(*SKIP)3)zzd|[4d]$',
+ '124zzd')[0], 'd')
+ self.assertEqual(regex.search(r'(?r)\d++(?<=2(*SKIP)3)zzd|[3d]$',
+ '124zzd')[0], 'd')
+
+ # Hg issue 154: Segmentation fault 11 when working with an atomic group
+ text = """June 30, December 31, 2013 2012
+some words follow:
+more words and numbers 1,234,567 9,876,542
+more words and numbers 1,234,567 9,876,542"""
+ self.assertEqual(len(regex.findall(r'(?<!\d)(?>2014|2013 ?2012)', text)), 1)
+
+ # Hg issue 156: regression on atomic grouping
+ self.assertEqual(regex.match('1(?>2)', '12').span(), (0, 2))
+
+ # Hg issue 157: regression: segfault on complex lookaround
+ self.assertEqual(regex.match(r'(?V1w)(?=(?=[^A-Z]*+[A-Z])(?=[^a-z]*+[a-z]))(?=\D*+\d)(?=\p{Alphanumeric}*+\P{Alphanumeric})\A(?s:.){8,255}+\Z',
+ 'AAaa11!!')[0], 'AAaa11!!')
+
+ # Hg issue 158: Group issue with (?(DEFINE)...)
+ TEST_REGEX = regex.compile(r'''(?smx)
+(?(DEFINE)
+ (?<subcat>
+ ^,[^,]+,
+ )
+)
+
+# Group 2 is defined on this line
+^,([^,]+),
+
+(?:(?!(?&subcat)[\r\n]+(?&subcat)).)+
+''')
+
+ TEST_DATA = '''
+,Cat 1,
+,Brand 1,
+some
+thing
+,Brand 2,
+other
+things
+,Cat 2,
+,Brand,
+Some
+thing
+'''
+
+ self.assertEqual([m.span(1, 2) for m in
+ TEST_REGEX.finditer(TEST_DATA)], [((-1, -1), (2, 7)), ((-1, -1), (54,
+ 59))])
+
+ # Hg issue 161: Unexpected fuzzy match results
+ self.assertEqual(regex.search('(abcdefgh){e}',
+ '******abcdefghijklmnopqrtuvwxyz', regex.BESTMATCH).span(), (6, 14))
+ self.assertEqual(regex.search('(abcdefghi){e}',
+ '******abcdefghijklmnopqrtuvwxyz', regex.BESTMATCH).span(), (6, 15))
+
+ # Hg issue 163: allow lookarounds in conditionals.
+ self.assertEqual(regex.match(r'(?:(?=\d)\d+\b|\w+)', '123abc').span(),
+ (0, 6))
+ self.assertEqual(regex.match(r'(?(?=\d)\d+\b|\w+)', '123abc'), None)
+ self.assertEqual(regex.search(r'(?(?<=love\s)you|(?<=hate\s)her)',
+ "I love you").span(), (7, 10))
+ self.assertEqual(regex.findall(r'(?(?<=love\s)you|(?<=hate\s)her)',
+ "I love you but I don't hate her either"), ['you', 'her'])
+
+ # Hg issue 180: bug of POSIX matching.
+ self.assertEqual(regex.search(r'(?p)a*(.*?)', 'aaabbb').group(0, 1),
+ ('aaabbb', 'bbb'))
+ self.assertEqual(regex.search(r'(?p)a*(.*)', 'aaabbb').group(0, 1),
+ ('aaabbb', 'bbb'))
+ self.assertEqual(regex.sub(r'(?p)a*(.*?)', r'\1', 'aaabbb'), 'bbb')
+ self.assertEqual(regex.sub(r'(?p)a*(.*)', r'\1', 'aaabbb'), 'bbb')
+
+ # Hg issue 192: Named lists reverse matching doesn't work with
+ # IGNORECASE and V1
+ self.assertEqual(regex.match(r'(?irV0)\L<kw>', '21', kw=['1']).span(),
+ (1, 2))
+ self.assertEqual(regex.match(r'(?irV1)\L<kw>', '21', kw=['1']).span(),
+ (1, 2))
+
+ # Hg issue 193: Alternation and .REVERSE flag.
+ self.assertEqual(regex.search('a|b', '111a222').span(), (3, 4))
+ self.assertEqual(regex.search('(?r)a|b', '111a222').span(), (3, 4))
+
+ # Hg issue 194: .FULLCASE and Backreference
+ self.assertEqual(regex.search(r'(?if)<(CLI)><\1>',
+ '<cli><cli>').span(), (0, 10))
+ self.assertEqual(regex.search(r'(?if)<(CLI)><\1>',
+ '<cli><clI>').span(), (0, 10))
+ self.assertEqual(regex.search(r'(?ifr)<\1><(CLI)>',
+ '<cli><clI>').span(), (0, 10))
+
+ # Hg issue 195: Pickle (or otherwise serial) the compiled regex
+ r = regex.compile(r'\L<options>', options=['foo', 'bar'])
+ p = pickle.dumps(r)
+ r = pickle.loads(p)
+ self.assertEqual(r.match('foo').span(), (0, 3))
+
+ # Hg issue 196: Fuzzy matching on repeated regex not working as
+ # expected
+ self.assertEqual(regex.match('(x{6}){e<=1}', 'xxxxxx',
+ flags=regex.BESTMATCH).span(), (0, 6))
+ self.assertEqual(regex.match('(x{6}){e<=1}', 'xxxxx',
+ flags=regex.BESTMATCH).span(), (0, 5))
+ self.assertEqual(regex.match('(x{6}){e<=1}', 'x',
+ flags=regex.BESTMATCH), None)
+ self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'xxxxxx',
+ flags=regex.BESTMATCH).span(), (0, 6))
+ self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'xxxxx',
+ flags=regex.BESTMATCH).span(), (0, 5))
+ self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'x',
+ flags=regex.BESTMATCH), None)
+
+ # Hg issue 197: ValueError in regex.compile
+ self.assertRaises(regex.error, lambda:
+ regex.compile(b'00000\\0\\00\\^\50\\00\\U05000000'))
+
+ # Hg issue 198: ValueError in regex.compile
+ self.assertRaises(regex.error, lambda: regex.compile(b"{e', '22', aa=['121',
+ '22'])), True)
+ self.assertEqual(bool(regex.search(r'(?ri)\L<aa>', '22', aa=['121',
+ '22'])), True)
+ self.assertEqual(bool(regex.search(r'(?fi)\L<aa>', '22', aa=['121',
+ '22'])), True)
+ self.assertEqual(bool(regex.search(r'(?fri)\L<aa>', '22', aa=['121',
+ '22'])), True)
+
+ # Hg issue 208: Named list, (?ri) flags, Backreference
+ self.assertEqual(regex.search(r'(?r)\1dog..(?<=(\L<aa>))$', 'ccdogcc',
+ aa=['bcb', 'cc']).span(), (0, 7))
+ self.assertEqual(regex.search(r'(?ir)\1dog..(?<=(\L<aa>))$',
+ 'ccdogcc', aa=['bcb', 'cc']).span(), (0, 7))
+
+ # Hg issue 210: Fuzzy matching and Backreference
+ self.assertEqual(regex.search(r'(2)(?:\1{5}){e<=1}',
+ '3222212').span(), (1, 7))
+ self.assertEqual(regex.search(r'(\d)(?:\1{5}){e<=1}',
+ '3222212').span(), (1, 7))
+
+ # Hg issue 211: Segmentation fault with recursive matches and atomic
+ # groups
+ self.assertEqual(regex.match(r'''\A(?P<whole>(?>\((?&whole)\)|[+\-]))\Z''',
+ '((-))').span(), (0, 5))
+ self.assertEqual(regex.match(r'''\A(?P<whole>(?>\((?&whole)\)|[+\-]))\Z''',
+ '((-)+)'), None)
+
+ # Hg issue 212: Unexpected matching difference with .*? between re and
+ # regex
+ self.assertEqual(regex.match(r"x.*? (.).*\1(.*)\1",
+ 'x |y| z|').span(), (0, 9))
+ self.assertEqual(regex.match(r"\.sr (.*?) (.)(.*)\2(.*)\2(.*)",
+ r'.sr h |||').span(), (0, 35))
+
+ # Hg issue 213: Segmentation Fault
+ a = '"\\xF9\\x80\\xAEqdz\\x95L\\xA7\\x89[\\xFE \\x91)\\xF9]\\xDB\'\\x99\\x09=\\x00\\xFD\\x98\\x22\\xDD\\xF1\\xB6\\xC3 Z\\xB6gv\\xA5x\\x93P\\xE1r\\x14\\x8Cv\\x0C\\xC0w\\x15r\\xFFc%" '
+ py_regex_pattern = r'''(?P<q1>((?>(?<!\\)(?>"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``)))) (?P<q2>((?>(?<!\\)(?>"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``))))'''
+ self.assertEqual(bool(regex.search(py_regex_pattern, a)), False)
+
+ # Hg Issue 216: Invalid match when using negative lookbehind and pipe
+ self.assertEqual(bool(regex.match('foo(?<=foo)', 'foo')), True)
+ self.assertEqual(bool(regex.match('foo(?<!foo)', 'foo')), False)
+ self.assertEqual(bool(regex.match(r'(?(?=.*\!)(?P<test>.*\!\w*\:.*)|(?P<test>.*))',
+ '!')), False)
+
+ # Hg issue 220: Misbehavior of group capture with OR operand
+ self.assertEqual(regex.match(r'\w*(ea)\w*|\w*e(?!a)\w*',
+ 'easier').groups(), ('ea', ))
+
+ # Hg issue 225: BESTMATCH in fuzzy match not working
+ self.assertEqual(regex.search('(^1234$){i,d}', '12234',
+ regex.BESTMATCH).span(), (0, 5))
+ self.assertEqual(regex.search('(^1234$){i,d}', '12234',
+ regex.BESTMATCH).fuzzy_counts, (0, 1, 0))
+
+ self.assertEqual(regex.search('(^1234$){s,i,d}', '12234',
+ regex.BESTMATCH).span(), (0, 5))
+ self.assertEqual(regex.search('(^1234$){s,i,d}', '12234',
+ regex.BESTMATCH).fuzzy_counts, (0, 1, 0))
+
+ # Hg issue 226: Error matching at start of string
+ self.assertEqual(regex.search('(^123$){s,i,d}', 'xxxxxxxx123',
+ regex.BESTMATCH).span(), (0, 11))
+ self.assertEqual(regex.search('(^123$){s,i,d}', 'xxxxxxxx123',
+ regex.BESTMATCH).fuzzy_counts, (0, 8, 0))
+
+ # Hg issue 227: Incorrect behavior for ? operator with UNICODE +
+ # IGNORECASE
+ self.assertEqual(regex.search(r'a?yz', 'xxxxyz', flags=regex.FULLCASE |
+ regex.IGNORECASE).span(), (4, 6))
+
+ # Hg issue 230: Is it a bug of (?(DEFINE)...)
+ self.assertEqual(regex.findall(r'(?:(?![a-d]).)+', 'abcdefgh'),
+ ['efgh'])
+ self.assertEqual(regex.findall(r'''(?(DEFINE)(?P<mydef>(?:(?![a-d]).)))(?&mydef)+''',
+ 'abcdefgh'), ['efgh'])
+
+ # Hg issue 238: Not fully re backward compatible
+ self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){1,3}',
+ '"Erm....yes. T..T...Thank you for that."'), [('Erm....', 'Erm',
+ '....'), ('T...', 'T', '...')])
+ self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){3}',
+ '"Erm....yes. T..T...Thank you for that."'), [])
+ self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){2}',
+ '"Erm....yes. T..T...Thank you for that."'), [('T...', 'T', '...')])
+ self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){1}',
+ '"Erm....yes. T..T...Thank you for that."'), [('Erm....', 'Erm',
+ '....'), ('T..', 'T', '..'), ('T...', 'T', '...')])
+
+ # Hg issue 247: Unexpected result with fuzzy matching and lookahead
+ # expression
+ self.assertEqual(regex.search(r'(?:ESTONIA(?!\w)){e<=1}',
+ 'ESTONIAN WORKERS').group(), 'ESTONIAN')
+ self.assertEqual(regex.search(r'(?:ESTONIA(?=\W)){e<=1}',
+ 'ESTONIAN WORKERS').group(), 'ESTONIAN')
+
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?&func)',
+ 'abc').groups(), (None, ))
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?&func)',
+ 'abc').groupdict(), {'func': None})
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?&func)',
+ 'abc').capturesdict(), {'func': ['a']})
+
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?=(?&func))',
+ 'abc').groups(), (None, ))
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?=(?&func))',
+ 'abc').groupdict(), {'func': None})
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.))(?=(?&func))',
+ 'abc').capturesdict(), {'func': ['a']})
+
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.)).(?<=(?&func))',
+ 'abc').groups(), (None, ))
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.)).(?<=(?&func))',
+ 'abc').groupdict(), {'func': None})
+ self.assertEqual(regex.search(r'(?(DEFINE)(?<func>.)).(?<=(?&func))',
+ 'abc').capturesdict(), {'func': ['a']})
+
+ # Hg issue 271: Comment logic different between Re and Regex
+ self.assertEqual(bool(regex.match(r'ab(?#comment\))cd', 'abcd')), True)
+
+ # Hg issue 276: Partial Matches yield incorrect matches and bounds
+ self.assertEqual(regex.search(r'[a-z]+ [a-z]*?:', 'foo bar',
+ partial=True).span(), (0, 7))
+ self.assertEqual(regex.search(r'(?r):[a-z]*? [a-z]+', 'foo bar',
+ partial=True).span(), (0, 7))
+
+ # Hg issue 291: Include Script Extensions as a supported Unicode property
+ self.assertEqual(bool(regex.match(r'(?u)\p{Script:Beng}',
+ '\u09EF')), True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{Script:Bengali}',
+ '\u09EF')), True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Bengali}',
+ '\u09EF')), True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Beng}',
+ '\u09EF')), True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Cakm}',
+ '\u09EF')), True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Sylo}',
+ '\u09EF')), True)
+
+ # Hg issue #293: scx (Script Extensions) property currently matches
+ # incorrectly
+ self.assertEqual(bool(regex.match(r'(?u)\p{scx:Latin}', 'P')), True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{scx:Ahom}', 'P')), False)
+ self.assertEqual(bool(regex.match(r'(?u)\p{scx:Common}', '4')), True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{scx:Caucasian_Albanian}', '4')),
+ False)
+ self.assertEqual(bool(regex.match(r'(?u)\p{scx:Arabic}', '\u062A')), True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{scx:Balinese}', '\u062A')),
+ False)
+ self.assertEqual(bool(regex.match(r'(?u)\p{scx:Devanagari}', '\u091C')),
+ True)
+ self.assertEqual(bool(regex.match(r'(?u)\p{scx:Batak}', '\u091C')), False)
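+ # Script_Extensions (scx) lists every script a character is used
+ # with, whereas Script (sc) gives only its primary script.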
+
+ # Hg issue 296: Group references are not taken into account when group is reporting the last match
+ self.assertEqual(regex.fullmatch('(?P<x>.)*(?&x)', 'abc').captures('x'),
+ ['a', 'b', 'c'])
+ self.assertEqual(regex.fullmatch('(?P<x>.)*(?&x)', 'abc').group('x'),
+ 'b')
+
+ self.assertEqual(regex.fullmatch('(?P<x>.)(?P<x>.)(?P<x>.)',
+ 'abc').captures('x'), ['a', 'b', 'c'])
+ self.assertEqual(regex.fullmatch('(?P<x>.)(?P<x>.)(?P<x>.)',
+ 'abc').group('x'), 'c')
+
+ # Hg issue 299: Partial gives misleading results with "open ended" regexp
+ self.assertEqual(regex.match('(?:ab)*', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)*', 'abab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)*?', '', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)*+', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)*+', 'abab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)+', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)+', 'abab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)+?', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)++', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?:ab)++', 'abab', partial=True).partial,
+ False)
+
+ self.assertEqual(regex.match('(?r)(?:ab)*', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)*', 'abab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)*?', '', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)*+', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)*+', 'abab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)+', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)+', 'abab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)+?', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)++', 'ab', partial=True).partial,
+ False)
+ self.assertEqual(regex.match('(?r)(?:ab)++', 'abab', partial=True).partial,
+ False)
+
+ self.assertEqual(regex.match('a*', '', partial=True).partial, False)
+ self.assertEqual(regex.match('a*?', '', partial=True).partial, False)
+ self.assertEqual(regex.match('a*+', '', partial=True).partial, False)
+ self.assertEqual(regex.match('a+', '', partial=True).partial, True)
+ self.assertEqual(regex.match('a+?', '', partial=True).partial, True)
+ self.assertEqual(regex.match('a++', '', partial=True).partial, True)
+ self.assertEqual(regex.match('a+', 'a', partial=True).partial, False)
+ self.assertEqual(regex.match('a+?', 'a', partial=True).partial, False)
+ self.assertEqual(regex.match('a++', 'a', partial=True).partial, False)
+
+ self.assertEqual(regex.match('(?r)a*', '', partial=True).partial, False)
+ self.assertEqual(regex.match('(?r)a*?', '', partial=True).partial, False)
+ self.assertEqual(regex.match('(?r)a*+', '', partial=True).partial, False)
+ self.assertEqual(regex.match('(?r)a+', '', partial=True).partial, True)
+ self.assertEqual(regex.match('(?r)a+?', '', partial=True).partial, True)
+ self.assertEqual(regex.match('(?r)a++', '', partial=True).partial, True)
+ self.assertEqual(regex.match('(?r)a+', 'a', partial=True).partial, False)
+ self.assertEqual(regex.match('(?r)a+?', 'a', partial=True).partial, False)
+ self.assertEqual(regex.match('(?r)a++', 'a', partial=True).partial, False)
+
+ self.assertEqual(regex.match(r"(?:\s*\w+'*)+", 'whatever', partial=True).partial,
+ False)
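+
+ # Note on the block above: .partial is False whenever the input
+ # already contains a complete match; it is True only when the pattern
+ # still requires more input, as with 'a+' against the empty string.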
+
+ # Hg issue 300: segmentation fault
+ # The group names here are placeholders; the assertions below do not
+ # reference them.
+ pattern = ('(?P<g1>GGCGTCACACTTTGCTATGCCATAGCAT[AG]TTTATCCATAAGA'
+ 'TTAGCGGATCCTACCTGACGCTTTTTATCGCAACTCTCTACTGTTTCTCCATAACAGAACATATTGA'
+ 'CTATCCGGTATTACCCGGCATGACAGGAGTAAAA){e<=1}'
+ '(?P<g2>[ACGT]{1059}){e<=2}'
+ '(?P<g3>TAATCGTCTTGTTTGATACACAAGGGTCGCATCTGCGGCCCTTTTGCTTTTTTAAG'
+ 'TTGTAAGGATATGCCATTCTAGA){e<=0}'
+ '(?P<g4>[ACGT]{18}){e<=0}'
+ '(?P<g5>AGATCGG[CT]AGAGCGTCGTGTAGGGAAAGAGTGTGG){e<=1}')
+
+ text = ('GCACGGCGTCACACTTTGCTATGCCATAGCATATTTATCCATAAGATTAGCGGATCCTACC'
+ 'TGACGCTTTTTATCGCAACTCTCTACTGTTTCTCCATAACAGAACATATTGACTATCCGGTATTACC'
+ 'CGGCATGACAGGAGTAAAAATGGCTATCGACGAAAACAAACAGAAAGCGTTGGCGGCAGCACTGGGC'
+ 'CAGATTGAGAAACAATTTGGTAAAGGCTCCATCATGCGCCTGGGTGAAGACCGTTCCATGGATGTGG'
+ 'AAACCATCTCTACCGGTTCGCTTTCACTGGATATCGCGCTTGGGGCAGGTGGTCTGCCGATGGGCCG'
+ 'TATCGTCGAAATCTACGGACCGGAATCTTCCGGTAAAACCACGCTGACGCTGCAGGTGATCGCCGCA'
+ 'GCGCAGCGTGAAGGTAAAACCTGTGCGTTTATCGATGCTGAACACGCGCTGGACCCAATCTACGCAC'
+ 'GTAAACTGGGCGTCGATATCGACAACCTGCTGTGCTCCCAGCCGGACACCGGCGAGCAGGCACTGGA'
+ 'AATCTGTGACGCCCTGGCGCGTTCTGGCGCAGTAGACGTTATCGTCGTTGACTCCGTGGCGGCACTG'
+ 'ACGCCGAAAGCGGAAATCGAAGGCGAAATCGGCGACTCTCATATGGGCCTTGCGGCACGTATGATGA'
+ 'GCCAGGCGATGCGTAAGCTGGCGGGTAACCTGAAGCAGTCCAACACGCTGCTGATCTTCATCAACCC'
+ 'CATCCGTATGAAAATTGGTGTGATGTTCGGCAACCCGGAAACCACTTACCGGTGGTAACGCGCTGAA'
+ 'ATTCTACGCCTCTGTTCGTCTCGACATCCGTTAAATCGGCGCGGTGAAAGAGGGCGAAAACGTGGTG'
+ 'GGTAGCGAAACCCGCGTGAAAGTGGTGAAGAACAAAATCGCTGCGCCGTTTAAACAGGCTGAATTCC'
+ 'AGATCCTCTACGGCGAAGGTATCAACTTCTACCCCGAACTGGTTGACCTGGGCGTAAAAGAGAAGCT'
+ 'GATCGAGAAAGCAGGCGCGTGGTACAGCTACAAAGGTGAGAAGATCGGTCAGGGTAAAGCGAATGCG'
+ 'ACTGCCTGGCTGAAATTTAACCCGGAAACCGCGAAAGAGATCGAGTGAAAAGTACGTGAGTTGCTGC'
+ 'TGAGCAACCCGAACTCAACGCCGGATTTCTCTGTAGATGATAGCGAAGGCGTAGCAGAAACTAACGA'
+ 'AGATTTTTAATCGTCTTGTTTGATACACAAGGGTCGCATCTGCGGCCCTTTTGCTTTTTTAAGTTGT'
+ 'AAGGATATGCCATTCTAGACAGTTAACACACCAACAAAGATCGGTAGAGCGTCGTGTAGGGAAAGAG'
+ 'TGTGGTACC')
+
+ m = regex.search(pattern, text, flags=regex.BESTMATCH)
+ self.assertEqual(m.fuzzy_counts, (0, 1, 0))
+ self.assertEqual(m.fuzzy_changes, ([], [1206], []))
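+
+ # Illustrative sketch: fuzzy_counts is a (substitutions, insertions,
+ # deletions) tuple and fuzzy_changes lists the text positions
+ # involved; e.g. a single substitution:
+ fuzzy_m = regex.fullmatch('(?:cat){e<=1}', 'cot')
+ self.assertEqual(fuzzy_m.fuzzy_counts, (1, 0, 0))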
+
+ # Hg issue 306: Fuzzy match parameters not respecting quantifier scope
+ self.assertEqual(regex.search(r'(?e)(dogf(((oo){e<1})|((00){e<1}))d){e<2}',
+ 'dogfood').fuzzy_counts, (0, 0, 0))
+ self.assertEqual(regex.search(r'(?e)(dogf(((oo){e<1})|((00){e<1}))d){e<2}',
+ 'dogfoot').fuzzy_counts, (1, 0, 0))
+
+ # Hg issue 312: \X not matching graphemes with zero-width-joins
+ self.assertEqual(regex.findall(r'\X',
+ '\U0001F468\u200D\U0001F469\u200D\U0001F467\u200D\U0001F466'),
+ ['\U0001F468\u200D\U0001F469\u200D\U0001F467\u200D\U0001F466'])
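+
+ # Illustrative sketch: \X matches one grapheme cluster, so a base
+ # letter plus a combining accent is a single match.
+ self.assertEqual(regex.findall(r'\X', 'e\u0301a'), ['e\u0301', 'a'])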
+
+ # Hg issue 320: Abnormal performance
+ self.assertEqual(bool(regex.search(r'(?=a)a', 'a')), True)
+ self.assertEqual(bool(regex.search(r'(?!b)a', 'a')), True)
+
+ # Hg issue 327: .fullmatch() causes MemoryError
+ self.assertEqual(regex.fullmatch(r'((\d)*?)*?', '123').span(), (0, 3))
+
+ # Hg issue 329: Wrong group matches when question mark quantifier is used within a look behind
+ self.assertEqual(regex.search(r'''(?(DEFINE)(?<mydef>(?<wrong>THIS_SHOULD_NOT_MATCHx?)|(?<right>right))).*(?<=(?&mydef).*)''',
+ 'x right').capturesdict(), {'mydef': ['right'], 'wrong': [], 'right':
+ ['right']})
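+
+ # Illustrative sketch: capturesdict() maps every named group to the
+ # list of all of its captures, including groups that captured nothing.
+ self.assertEqual(regex.match(r'(?P<w>\w)+(?P<d>\d)*',
+ 'ab').capturesdict(), {'w': ['a', 'b'], 'd': []})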
+
+ # Hg issue 338: specifying allowed characters when fuzzy-matching
+ self.assertEqual(bool(regex.match(r'(?:cat){e<=1:[u]}', 'cut')), True)
+ self.assertEqual(bool(regex.match(r'(?:cat){e<=1:u}', 'cut')), True)
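+
+ # Illustrative sketch: the character set after ':' restricts which
+ # characters may take part in an error, so substituting 'o' (not in
+ # [u]) is rejected.
+ self.assertEqual(regex.match(r'(?:cat){e<=1:[u]}', 'cot'), None)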
+
+ # Hg issue 353: fuzzy changes negative indexes
+ self.assertEqual(regex.search(r'(?be)(AGTGTTCCCCGCGCCAGCGGGGATAAACCG){s<=5,i<=5,d<=5,s+i+d<=10}',
+ 'TTCCCCGCGCCAGCGGGGATAAACCG').fuzzy_changes, ([], [], [0, 1, 3, 5]))
+
+ # Git issue 364: Contradictory values in fuzzy_counts and fuzzy_changes
+ self.assertEqual(regex.match(r'(?:bc){e}', 'c').fuzzy_counts, (1, 0,
+ 1))
+ self.assertEqual(regex.match(r'(?:bc){e}', 'c').fuzzy_changes, ([0],
+ [], [1]))
+ self.assertEqual(regex.match(r'(?e)(?:bc){e}', 'c').fuzzy_counts, (0,
+ 0, 1))
+ self.assertEqual(regex.match(r'(?e)(?:bc){e}', 'c').fuzzy_changes,
+ ([], [], [0]))
+ self.assertEqual(regex.match(r'(?b)(?:bc){e}', 'c').fuzzy_counts, (0,
+ 0, 1))
+ self.assertEqual(regex.match(r'(?b)(?:bc){e}', 'c').fuzzy_changes,
+ ([], [], [0]))
+
+ # Git issue 370: Confusions about Fuzzy matching behavior
+ self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){e}',
+ '$ 10,112.111.12').fuzzy_counts, (6, 0, 5))
+ self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=1}',
+ '$ 10,112.111.12').fuzzy_counts, (1, 0, 0))
+ self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=1,i<=1,d<=1}',
+ '$ 10,112.111.12').fuzzy_counts, (1, 0, 0))
+ self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=3}',
+ '$ 10,1a2.111.12').fuzzy_counts, (2, 0, 0))
+ self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=2}',
+ '$ 10,1a2.111.12').fuzzy_counts, (2, 0, 0))
+
+ self.assertEqual(regex.fullmatch(r'(?e)(?:0?,0(?:,0)?){s<=1,d<=1}',
+ ',0;0').fuzzy_counts, (1, 0, 0))
+ self.assertEqual(regex.fullmatch(r'(?e)(?:0??,0(?:,0)?){s<=1,d<=1}',
+ ',0;0').fuzzy_counts, (1, 0, 0))
+
+ # Git issue 371: Specifying character set when fuzzy-matching allows characters not in the set
+ self.assertEqual(regex.search(r"\b(?e)(?:\d{6,20}){i<=5:[\-\\\/]}\b",
+ "cat dog starting at 00:01132.000. hello world"), None)
+
+ # Git issue 385: Comments in expressions
+ self.assertEqual(bool(regex.compile('(?#)')), True)
+ self.assertEqual(bool(regex.compile('(?x)(?#)')), True)
+
+ # Git issue 394: Unexpected behaviour in fuzzy matching with limited character set with IGNORECASE flag
+ self.assertEqual(regex.findall(r'(\d+){i<=2:[ab]}', '123X4Y5'),
+ ['123', '4', '5'])
+ self.assertEqual(regex.findall(r'(?i)(\d+){i<=2:[ab]}', '123X4Y5'),
+ ['123', '4', '5'])
+
+ # Git issue 403: Fuzzy matching with wrong distance (unnecessary substitutions)
+ self.assertEqual(regex.match(r'^(test){e<=5}$', 'terstin',
+ flags=regex.B).fuzzy_counts, (0, 3, 0))
+
+ # Git issue 408: regex fails with a quantified backreference but succeeds with repeated backref
+ self.assertEqual(bool(regex.match(r"(?:(x*)\1\1\1)*x$", "x" * 5)), True)
+ self.assertEqual(bool(regex.match(r"(?:(x*)\1{3})*x$", "x" * 5)), True)
+
+ # Git issue 415: Fuzzy character restrictions don't apply to insertions at "right edge"
+ self.assertEqual(regex.match(r't(?:es){s<=1:\d}t', 'te5t').group(),
+ 'te5t')
+ self.assertEqual(regex.match(r't(?:es){s<=1:\d}t', 'tezt'), None)
+ self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', 'tes5t').group(),
+ 'tes5t')
+ self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', 'teszt'), None)
+ self.assertEqual(regex.match(r't(?:es){i<=1:\d}t',
+ 'tes5t').fuzzy_changes, ([], [3], []))
+ self.assertEqual(regex.match(r't(es){i<=1,0<e<=1:\d}t',
+ 'tes5t').fuzzy_changes, ([], [3], []))
+
+ # Git issue 427: Possible bug with BESTMATCH
+ sequence = 'TTCAGACGTGTGCTCTTCCGATCTCAATACCGACTCCTCACTGTGTGTCT'
+ pattern = r'(?P<insert>.*)(?P<anchor>CTTCC){e<=1}(?P<umi>([ACGT]){4,6})(?P<sid>CAATACCGACTCCTCACTGTGT){e<=2}(?P<end>([ACGT]){0,6}$)'
+
+ m = regex.match(pattern, sequence, flags=regex.BESTMATCH)
+ self.assertEqual(m.span(), (0, 50))
+ self.assertEqual(m.groupdict(), {'insert': 'TTCAGACGTGTGCT', 'anchor': 'CTTCC', 'umi': 'GATCT', 'sid': 'CAATACCGACTCCTCACTGTGT', 'end': 'GTCT'})
+
+ m = regex.match(pattern, sequence, flags=regex.ENHANCEMATCH)
+ self.assertEqual(m.span(), (0, 50))
+ self.assertEqual(m.groupdict(), {'insert': 'TTCAGACGTGTGCT', 'anchor': 'CTTCC', 'umi': 'GATCT', 'sid': 'CAATACCGACTCCTCACTGTGT', 'end': 'GTCT'})
+
+ # Git issue 433: Disagreement between fuzzy_counts and fuzzy_changes
+ pattern = r'(?P<insert>.*)(?P<anchor>AACACTGG){e<=1}(?P<umi>([AT][CG]){5}){e<=2}(?P<sid>GTAACCGAAG){e<=2}(?P<end>([ACGT]){0,6}$)'
+
+ sequence = 'GGAAAACACTGGTCTCAGTCTCGTAACCGAAGTGGTCG'
+ m = regex.match(pattern, sequence, flags=regex.BESTMATCH)
+ self.assertEqual(m.fuzzy_counts, (0, 0, 0))
+ self.assertEqual(m.fuzzy_changes, ([], [], []))
+
+ sequence = 'GGAAAACACTGGTCTCAGTCTCGTCCCCGAAGTGGTCG'
+ m = regex.match(pattern, sequence, flags=regex.BESTMATCH)
+ self.assertEqual(m.fuzzy_counts, (2, 0, 0))
+ self.assertEqual(m.fuzzy_changes, ([24, 25], [], []))
+
+ # Git issue 439: Unmatched groups: sub vs subf
+ self.assertEqual(regex.sub(r'(test1)|(test2)', r'matched: \1\2', 'test1'), 'matched: test1')
+ self.assertEqual(regex.subf(r'(test1)|(test2)', r'matched: {1}{2}', 'test1'), 'matched: test1')
+ self.assertEqual(regex.search(r'(test1)|(test2)', 'matched: test1').expand(r'matched: \1\2'), 'matched: test1')
+ self.assertEqual(regex.search(r'(test1)|(test2)', 'matched: test1').expandf(r'matched: {1}{2}'), 'matched: test1')
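+
+ # Illustrative sketch: subf() and expandf() take str.format-style
+ # templates, so group references are written as {1}, {2}, ... instead
+ # of \1, \2.
+ self.assertEqual(regex.subf(r'(\w+) (\w+)', '{2} {1}', 'foo bar'),
+ 'bar foo')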
+
+ # Git issue 442: Fuzzy regex matching doesn't seem to test insertions correctly
+ self.assertEqual(regex.search(r"(?:\bha\b){i:[ ]}", "having"), None)
+ self.assertEqual(regex.search(r"(?:\bha\b){i:[ ]}", "having", flags=regex.I), None)
+
+ # Git issue 467: Scoped inline flags 'a', 'u' and 'L' affect global flags
+ self.assertEqual(regex.match(r'(?a:\w)\w', 'd\N{CYRILLIC SMALL LETTER ZHE}').span(), (0, 2))
+ self.assertEqual(regex.match(r'(?a:\w)(?u:\w)', 'd\N{CYRILLIC SMALL LETTER ZHE}').span(), (0, 2))
+
+ # Git issue 473: Emoji classified as letter
+ self.assertEqual(regex.match(r'^\p{LC}+$', '\N{SMILING CAT FACE WITH OPEN MOUTH}'), None)
+ self.assertEqual(regex.match(r'^\p{So}+$', '\N{SMILING CAT FACE WITH OPEN MOUTH}').span(), (0, 1))
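+
+ # Note on the block above: \p{LC} is the Cased_Letter general
+ # category (Lu, Ll and Lt), while emoji fall under \p{So}
+ # (Other_Symbol).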
+
+ # Git issue 474: regex has no equivalent to `re.Match.groups()` for captures
+ self.assertEqual(regex.match(r'(.)+', 'abc').allcaptures(), (['abc'], ['a', 'b', 'c']))
+ self.assertEqual(regex.match(r'(.)+', 'abc').allspans(), ([(0, 3)], [(0, 1), (1, 2), (2, 3)]))
+
+ # Git issue 477: \v for vertical spacing
+ self.assertEqual(bool(regex.fullmatch(r'\p{HorizSpace}+', '\t \xA0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000')), True)
+ self.assertEqual(bool(regex.fullmatch(r'\p{VertSpace}+', '\n\v\f\r\x85\u2028\u2029')), True)
+
+ # Git issue 479: Segmentation fault when using conditional pattern
+ self.assertEqual(regex.match(r'(?(?<=A)|(?(?![^B])C|D))', 'A'), None)
+ self.assertEqual(regex.search(r'(?(?<=A)|(?(?![^B])C|D))', 'A').span(), (1, 1))
+
+ # Git issue 494: Backtracking failure matching regex ^a?(a?)b?c\1$ against string abca
+ self.assertEqual(regex.search(r"^a?(a?)b?c\1$", "abca").span(), (0, 4))
+
+ # Git issue 498: Conditional negative lookahead inside positive lookahead fails to match
+ self.assertEqual(regex.match(r'(?(?=a).|..)', 'ab').span(), (0, 1))
+ self.assertEqual(regex.match(r'(?(?=b).|..)', 'ab').span(), (0, 2))
+ self.assertEqual(regex.match(r'(?(?!a).|..)', 'ab').span(), (0, 2))
+ self.assertEqual(regex.match(r'(?(?!b).|..)', 'ab').span(), (0, 1))
+
+ def test_fuzzy_ext(self):
+ self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', 'e')),
+ True)
+ self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'e')),
+ True)
+ self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', '-')),
+ False)
+ self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', '-')),
+ False)
+
+ self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'ae')),
+ True)
+ self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}',
+ 'ae')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'a-')),
+ False)
+ self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}',
+ 'a-')), False)
+
+ self.assertEqual(bool(regex.fullmatch(r'(?:ab){e<=1:[a-z]}', 'ae')),
+ True)
+ self.assertEqual(bool(regex.fullmatch(r'(?r)(?:ab){e<=1:[a-z]}',
+ 'ae')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?:ab){e<=1:[a-z]}', 'a-')),
+ False)
+ self.assertEqual(bool(regex.fullmatch(r'(?r)(?:ab){e<=1:[a-z]}',
+ 'a-')), False)
+
+ self.assertEqual(bool(regex.fullmatch(r'(a)\1{e<=1:[a-z]}', 'ae')),
+ True)
+ self.assertEqual(bool(regex.fullmatch(r'(?r)\1{e<=1:[a-z]}(a)',
+ 'ea')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(a)\1{e<=1:[a-z]}', 'a-')),
+ False)
+ self.assertEqual(bool(regex.fullmatch(r'(?r)\1{e<=1:[a-z]}(a)',
+ '-a')), False)
+
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+ 'ts')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+ 'st')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+ 'st')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+ 'ts')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+ '-s')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+ 's-')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+ 's-')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}',
+ '-s')), False)
+
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+ 'ssst')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+ 'ssts')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(\N{LATIN SMALL LETTER SHARP S})',
+ 'stss')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(\N{LATIN SMALL LETTER SHARP S})',
+ 'tsss')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+ 'ss-s')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+ 'sss-')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+ '-s')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}',
+ 's-')), False)
+
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}',
+ '\N{LATIN SMALL LETTER SHARP S}ts')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}',
+ '\N{LATIN SMALL LETTER SHARP S}st')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(ss)',
+ 'st\N{LATIN SMALL LETTER SHARP S}')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(ss)',
+ 'ts\N{LATIN SMALL LETTER SHARP S}')), True)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}',
+ '\N{LATIN SMALL LETTER SHARP S}-s')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}',
+ '\N{LATIN SMALL LETTER SHARP S}s-')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)(ss)\1{e<=1:[a-z]}',
+ 's-\N{LATIN SMALL LETTER SHARP S}')), False)
+ self.assertEqual(bool(regex.fullmatch(r'(?firu)(ss)\1{e<=1:[a-z]}',
+ '-s\N{LATIN SMALL LETTER SHARP S}')), False)
+
+ def test_subscripted_captures(self):
+ self.assertEqual(regex.match(r'(?P<x>.)+',
+ 'abc').expandf('{0} {0[0]} {0[-1]}'), 'abc abc abc')
+ self.assertEqual(regex.match(r'(?P<x>.)+',
+ 'abc').expandf('{1} {1[0]} {1[1]} {1[2]} {1[-1]} {1[-2]} {1[-3]}'),
+ 'c a b c c b a')
+ self.assertEqual(regex.match(r'(?P<x>.)+',
+ 'abc').expandf('{x} {x[0]} {x[1]} {x[2]} {x[-1]} {x[-2]} {x[-3]}'),
+ 'c a b c c b a')
+
+ self.assertEqual(regex.subf(r'(?P<x>.)+', r'{0} {0[0]} {0[-1]}',
+ 'abc'), 'abc abc abc')
+ self.assertEqual(regex.subf(r'(?P<x>.)+',
+ '{1} {1[0]} {1[1]} {1[2]} {1[-1]} {1[-2]} {1[-3]}', 'abc'),
+ 'c a b c c b a')
+ self.assertEqual(regex.subf(r'(?P<x>.)+',
+ '{x} {x[0]} {x[1]} {x[2]} {x[-1]} {x[-2]} {x[-3]}', 'abc'),
+ 'c a b c c b a')
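+
+ # Illustrative sketch: the same subscripting works inside a subf()
+ # template, selecting individual captures of a repeated group.
+ self.assertEqual(regex.subf(r'(?P<x>.)+', '{x[0]}{x[-1]}', 'abc'),
+ 'ac')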
+
+ def test_more_zerowidth(self):
+ if sys.version_info >= (3, 7, 0):
+ self.assertEqual(regex.split(r'\b|:+', 'a::bc'), ['', 'a', '', '',
+ 'bc', ''])
+ self.assertEqual(regex.sub(r'\b|:+', '-', 'a::bc'), '-a---bc-')
+ self.assertEqual(regex.findall(r'\b|:+', 'a::bc'), ['', '', '::',
+ '', ''])
+ self.assertEqual([m.span() for m in regex.finditer(r'\b|:+',
+ 'a::bc')], [(0, 0), (1, 1), (1, 3), (3, 3), (5, 5)])
+ self.assertEqual([m.span() for m in regex.finditer(r'(?m)^\s*?$',
+ 'foo\n\n\nbar')], [(4, 4), (4, 5), (5, 5)])
+
+ def test_line_ending(self):
+ self.assertEqual(regex.findall(r'\R', '\r\n\n\x0B\f\r\x85\u2028\u2029'),
+ ['\r\n', '\n', '\x0B', '\f', '\r', '\x85', '\u2028', '\u2029'])
+ self.assertEqual(regex.findall(br'\R', b'\r\n\n\x0B\f\r\x85'), [b'\r\n',
+ b'\n', b'\x0B', b'\f', b'\r'])
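+
+ # Note on the block above: \R behaves like an atomic
+ # (?>\r\n|[\n\x0B\f\r\x85\u2028\u2029]), so '\r\n' stays together as
+ # a single line ending rather than being split.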
+
+def test_main():
+ unittest.main(verbosity=2)
+
+if __name__ == "__main__":
+ test_main()