diff --git a/.gitattributes b/.gitattributes index 0b2e3056883fdec6a06c77fc5529f18796864d82..4fa34db002e390665d8b0547a722dfbc445b77b9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -82,3 +82,4 @@ llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter= llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__init__.py b/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f707416286b22ddbdcf84f60b6ad38ded604bdfc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__init__.py @@ -0,0 +1,132 @@ +import sys +import os +import re +import importlib +import warnings + + +is_pypy = '__pypy__' in sys.builtin_module_names + + +warnings.filterwarnings('ignore', + r'.+ distutils\b.+ deprecated', + DeprecationWarning) + + +def warn_distutils_present(): + if 'distutils' not in sys.modules: + return + if is_pypy and sys.version_info < (3, 7): + # PyPy for 3.6 unconditionally imports distutils, so bypass the warning + # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 + return + warnings.warn( + "Distutils was imported before Setuptools, but importing Setuptools " + "also replaces the `distutils` module in `sys.modules`. This may lead " + "to undesirable behaviors or errors. To avoid these issues, avoid " + "using distutils directly, ensure that setuptools is installed in the " + "traditional way (e.g. not an editable install), and/or make sure " + "that setuptools is always imported before distutils.") + + +def clear_distutils(): + if 'distutils' not in sys.modules: + return + warnings.warn("Setuptools is replacing distutils.") + mods = [name for name in sys.modules if re.match(r'distutils\b', name)] + for name in mods: + del sys.modules[name] + + +def enabled(): + """ + Allow selection of distutils by environment variable. + """ + which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') + return which == 'local' + + +def ensure_local_distutils(): + clear_distutils() + + # With the DistutilsMetaFinder in place, + # perform an import to cause distutils to be + # loaded from setuptools._distutils. Ref #2906. + add_shim() + importlib.import_module('distutils') + remove_shim() + + # check that submodules load as expected + core = importlib.import_module('distutils.core') + assert '_distutils' in core.__file__, core.__file__ + + +def do_override(): + """ + Ensure that the local copy of distutils is preferred over stdlib. + + See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 + for more motivation. 
+ """ + if enabled(): + warn_distutils_present() + ensure_local_distutils() + + +class DistutilsMetaFinder: + def find_spec(self, fullname, path, target=None): + if path is not None: + return + + method_name = 'spec_for_{fullname}'.format(**locals()) + method = getattr(self, method_name, lambda: None) + return method() + + def spec_for_distutils(self): + import importlib.abc + import importlib.util + + class DistutilsLoader(importlib.abc.Loader): + + def create_module(self, spec): + return importlib.import_module('setuptools._distutils') + + def exec_module(self, module): + pass + + return importlib.util.spec_from_loader('distutils', DistutilsLoader()) + + def spec_for_pip(self): + """ + Ensure stdlib distutils when running under pip. + See pypa/pip#8761 for rationale. + """ + if self.pip_imported_during_build(): + return + clear_distutils() + self.spec_for_distutils = lambda: None + + @staticmethod + def pip_imported_during_build(): + """ + Detect if pip is being imported in a build script. Ref #2355. + """ + import traceback + return any( + frame.f_globals['__file__'].endswith('setup.py') + for frame, line in traceback.walk_stack(None) + ) + + +DISTUTILS_FINDER = DistutilsMetaFinder() + + +def add_shim(): + sys.meta_path.insert(0, DISTUTILS_FINDER) + + +def remove_shim(): + try: + sys.meta_path.remove(DISTUTILS_FINDER) + except ValueError: + pass diff --git a/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eeadcb3064d0a9b6858f6f3b9e9090c13886ef8d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0c1ee8155611761181b91b211687dfb362ec38f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..023c4516cfa5f05fa06ead733714f72ee9b830fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c85d194503e7816b08e4fca94004af158e3e7b0737ba0647a301cacbd6e92886 +size 3057096 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt new file mode 100644 index 
0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains. + + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important NoticeRead before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. 
+ +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. 
+ +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. 
You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. 
+ +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. 
+ +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. 
Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, 
npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Linux + +libnvjpeg.so, libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. 
GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. + http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. 
+ + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. 
+ + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. 
+ + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. 
Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. 
Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . + +----------------- diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..f860ce3222e8728a2fd007db3e1efd5b5884b737 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA @@ -0,0 +1,38 @@ +Metadata-Version: 2.1 +Name: nvidia-cusolver-cu12 +Version: 11.4.5.107 +Summary: CUDA solver native runtime libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt +Requires-Dist: nvidia-cublas-cu12 +Requires-Dist: nvidia-nvjitlink-cu12 +Requires-Dist: nvidia-cusparse-cu12 + +CUDA solver native runtime libraries diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..e637b3d9bc07904b5e4eec34cec9742227e41559 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD @@ -0,0 +1,22 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc,, 
+nvidia/cusolver/include/cusolverDn.h,sha256=8KUcqUxWPr8jpz3ZVpTB6I3IXMme1ok7E7vi9XXKRzk,147406 +nvidia/cusolver/include/cusolverMg.h,sha256=N8989nnS2BleeMyuftbQgBDJ4sMAkLPSnmy_S_7fxng,11549 +nvidia/cusolver/include/cusolverRf.h,sha256=7BZfWeuMJ8w1Pz4iZeGmwvDZbDNNq0ivG5MHtiATtls,14292 +nvidia/cusolver/include/cusolverSp.h,sha256=8fev0XawDBd0xrOxUlQ3WhclKlUuVAT64zKxwnP8iT0,32561 +nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h,sha256=rTuS0rxwGV3bAz50ua59WVPQ9SvlijORj732oPejoCk,37495 +nvidia/cusolver/include/cusolver_common.h,sha256=8SMCLEPkMN9Ni_KANkvPSHCieV1jrTARuS-Mhmuq5H8,8826 +nvidia/cusolver/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/lib/libcusolver.so.11,sha256=ECh6vHzpxfx-fBY3YVZrWZ6uGzYsR-EACRHRmEQ9bVI,114481816 +nvidia/cusolver/lib/libcusolverMg.so.11,sha256=0f3uK8NQhMAFtQ5r76UCApP7coB7wWG2pQOMh1RMmwY,79763496 +nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA,sha256=b8Zxnx3ZVIwttTKBnzgVXjXu8-_pRL6wBkYMTV7i6gA,1626 +nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD,, +nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106 +nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-manylinux1_x86_64 + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt b/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. 
+ + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains. + + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important NoticeRead before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. 
Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. 
While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. 
This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. + +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. 
(i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. 
You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. 
NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, 
libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Linux + +libnvjpeg.so, libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. 
Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. + http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. + + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. 
Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . 
+ +----------------- diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..aabd4f7520077f5f9c18bae61ce8ab5754bc57fd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA @@ -0,0 +1,36 @@ +Metadata-Version: 2.1 +Name: nvidia-cusparse-cu12 +Version: 12.1.0.106 +Summary: CUSPARSE native runtime libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt +Requires-Dist: nvidia-nvjitlink-cu12 + +CUSPARSE native runtime libraries diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2cf8818e2b166fe605908388de295e1815bc92eb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD @@ -0,0 +1,17 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/cusparse.h,sha256=yhV9iTcEW9XEyhaJmX4iddh_cMb8sfNAy6qva5ae4qw,287290 +nvidia/cusparse/include/cusparse_v2.h,sha256=jkH2A9hYc-TEF0vuQ_SurbhPNEHkYGUIRuxKXhFAqnw,2587 +nvidia/cusparse/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/lib/libcusparse.so.12,sha256=UARmovVZ3mIqcbuSDT0pI-aRNSRXR6J0LuE-3_C6YIU,264876688 +nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 
+nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA,sha256=XpBtE4L1lFCx7gDu7Klx9dijNWQW26PS3fcOGjNIsXg,1550 +nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD,, +nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106 +nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/LICENSE.txt b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f1c11289f6a54cb07ebdbf31d02e8e81b18b07f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2003-2019 Stuart Bishop + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..2cb10460745926764ee300e57e7789229bd98c91 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/METADATA @@ -0,0 +1,649 @@ +Metadata-Version: 2.1 +Name: pytz +Version: 2024.1 +Summary: World timezone definitions, modern and historical +Home-page: http://pythonhosted.org/pytz +Author: Stuart Bishop +Author-email: stuart@stuartbishop.net +Maintainer: Stuart Bishop +Maintainer-email: stuart@stuartbishop.net +License: MIT +Download-URL: https://pypi.org/project/pytz/ +Keywords: timezone,tzinfo,datetime,olson,time +Platform: Independent +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.4 +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +License-File: LICENSE.txt + +pytz - World Timezone Definitions for Python +============================================ + +:Author: Stuart Bishop + +Introduction +~~~~~~~~~~~~ + +pytz brings the Olson tz database into Python. This library allows +accurate and cross platform timezone calculations using Python 2.4 +or higher. It also solves the issue of ambiguous times at the end +of daylight saving time, which you can read more about in the Python +Library Reference (``datetime.tzinfo``). + +Almost all of the Olson timezones are supported. + +.. note:: + + Projects using Python 3.9 or later should be using the support + now included as part of the standard library, and third party + packages work with it such as `tzdata `_. + pytz offers no advantages beyond backwards compatibility with + code written for earlier versions of Python. + +.. note:: + + This library differs from the documented Python API for + tzinfo implementations; if you want to create local wallclock + times you need to use the ``localize()`` method documented in this + document. In addition, if you perform date arithmetic on local + times that cross DST boundaries, the result may be in an incorrect + timezone (ie. subtract 1 minute from 2002-10-27 1:00 EST and you get + 2002-10-27 0:59 EST instead of the correct 2002-10-27 1:59 EDT). A + ``normalize()`` method is provided to correct this. 
Unfortunately these + issues cannot be resolved without modifying the Python datetime + implementation (see PEP-431). + + +Installation +~~~~~~~~~~~~ + +This package can either be installed using ``pip`` or from a tarball using the +standard Python distutils. + +If you are installing using ``pip``, you don't need to download anything as the +latest version will be downloaded for you from PyPI:: + + pip install pytz + +If you are installing from a tarball, run the following command as an +administrative user:: + + python setup.py install + + +pytz for Enterprise +~~~~~~~~~~~~~~~~~~~ + +Available as part of the Tidelift Subscription. + +The maintainers of pytz and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more. `_. + + +Example & Usage +~~~~~~~~~~~~~~~ + +Localized times and date arithmetic +----------------------------------- + +>>> from datetime import datetime, timedelta +>>> from pytz import timezone +>>> import pytz +>>> utc = pytz.utc +>>> utc.zone +'UTC' +>>> eastern = timezone('US/Eastern') +>>> eastern.zone +'US/Eastern' +>>> amsterdam = timezone('Europe/Amsterdam') +>>> fmt = '%Y-%m-%d %H:%M:%S %Z%z' + +This library only supports two ways of building a localized time. The +first is to use the ``localize()`` method provided by the pytz library. +This is used to localize a naive datetime (datetime with no timezone +information): + +>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 6, 0, 0)) +>>> print(loc_dt.strftime(fmt)) +2002-10-27 06:00:00 EST-0500 + +The second way of building a localized time is by converting an existing +localized time using the standard ``astimezone()`` method: + +>>> ams_dt = loc_dt.astimezone(amsterdam) +>>> ams_dt.strftime(fmt) +'2002-10-27 12:00:00 CET+0100' + +Unfortunately using the tzinfo argument of the standard datetime +constructors ''does not work'' with pytz for many timezones. + +>>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=amsterdam).strftime(fmt) # /!\ Does not work this way! +'2002-10-27 12:00:00 LMT+0018' + +It is safe for timezones without daylight saving transitions though, such +as UTC: + +>>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=pytz.utc).strftime(fmt) # /!\ Not recommended except for UTC +'2002-10-27 12:00:00 UTC+0000' + +The preferred way of dealing with times is to always work in UTC, +converting to localtime only when generating output to be read +by humans. + +>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) +>>> loc_dt = utc_dt.astimezone(eastern) +>>> loc_dt.strftime(fmt) +'2002-10-27 01:00:00 EST-0500' + +This library also allows you to do date arithmetic using local +times, although it is more complicated than working in UTC as you +need to use the ``normalize()`` method to handle daylight saving time +and other timezone transitions. In this example, ``loc_dt`` is set +to the instant when daylight saving time ends in the US/Eastern +timezone. + +>>> before = loc_dt - timedelta(minutes=10) +>>> before.strftime(fmt) +'2002-10-27 00:50:00 EST-0500' +>>> eastern.normalize(before).strftime(fmt) +'2002-10-27 01:50:00 EDT-0400' +>>> after = eastern.normalize(before + timedelta(minutes=20)) +>>> after.strftime(fmt) +'2002-10-27 01:10:00 EST-0500' + +Creating local times is also tricky, and the reason why working with +local times is not recommended. 
Unfortunately, you cannot just pass +a ``tzinfo`` argument when constructing a datetime (see the next +section for more details) + +>>> dt = datetime(2002, 10, 27, 1, 30, 0) +>>> dt1 = eastern.localize(dt, is_dst=True) +>>> dt1.strftime(fmt) +'2002-10-27 01:30:00 EDT-0400' +>>> dt2 = eastern.localize(dt, is_dst=False) +>>> dt2.strftime(fmt) +'2002-10-27 01:30:00 EST-0500' + +Converting between timezones is more easily done, using the +standard astimezone method. + +>>> utc_dt = datetime.fromtimestamp(1143408899, tz=utc) +>>> utc_dt.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' +>>> au_tz = timezone('Australia/Sydney') +>>> au_dt = utc_dt.astimezone(au_tz) +>>> au_dt.strftime(fmt) +'2006-03-27 08:34:59 AEDT+1100' +>>> utc_dt2 = au_dt.astimezone(utc) +>>> utc_dt2.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' +>>> utc_dt == utc_dt2 +True + +You can take shortcuts when dealing with the UTC side of timezone +conversions. ``normalize()`` and ``localize()`` are not really +necessary when there are no daylight saving time transitions to +deal with. + +>>> utc_dt = datetime.fromtimestamp(1143408899, tz=utc) +>>> utc_dt.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' +>>> au_tz = timezone('Australia/Sydney') +>>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz)) +>>> au_dt.strftime(fmt) +'2006-03-27 08:34:59 AEDT+1100' +>>> utc_dt2 = au_dt.astimezone(utc) +>>> utc_dt2.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' + + +``tzinfo`` API +-------------- + +The ``tzinfo`` instances returned by the ``timezone()`` function have +been extended to cope with ambiguous times by adding an ``is_dst`` +parameter to the ``utcoffset()``, ``dst()`` && ``tzname()`` methods. + +>>> tz = timezone('America/St_Johns') + +>>> normal = datetime(2009, 9, 1) +>>> ambiguous = datetime(2009, 10, 31, 23, 30) + +The ``is_dst`` parameter is ignored for most timestamps. It is only used +during DST transition ambiguous periods to resolve that ambiguity. + +>>> print(tz.utcoffset(normal, is_dst=True)) +-1 day, 21:30:00 +>>> print(tz.dst(normal, is_dst=True)) +1:00:00 +>>> tz.tzname(normal, is_dst=True) +'NDT' + +>>> print(tz.utcoffset(ambiguous, is_dst=True)) +-1 day, 21:30:00 +>>> print(tz.dst(ambiguous, is_dst=True)) +1:00:00 +>>> tz.tzname(ambiguous, is_dst=True) +'NDT' + +>>> print(tz.utcoffset(normal, is_dst=False)) +-1 day, 21:30:00 +>>> tz.dst(normal, is_dst=False).seconds +3600 +>>> tz.tzname(normal, is_dst=False) +'NDT' + +>>> print(tz.utcoffset(ambiguous, is_dst=False)) +-1 day, 20:30:00 +>>> tz.dst(ambiguous, is_dst=False) +datetime.timedelta(0) +>>> tz.tzname(ambiguous, is_dst=False) +'NST' + +If ``is_dst`` is not specified, ambiguous timestamps will raise +an ``pytz.exceptions.AmbiguousTimeError`` exception. + +>>> print(tz.utcoffset(normal)) +-1 day, 21:30:00 +>>> print(tz.dst(normal)) +1:00:00 +>>> tz.tzname(normal) +'NDT' + +>>> import pytz.exceptions +>>> try: +... tz.utcoffset(ambiguous) +... except pytz.exceptions.AmbiguousTimeError: +... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) +pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 +>>> try: +... tz.dst(ambiguous) +... except pytz.exceptions.AmbiguousTimeError: +... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) +pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 +>>> try: +... tz.tzname(ambiguous) +... except pytz.exceptions.AmbiguousTimeError: +... 
print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) +pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 + + +Problems with Localtime +~~~~~~~~~~~~~~~~~~~~~~~ + +The major problem we have to deal with is that certain datetimes +may occur twice in a year. For example, in the US/Eastern timezone +on the last Sunday morning in October, the following sequence +happens: + + - 01:00 EDT occurs + - 1 hour later, instead of 2:00am the clock is turned back 1 hour + and 01:00 happens again (this time 01:00 EST) + +In fact, every instant between 01:00 and 02:00 occurs twice. This means +that if you try and create a time in the 'US/Eastern' timezone +the standard datetime syntax, there is no way to specify if you meant +before of after the end-of-daylight-saving-time transition. Using the +pytz custom syntax, the best you can do is make an educated guess: + +>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 1, 30, 00)) +>>> loc_dt.strftime(fmt) +'2002-10-27 01:30:00 EST-0500' + +As you can see, the system has chosen one for you and there is a 50% +chance of it being out by one hour. For some applications, this does +not matter. However, if you are trying to schedule meetings with people +in different timezones or analyze log files it is not acceptable. + +The best and simplest solution is to stick with using UTC. The pytz +package encourages using UTC for internal timezone representation by +including a special UTC implementation based on the standard Python +reference implementation in the Python documentation. + +The UTC timezone unpickles to be the same instance, and pickles to a +smaller size than other pytz tzinfo instances. The UTC implementation +can be obtained as pytz.utc, pytz.UTC, or pytz.timezone('UTC'). + +>>> import pickle, pytz +>>> dt = datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc) +>>> naive = dt.replace(tzinfo=None) +>>> p = pickle.dumps(dt, 1) +>>> naive_p = pickle.dumps(naive, 1) +>>> len(p) - len(naive_p) +17 +>>> new = pickle.loads(p) +>>> new == dt +True +>>> new is dt +False +>>> new.tzinfo is dt.tzinfo +True +>>> pytz.utc is pytz.UTC is pytz.timezone('UTC') +True + +Note that some other timezones are commonly thought of as the same (GMT, +Greenwich, Universal, etc.). The definition of UTC is distinct from these +other timezones, and they are not equivalent. For this reason, they will +not compare the same in Python. + +>>> utc == pytz.timezone('GMT') +False + +See the section `What is UTC`_, below. + +If you insist on working with local times, this library provides a +facility for constructing them unambiguously: + +>>> loc_dt = datetime(2002, 10, 27, 1, 30, 00) +>>> est_dt = eastern.localize(loc_dt, is_dst=True) +>>> edt_dt = eastern.localize(loc_dt, is_dst=False) +>>> print(est_dt.strftime(fmt) + ' / ' + edt_dt.strftime(fmt)) +2002-10-27 01:30:00 EDT-0400 / 2002-10-27 01:30:00 EST-0500 + +If you pass None as the is_dst flag to localize(), pytz will refuse to +guess and raise exceptions if you try to build ambiguous or non-existent +times. + +For example, 1:30am on 27th Oct 2002 happened twice in the US/Eastern +timezone when the clocks where put back at the end of Daylight Saving +Time: + +>>> dt = datetime(2002, 10, 27, 1, 30, 00) +>>> try: +... eastern.localize(dt, is_dst=None) +... except pytz.exceptions.AmbiguousTimeError: +... 
print('pytz.exceptions.AmbiguousTimeError: %s' % dt) +pytz.exceptions.AmbiguousTimeError: 2002-10-27 01:30:00 + +Similarly, 2:30am on 7th April 2002 never happened at all in the +US/Eastern timezone, as the clocks where put forward at 2:00am skipping +the entire hour: + +>>> dt = datetime(2002, 4, 7, 2, 30, 00) +>>> try: +... eastern.localize(dt, is_dst=None) +... except pytz.exceptions.NonExistentTimeError: +... print('pytz.exceptions.NonExistentTimeError: %s' % dt) +pytz.exceptions.NonExistentTimeError: 2002-04-07 02:30:00 + +Both of these exceptions share a common base class to make error handling +easier: + +>>> isinstance(pytz.AmbiguousTimeError(), pytz.InvalidTimeError) +True +>>> isinstance(pytz.NonExistentTimeError(), pytz.InvalidTimeError) +True + + +A special case is where countries change their timezone definitions +with no daylight savings time switch. For example, in 1915 Warsaw +switched from Warsaw time to Central European time with no daylight savings +transition. So at the stroke of midnight on August 5th 1915 the clocks +were wound back 24 minutes creating an ambiguous time period that cannot +be specified without referring to the timezone abbreviation or the +actual UTC offset. In this case midnight happened twice, neither time +during a daylight saving time period. pytz handles this transition by +treating the ambiguous period before the switch as daylight savings +time, and the ambiguous period after as standard time. + + +>>> warsaw = pytz.timezone('Europe/Warsaw') +>>> amb_dt1 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=True) +>>> amb_dt1.strftime(fmt) +'1915-08-04 23:59:59 WMT+0124' +>>> amb_dt2 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=False) +>>> amb_dt2.strftime(fmt) +'1915-08-04 23:59:59 CET+0100' +>>> switch_dt = warsaw.localize(datetime(1915, 8, 5, 00, 00, 00), is_dst=False) +>>> switch_dt.strftime(fmt) +'1915-08-05 00:00:00 CET+0100' +>>> str(switch_dt - amb_dt1) +'0:24:01' +>>> str(switch_dt - amb_dt2) +'0:00:01' + +The best way of creating a time during an ambiguous time period is +by converting from another timezone such as UTC: + +>>> utc_dt = datetime(1915, 8, 4, 22, 36, tzinfo=pytz.utc) +>>> utc_dt.astimezone(warsaw).strftime(fmt) +'1915-08-04 23:36:00 CET+0100' + +The standard Python way of handling all these ambiguities is not to +handle them, such as demonstrated in this example using the US/Eastern +timezone definition from the Python documentation (Note that this +implementation only works for dates between 1987 and 2006 - it is +included for tests only!): + +>>> from pytz.reference import Eastern # pytz.reference only for tests +>>> dt = datetime(2002, 10, 27, 0, 30, tzinfo=Eastern) +>>> str(dt) +'2002-10-27 00:30:00-04:00' +>>> str(dt + timedelta(hours=1)) +'2002-10-27 01:30:00-05:00' +>>> str(dt + timedelta(hours=2)) +'2002-10-27 02:30:00-05:00' +>>> str(dt + timedelta(hours=3)) +'2002-10-27 03:30:00-05:00' + +Notice the first two results? At first glance you might think they are +correct, but taking the UTC offset into account you find that they are +actually two hours appart instead of the 1 hour we asked for. + +>>> from pytz.reference import UTC # pytz.reference only for tests +>>> str(dt.astimezone(UTC)) +'2002-10-27 04:30:00+00:00' +>>> str((dt + timedelta(hours=1)).astimezone(UTC)) +'2002-10-27 06:30:00+00:00' + + +Country Information +~~~~~~~~~~~~~~~~~~~ + +A mechanism is provided to access the timezones commonly in use +for a particular country, looked up using the ISO 3166 country code. 
+It returns a list of strings that can be used to retrieve the relevant +tzinfo instance using ``pytz.timezone()``: + +>>> print(' '.join(pytz.country_timezones['nz'])) +Pacific/Auckland Pacific/Chatham + +The Olson database comes with a ISO 3166 country code to English country +name mapping that pytz exposes as a dictionary: + +>>> print(pytz.country_names['nz']) +New Zealand + + +What is UTC +~~~~~~~~~~~ + +'UTC' is `Coordinated Universal Time`_. It is a successor to, but distinct +from, Greenwich Mean Time (GMT) and the various definitions of Universal +Time. UTC is now the worldwide standard for regulating clocks and time +measurement. + +All other timezones are defined relative to UTC, and include offsets like +UTC+0800 - hours to add or subtract from UTC to derive the local time. No +daylight saving time occurs in UTC, making it a useful timezone to perform +date arithmetic without worrying about the confusion and ambiguities caused +by daylight saving time transitions, your country changing its timezone, or +mobile computers that roam through multiple timezones. + +.. _Coordinated Universal Time: https://en.wikipedia.org/wiki/Coordinated_Universal_Time + + +Helpers +~~~~~~~ + +There are two lists of timezones provided. + +``all_timezones`` is the exhaustive list of the timezone names that can +be used. + +>>> from pytz import all_timezones +>>> len(all_timezones) >= 500 +True +>>> 'Etc/Greenwich' in all_timezones +True + +``common_timezones`` is a list of useful, current timezones. It doesn't +contain deprecated zones or historical zones, except for a few I've +deemed in common usage, such as US/Eastern (open a bug report if you +think other timezones are deserving of being included here). It is also +a sequence of strings. + +>>> from pytz import common_timezones +>>> len(common_timezones) < len(all_timezones) +True +>>> 'Etc/Greenwich' in common_timezones +False +>>> 'Australia/Melbourne' in common_timezones +True +>>> 'US/Eastern' in common_timezones +True +>>> 'Canada/Eastern' in common_timezones +True +>>> 'Australia/Yancowinna' in all_timezones +True +>>> 'Australia/Yancowinna' in common_timezones +False + +Both ``common_timezones`` and ``all_timezones`` are alphabetically +sorted: + +>>> common_timezones_dupe = common_timezones[:] +>>> common_timezones_dupe.sort() +>>> common_timezones == common_timezones_dupe +True +>>> all_timezones_dupe = all_timezones[:] +>>> all_timezones_dupe.sort() +>>> all_timezones == all_timezones_dupe +True + +``all_timezones`` and ``common_timezones`` are also available as sets. + +>>> from pytz import all_timezones_set, common_timezones_set +>>> 'US/Eastern' in all_timezones_set +True +>>> 'US/Eastern' in common_timezones_set +True +>>> 'Australia/Victoria' in common_timezones_set +False + +You can also retrieve lists of timezones used by particular countries +using the ``country_timezones()`` function. It requires an ISO-3166 +two letter country code. + +>>> from pytz import country_timezones +>>> print(' '.join(country_timezones('ch'))) +Europe/Zurich +>>> print(' '.join(country_timezones('CH'))) +Europe/Zurich + + +Internationalization - i18n/l10n +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pytz is an interface to the IANA database, which uses ASCII names. The `Unicode Consortium's Unicode Locales (CLDR) `_ +project provides translations. Python packages such as +`Babel `_ +and Thomas Khyn's `l18n `_ package can be used +to access these translations from Python. + + +License +~~~~~~~ + +MIT license. 
+ +This code is also available as part of Zope 3 under the Zope Public +License, Version 2.1 (ZPL). + +I'm happy to relicense this code if necessary for inclusion in other +open source projects. + + +Latest Versions +~~~~~~~~~~~~~~~ + +This package will be updated after releases of the Olson timezone +database. The latest version can be downloaded from the `Python Package +Index `_. The code that is used +to generate this distribution is hosted on Github and available +using git:: + + git clone https://github.com/stub42/pytz.git + +Announcements of new releases are made on +`Launchpad `_, and the +`Atom feed `_ +hosted there. + + +Bugs, Feature Requests & Patches +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Bugs should be reported on `Github `_. +Feature requests are unlikely to be considered, and efforts instead directed +to timezone support now built into Python or packages that work with it. + + +Security Issues +~~~~~~~~~~~~~~~ + +Reports about security issues can be made via `Tidelift `_. + + +Issues & Limitations +~~~~~~~~~~~~~~~~~~~~ + +- This project is in maintenance mode. Projects using Python 3.9 or later + are best served by using the timezone functionaly now included in core + Python and packages that work with it such as `tzdata `_. + +- Offsets from UTC are rounded to the nearest whole minute, so timezones + such as Europe/Amsterdam pre 1937 will be up to 30 seconds out. This + was a limitation of the Python datetime library. + +- If you think a timezone definition is incorrect, I probably can't fix + it. pytz is a direct translation of the Olson timezone database, and + changes to the timezone definitions need to be made to this source. + If you find errors they should be reported to the time zone mailing + list, linked from http://www.iana.org/time-zones. 
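+
+For code being migrated off pytz, a rough sketch of the equivalent
+workflow using the standard library ``zoneinfo`` module (Python 3.9 or
+later, possibly with the ``tzdata`` package where no system zone
+database is available) might look like the following; the ``fold``
+attribute takes the place of pytz's ``is_dst`` flag, and no
+``localize()`` or ``normalize()`` calls are needed::
+
+    from datetime import datetime, timedelta, timezone
+    from zoneinfo import ZoneInfo  # standard library in Python 3.9+
+
+    eastern = ZoneInfo("America/New_York")
+
+    # Attach the zone directly; no localize() step is required.
+    # fold=0 selects the first occurrence of an ambiguous wall-clock
+    # time (EDT for this date), fold=1 selects the second (EST).
+    first = datetime(2002, 10, 27, 1, 30, fold=0, tzinfo=eastern)
+    second = datetime(2002, 10, 27, 1, 30, fold=1, tzinfo=eastern)
+
+    # As with pytz, the safest arithmetic across a DST transition goes
+    # through UTC; no normalize() call is needed afterwards.
+    later = (first.astimezone(timezone.utc) + timedelta(hours=1)).astimezone(eastern)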
+ + +Further Reading +~~~~~~~~~~~~~~~ + +More info than you want to know about timezones: +https://data.iana.org/time-zones/tz-link.html + + +Contact +~~~~~~~ + +Stuart Bishop + + diff --git a/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..3e8b5b44c170e83e95ff9ca97abf9ef133d5fa47 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/RECORD @@ -0,0 +1,622 @@ +pytz-2024.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pytz-2024.1.dist-info/LICENSE.txt,sha256=vosaN-vibFkqkPbA6zMQOn84POL010mMCvmlJpkKB7g,1088 +pytz-2024.1.dist-info/METADATA,sha256=2mOz3YzpRCJtu0iklrKsUm8a8BmJglIL_qqGhhduPJk,22325 +pytz-2024.1.dist-info/RECORD,, +pytz-2024.1.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +pytz-2024.1.dist-info/top_level.txt,sha256=6xRYlt934v1yHb1JIrXgHyGxn3cqACvd-yE8ski_kcc,5 +pytz-2024.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +pytz/__init__.py,sha256=RZJJJ1W2RyP9fllsMNO4w-yjJRpIazWJ9fvj5telYig,35101 +pytz/__pycache__/__init__.cpython-310.pyc,, +pytz/__pycache__/exceptions.cpython-310.pyc,, +pytz/__pycache__/lazy.cpython-310.pyc,, +pytz/__pycache__/reference.cpython-310.pyc,, +pytz/__pycache__/tzfile.cpython-310.pyc,, +pytz/__pycache__/tzinfo.cpython-310.pyc,, +pytz/exceptions.py,sha256=434ZcuLlpLQY9mWoGq7zJMV1TyiYvVgpKBU1qZkbDjM,1571 +pytz/lazy.py,sha256=toeR5uDWKBj6ezsUZ4elNP6CEMtK7CO2jS9A30nsFbo,5404 +pytz/reference.py,sha256=zUtCki7JFEmrzrjNsfMD7YL0lWDxynKc1Ubo4iXSs74,3778 +pytz/tzfile.py,sha256=K2y7pZs4vydpZVftrfAA_-hgw17y1Szc7z_QCse6udU,4723 +pytz/tzinfo.py,sha256=XfaVOoO3KsCvtUYaCd0fvgBXWZ8tgevGYUoBh_uiE60,19340 +pytz/zoneinfo/Africa/Abidjan,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Accra,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Addis_Ababa,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Algiers,sha256=vaFpjNVCwObnbfu82rOQzdJvN6nVgmpXpQ1aqzfzsqY,735 +pytz/zoneinfo/Africa/Asmara,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Asmera,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Bamako,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Bangui,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Banjul,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Bissau,sha256=IjuxDP6EZiDHFvl_bHS6NN7sdRxLKXllooBC829poak,194 +pytz/zoneinfo/Africa/Blantyre,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Brazzaville,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Bujumbura,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Cairo,sha256=Lft-GCLQhaSJm9VqUmsEFoHIS1Vhfa7pFJn9GZCpifs,2399 +pytz/zoneinfo/Africa/Casablanca,sha256=4RqVbw_F3ZucopIC2ivAJ8WDwj5wRODAB67tBpdXcgA,2429 +pytz/zoneinfo/Africa/Ceuta,sha256=Cw-2_nFDGbN8WqIsVpcauyZooWX8j3Kmx2PnC0fHut8,2052 +pytz/zoneinfo/Africa/Conakry,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Dakar,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Dar_es_Salaam,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Djibouti,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 
+pytz/zoneinfo/Africa/Douala,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/El_Aaiun,sha256=UWCCqQLJxd8qsTYw82kz9W1suwW5TRgnZw31sDWDz20,2295 +pytz/zoneinfo/Africa/Freetown,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Gaborone,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Harare,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Johannesburg,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 +pytz/zoneinfo/Africa/Juba,sha256=UVnIqEPJwHLTMC-r5qZQHNv9opoYVsKdq-ta_5XUw_Q,679 +pytz/zoneinfo/Africa/Kampala,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Khartoum,sha256=MYWDoJ3AcCItZdApoeOgtWWDDxquwTon5v5TOGP70-o,679 +pytz/zoneinfo/Africa/Kigali,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Kinshasa,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Lagos,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Libreville,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Lome,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Luanda,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Lubumbashi,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Lusaka,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Malabo,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Maputo,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Maseru,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 +pytz/zoneinfo/Africa/Mbabane,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 +pytz/zoneinfo/Africa/Mogadishu,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Monrovia,sha256=-VsJW5cU4KdvfgYaQVv4lcuzmaKIVFMd42nO6RXOBdU,208 +pytz/zoneinfo/Africa/Nairobi,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Ndjamena,sha256=8T3A0Zm9Gj0Bvm6rd88t3GAXKiKdGUfHlIqYlkYI0KM,199 +pytz/zoneinfo/Africa/Niamey,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Nouakchott,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Ouagadougou,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Porto-Novo,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Sao_Tome,sha256=MdjxpQ268uzJ7Zx1ZroFUtRUwqsJ6F_yY3AYV9FXw1I,254 +pytz/zoneinfo/Africa/Timbuktu,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Tripoli,sha256=W1dptGD70T7ppGoo0fczFQeDiIp0nultLNPV66MwB2c,625 +pytz/zoneinfo/Africa/Tunis,sha256=OFVMEM4eYT2Ez0beuhEUCTSIpcFldWxsV2uEoTZIUNI,689 +pytz/zoneinfo/Africa/Windhoek,sha256=xuhvudrMH4alnVmouSTQI8YL8F_HbgsF2EQ7AZKzuHs,955 +pytz/zoneinfo/America/Adak,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 +pytz/zoneinfo/America/Anchorage,sha256=oZA1NSPS2BWdymYpnCHFO8BlYVS-ll5KLg2Ez9CbETs,2371 +pytz/zoneinfo/America/Anguilla,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Antigua,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Araguaina,sha256=G6v9wYFZ8EB4WQfIsqRbbiiKd2b27j7Zt5dFjBbzx2o,870 +pytz/zoneinfo/America/Argentina/Buenos_Aires,sha256=JmU8lBwmy29gR6OmeytvFdMRx6ObJKnYNHmLyMmXX2M,1062 +pytz/zoneinfo/America/Argentina/Catamarca,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062 
+pytz/zoneinfo/America/Argentina/ComodRivadavia,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062 +pytz/zoneinfo/America/Argentina/Cordoba,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062 +pytz/zoneinfo/America/Argentina/Jujuy,sha256=PGmAehypCxj0XCenCSWqylDIPbKLK0DlrwJK_24D590,1034 +pytz/zoneinfo/America/Argentina/La_Rioja,sha256=Um6XoVXhsr62ad1mWuebe6NY0ZHauBdR9tMGDgqCOHg,1076 +pytz/zoneinfo/America/Argentina/Mendoza,sha256=xcOVtvRyVYFAU90y2QYwpyQhpMLyAp7-Fxvku4kgl0c,1062 +pytz/zoneinfo/America/Argentina/Rio_Gallegos,sha256=F9ZKR4o8gLHX7QBuIjMapGIdmzJxpqwbouPgZ5MqDpY,1062 +pytz/zoneinfo/America/Argentina/Salta,sha256=h1KYrDNIapvDkYhi1PaB8WD1qWOe4vhhgDJWDCGV4jc,1034 +pytz/zoneinfo/America/Argentina/San_Juan,sha256=AI2GltA80mPNzhHxYycuEwIbO1ANXyIqBQZMpjqKqdQ,1076 +pytz/zoneinfo/America/Argentina/San_Luis,sha256=2ItGRcLVK2wx8MyJsHbIBBeAkU4B-MN5x1ZxNyZ7UJE,1088 +pytz/zoneinfo/America/Argentina/Tucuman,sha256=twO-FqtNJV8XOzWTvFQ-xnEcWCoDUHY3gpVIG0Mzbf8,1090 +pytz/zoneinfo/America/Argentina/Ushuaia,sha256=A6IbpVlY9IIPoSKMFRR9DMROdwXUSDc2HsASueOSnqo,1062 +pytz/zoneinfo/America/Aruba,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Asuncion,sha256=V8wwkUoNqyj0C-fUSADpU7HU8H3Qkr3jNPJ4SLsGUIc,2030 +pytz/zoneinfo/America/Atikokan,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 +pytz/zoneinfo/America/Atka,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 +pytz/zoneinfo/America/Bahia,sha256=qi7dA6FofDhLxVMmd2L8bK3HeaQnc9X-jiijwyfhs3g,1010 +pytz/zoneinfo/America/Bahia_Banderas,sha256=L6iHYbA1Us1pljllFLEIAHW4ZaZhFKoG2Zr8TT5aY38,1152 +pytz/zoneinfo/America/Barbados,sha256=ima-Qrrhazu4Qfvu2Z0-e6E-GTiYknuJBu6c2yVG9LE,436 +pytz/zoneinfo/America/Belem,sha256=aZMUgtFDdHNISpqyQRYbmS2IBD-BAS3CaJnhu6onLCY,562 +pytz/zoneinfo/America/Belize,sha256=pkfLY2KfPchbeJa1pWcXmWAwp4ZlRvxWLVezXnrbkws,1614 +pytz/zoneinfo/America/Blanc-Sablon,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Boa_Vista,sha256=dMtaG11kGlJrgJJgGWEDZZAmnO_HfT3L4X8pI72LLFY,618 +pytz/zoneinfo/America/Bogota,sha256=Z1ernZZGQxulE8KFWHYWcM3SV1jn2_QEc1Q0OJzHRak,232 +pytz/zoneinfo/America/Boise,sha256=7HQsNPJiUheQgFz5kVLvTnf5xhXAYaeANqDskxKz2Vs,2410 +pytz/zoneinfo/America/Buenos_Aires,sha256=JmU8lBwmy29gR6OmeytvFdMRx6ObJKnYNHmLyMmXX2M,1062 +pytz/zoneinfo/America/Cambridge_Bay,sha256=_4xRlX3WdVpEcqoT6myD7NeTCXnn9OYk_iH006bwULo,2254 +pytz/zoneinfo/America/Campo_Grande,sha256=gINiXg5i2e6Rh2Nbo2bFqhPAJL4F4cAqGnBankXTDXw,1430 +pytz/zoneinfo/America/Cancun,sha256=lI4ZtiBtxKqNHvU47vRSwc5-GDl8JOdC2A6oc9s8iIo,834 +pytz/zoneinfo/America/Caracas,sha256=mUNMFdDzZLav_ePA1ocBdmqVBierkeEszTIFpNCm5J0,250 +pytz/zoneinfo/America/Catamarca,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062 +pytz/zoneinfo/America/Cayenne,sha256=4k7Iv1woX4atqePKrcvMQD2Vk9Tmma7rW_AW_R62pCc,184 +pytz/zoneinfo/America/Cayman,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 +pytz/zoneinfo/America/Chicago,sha256=_roybr6I6sIAF6cYdIxGxoRpoef153Fty48dQ6bm9oY,3592 +pytz/zoneinfo/America/Chihuahua,sha256=ZAlPSsUfT3VGp1VdibnHIf-QsdEIqHuzX15wu2P2YQk,1102 +pytz/zoneinfo/America/Ciudad_Juarez,sha256=OQstyPrMxx3nNEbzgDhq_W0mK49-ApNMK7_6p-6dJ64,1538 +pytz/zoneinfo/America/Coral_Harbour,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 +pytz/zoneinfo/America/Cordoba,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062 +pytz/zoneinfo/America/Costa_Rica,sha256=74rYa6lrgIkyls9PkHo8SCYl9oOqiuG5S7MWdnJelP4,316 +pytz/zoneinfo/America/Creston,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360 
+pytz/zoneinfo/America/Cuiaba,sha256=GRJqkhRXNsOUcgjZddQxRIJdRYaw9pM_YLWbun88dkg,1402 +pytz/zoneinfo/America/Curacao,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Danmarkshavn,sha256=YRZAfUCoVtaL1L-MYMYMH1wyOaVQnfUo_gFnvMXSuzw,698 +pytz/zoneinfo/America/Dawson,sha256=rAHhyuMuyjf_eyA2SBG76MRBf_fj_xi5FAuiWVQgJhw,1614 +pytz/zoneinfo/America/Dawson_Creek,sha256=aJXCyP4j3ggE4wGCN-LrS9hpD_5zWHzQTeSAKTWEPUM,1050 +pytz/zoneinfo/America/Denver,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460 +pytz/zoneinfo/America/Detroit,sha256=hecz8yqY2Cj5B61G3gLZdAVZvRgK9l0P90c_gN-uD5g,2230 +pytz/zoneinfo/America/Dominica,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Edmonton,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332 +pytz/zoneinfo/America/Eirunepe,sha256=j5eExkjFaqtC-D8XK0rGzoF9yEgbSlTbPqVG9WKhEa8,642 +pytz/zoneinfo/America/El_Salvador,sha256=gvGN8Lkj-sGm2_rs8OUjAMf1oMtKp2Xes6UfWT0WqgU,224 +pytz/zoneinfo/America/Ensenada,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374 +pytz/zoneinfo/America/Fort_Nelson,sha256=erfODr3DrSpz65kAdO7Ts2dGbZxvddEP6gx4BX3y2J0,2240 +pytz/zoneinfo/America/Fort_Wayne,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682 +pytz/zoneinfo/America/Fortaleza,sha256=rjiSB0q1cBuMDOM9orW_uwe5UOLBwTlfjFotwOYe1mU,702 +pytz/zoneinfo/America/Glace_Bay,sha256=G8DGLGCapH_aYCF_OhaL5Qonf7FOAgAPwelO5htCWBc,2192 +pytz/zoneinfo/America/Godthab,sha256=KGXrMN-YkYpVCgLdpcfwMFQ77EsRAGsjUCG3yAUvVfw,1889 +pytz/zoneinfo/America/Goose_Bay,sha256=JgaLueghSvX2g725FOfIgpgvsqxZGykWOhAZWGpQZRY,3210 +pytz/zoneinfo/America/Grand_Turk,sha256=4YOFEPK60Bel2_fCsY6vSZxUcMJKjiKtyOf_Q0khEwU,1834 +pytz/zoneinfo/America/Grenada,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Guadeloupe,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Guatemala,sha256=dugUgCd6QY52yHkHuUP4jRWzo5x439IQigaYCvEF46Q,280 +pytz/zoneinfo/America/Guayaquil,sha256=j2UuIo-4RgSOlTNfu77mhZ92waNTeKFSvmoVemJooT0,232 +pytz/zoneinfo/America/Guyana,sha256=R0bOvCRDC8SRIexmhsduPdHbbRPwI2GviD9otExiUrk,248 +pytz/zoneinfo/America/Halifax,sha256=TZpmc5PwWoLfTfQoQ_b3U17BE2iVKSeNkR0Ho8mbTn8,3424 +pytz/zoneinfo/America/Havana,sha256=HUQeAuKBsEkI5SLZjqynXICOUVOajkKzKH5r-Ov5Odc,2416 +pytz/zoneinfo/America/Hermosillo,sha256=WnlVBpVBG8ONnz0wpxteXmuvSzOGwSlAisvDd1GtKYA,456 +pytz/zoneinfo/America/Indiana/Indianapolis,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682 +pytz/zoneinfo/America/Indiana/Knox,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444 +pytz/zoneinfo/America/Indiana/Marengo,sha256=f3tQ-lgMSUA7nvn64pXhKtJL7mWzGajoCega5MEJSbI,1738 +pytz/zoneinfo/America/Indiana/Petersburg,sha256=A88OHuM0Rg3iMLHjKgXq_d2jZCdVSytUQs-9W0KcFyQ,1920 +pytz/zoneinfo/America/Indiana/Tell_City,sha256=4dWqAr9Y2BXfL4pAQk-81c3gGl2cNdHXOD7_wJhhhn8,1700 +pytz/zoneinfo/America/Indiana/Vevay,sha256=H7VR2G-_sD_C5Rm4P3g1iRC1FWCPg4m0MGD3P1PLzsk,1430 +pytz/zoneinfo/America/Indiana/Vincennes,sha256=62mAxT7APFCaoygflnEzdOpe-fuW1yObI6m6EUUcS7A,1710 +pytz/zoneinfo/America/Indiana/Winamac,sha256=aZGM2jR8CH9BHSUq7XygiweDd6dorXLPXg246XsbR6s,1794 +pytz/zoneinfo/America/Indianapolis,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682 +pytz/zoneinfo/America/Inuvik,sha256=6J-mapDnrk9A1LtswoE34tqSy_ufedcEBNxixkrEjIo,2074 +pytz/zoneinfo/America/Iqaluit,sha256=feOnxAN0N0r-M1qlkrA4JMyawoc0tqae0iiBCPDAs4k,2202 +pytz/zoneinfo/America/Jamaica,sha256=wlagieUPRf5-beie-h7QsONbNzjGsm8vMs8uf28pw28,482 
+pytz/zoneinfo/America/Jujuy,sha256=PGmAehypCxj0XCenCSWqylDIPbKLK0DlrwJK_24D590,1034 +pytz/zoneinfo/America/Juneau,sha256=k7hxb0aGRnfnE-DBi3LkcjAzRPyAf0_Hw0vVFfjGeb0,2353 +pytz/zoneinfo/America/Kentucky/Louisville,sha256=tP072xV_n_vIQjxxcJ77AGeGj6yL1KPpn3fwids9g1U,2788 +pytz/zoneinfo/America/Kentucky/Monticello,sha256=LtdyCo85BrXQs6rlH61Ym-8KqWHH6PwAOjD0QxhIdzM,2368 +pytz/zoneinfo/America/Knox_IN,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444 +pytz/zoneinfo/America/Kralendijk,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/La_Paz,sha256=hqfD8LQHupdZhji2e93_9pOQAT-R7muzzjP0nyfbFXY,218 +pytz/zoneinfo/America/Lima,sha256=HHgTnDUnCZzibvL0MrG8qyOuvjmYYw3e3R5VbnxMZs8,392 +pytz/zoneinfo/America/Los_Angeles,sha256=aJd7ua1tGG_vxser02AQpm4wAI3LLTdgh6QcSYYecmg,2852 +pytz/zoneinfo/America/Louisville,sha256=tP072xV_n_vIQjxxcJ77AGeGj6yL1KPpn3fwids9g1U,2788 +pytz/zoneinfo/America/Lower_Princes,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Maceio,sha256=3R5DlSe32kQDmoSVIWpcyk2o7qohr-rliwqDSGFIMyQ,730 +pytz/zoneinfo/America/Managua,sha256=xBzF01AHn2E2fD8Qdy-DHFe36UqoeNpKPfChduBKWdk,430 +pytz/zoneinfo/America/Manaus,sha256=F6RLOOeOi9lymZiQmQ9pR8tFpPZ6EguNdPfOc6BhXDE,590 +pytz/zoneinfo/America/Marigot,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Martinique,sha256=fMs80kOU2YFvC0f9y2eje97JeAtTYBamXrnlTunNLzQ,232 +pytz/zoneinfo/America/Matamoros,sha256=fq-PqdmZrQ98UsFmHA9ivjBZv5GEBRTOuLQ5Cu5ajW8,1418 +pytz/zoneinfo/America/Mazatlan,sha256=RQQVwlEVHRp2X-c_0hJ46y54abTlqUuLkyrUUicyc5g,1128 +pytz/zoneinfo/America/Mendoza,sha256=xcOVtvRyVYFAU90y2QYwpyQhpMLyAp7-Fxvku4kgl0c,1062 +pytz/zoneinfo/America/Menominee,sha256=Arv9WLbfhNcpRsUjHDU757BEdwlp08Gt30AixG3gZ04,2274 +pytz/zoneinfo/America/Merida,sha256=ORJCGiO2mXG-kk5ZZGro1MNuKqRnJx6HJlvoezTMM90,1004 +pytz/zoneinfo/America/Metlakatla,sha256=twmieGTVY2V-U8nFxqvx7asYv8GVjeWdLtrOI7UApVI,1423 +pytz/zoneinfo/America/Mexico_City,sha256=A5MlfDUZ4O1-jMTRt0WPem7qqcW0Nrslls1hlc8C4-Q,1222 +pytz/zoneinfo/America/Miquelon,sha256=l5txBJYe9HTRZlILcbSL_HNDYrjUb0ouecNy7QEkg9c,1652 +pytz/zoneinfo/America/Moncton,sha256=Wmv-bk9aKKcWWzOpc1UFu67HOfwaIk2Wmh3LgqGctys,3154 +pytz/zoneinfo/America/Monterrey,sha256=vKBLVjG0bNVDI07M4WwOVv2KbrYJVNTLmc19iM2CvTU,980 +pytz/zoneinfo/America/Montevideo,sha256=dQEBE4mjZPtyRjKXK6Z-bMHJdFqpwhIzxDH4x04rKYk,1496 +pytz/zoneinfo/America/Montreal,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/Montserrat,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Nassau,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/New_York,sha256=6e0H177gx2qdRC0JHvHwFmj-58TyYBTAqGixn-bBipU,3552 +pytz/zoneinfo/America/Nipigon,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/Nome,sha256=2izM3-P-PqJ9za6MdhzFfMvPFNq7Gim69tAvEwPeY2s,2367 +pytz/zoneinfo/America/Noronha,sha256=feeRAijQqKylZgqe84nKhsFLycT5zIBm7mLIvdyGw4w,702 +pytz/zoneinfo/America/North_Dakota/Beulah,sha256=qtgbqNu8M3AkHF2n-_oSps1pYT4SxgclbkkPKbXaBHs,2396 +pytz/zoneinfo/America/North_Dakota/Center,sha256=9ZWbK9YKkquULyBUFS3Lr_idxbt7V7y4W4EO0Kn20sw,2396 +pytz/zoneinfo/America/North_Dakota/New_Salem,sha256=DH_bsQfuUnK2obdb06KgisO4XLqht12BXdrgUsZZveg,2396 +pytz/zoneinfo/America/Nuuk,sha256=KGXrMN-YkYpVCgLdpcfwMFQ77EsRAGsjUCG3yAUvVfw,1889 +pytz/zoneinfo/America/Ojinaga,sha256=9catgEQ2SD7qfuvTMxs15Cdd9SKaUy-svEzPBFw2Q3Q,1524 +pytz/zoneinfo/America/Panama,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 
+pytz/zoneinfo/America/Pangnirtung,sha256=feOnxAN0N0r-M1qlkrA4JMyawoc0tqae0iiBCPDAs4k,2202 +pytz/zoneinfo/America/Paramaribo,sha256=Z7UZvNlgd-qEUHjEPYXIkLNTgjMcCzk9EfUUEmUyd7M,248 +pytz/zoneinfo/America/Phoenix,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360 +pytz/zoneinfo/America/Port-au-Prince,sha256=09ZAJd4IOiMpfdpUuF1U44R_hRt6BvpAkFXOnYO9yOM,1434 +pytz/zoneinfo/America/Port_of_Spain,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Porto_Acre,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614 +pytz/zoneinfo/America/Porto_Velho,sha256=uSMV2hZWj-VyBhFBwC950wcThfN3jq6KlycESmQTLOA,562 +pytz/zoneinfo/America/Puerto_Rico,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Punta_Arenas,sha256=tR5uIf1351AWFqrqNtmXnhQWnKREmJaZqKBzaWRVMTQ,1902 +pytz/zoneinfo/America/Rainy_River,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868 +pytz/zoneinfo/America/Rankin_Inlet,sha256=nXgqjL3O2BV0em-Xk8qVRRZb_X0yQmHE6vmSSvI9Kzc,2066 +pytz/zoneinfo/America/Recife,sha256=bJ_HE0-JFio4-owpZ0pLO8U3ai0fiGu8QHL0DexLiLc,702 +pytz/zoneinfo/America/Regina,sha256=yjqT08pHbICYe83H8JmtaDBvCFqRv7Tfze3Y8xuXukw,980 +pytz/zoneinfo/America/Resolute,sha256=CnMU2dBI-63vt8-J0Q1Ropx-8b9pRCLjhvrycMIedGg,2066 +pytz/zoneinfo/America/Rio_Branco,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614 +pytz/zoneinfo/America/Rosario,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062 +pytz/zoneinfo/America/Santa_Isabel,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374 +pytz/zoneinfo/America/Santarem,sha256=VmZP9S5pPucFxyqAOV908EmWXQZvgCgWLmlJJTUl0LE,588 +pytz/zoneinfo/America/Santiago,sha256=0CDw13dCMUsoquMupoJgupkzAUNhDK6E0lVxURA7osA,2515 +pytz/zoneinfo/America/Santo_Domingo,sha256=DKtaEj8fQ92ybITTWU4Bm160S9pzJmUVbjaWRnenxU4,458 +pytz/zoneinfo/America/Sao_Paulo,sha256=BMBnRO4_4HjvO4t3njjrMGZr-ZPmegkvyvL8KPY6ZM4,1430 +pytz/zoneinfo/America/Scoresbysund,sha256=K-qkiMCCFgOe8ccPMABA-lDjc9vb6wpluBOCVfiBdLI,1935 +pytz/zoneinfo/America/Shiprock,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460 +pytz/zoneinfo/America/Sitka,sha256=aiS7Fk37hZpzZ9VkeJQeF-BqTLRC1QOTCgMAJwT8UxA,2329 +pytz/zoneinfo/America/St_Barthelemy,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/St_Johns,sha256=r1-17uKv27eZ3JsVkw_DLZQbo6wvjuuVu7C2pDsmOgI,3655 +pytz/zoneinfo/America/St_Kitts,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/St_Lucia,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/St_Thomas,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/St_Vincent,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Swift_Current,sha256=RRKOF7vZC8VvYxD8PP4J1_hUPayKBP7Lu80avRkfPDY,560 +pytz/zoneinfo/America/Tegucigalpa,sha256=EzOz7ntTlreMq69JZ2CcAb8Ps98V9bUMN480tpPIyw4,252 +pytz/zoneinfo/America/Thule,sha256=8xuPRaZU8RgO5ECqFYHYmnHioc81sBOailkVu8Y02i8,1502 +pytz/zoneinfo/America/Thunder_Bay,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/Tijuana,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374 +pytz/zoneinfo/America/Toronto,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/Tortola,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Vancouver,sha256=sknKH0jSPWam-DHfM35qXs8Nam7d5TFlkUI9Sgxryyg,2892 +pytz/zoneinfo/America/Virgin,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 
+pytz/zoneinfo/America/Whitehorse,sha256=TrR6PCnYG-mSClBMohqlP8qnYhXMUsydI-L-quXFxyM,1614 +pytz/zoneinfo/America/Winnipeg,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868 +pytz/zoneinfo/America/Yakutat,sha256=tFwnKbvwhyyn4LNTAn5ye_JWDdxjCerNDt7oOwUwO2M,2305 +pytz/zoneinfo/America/Yellowknife,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332 +pytz/zoneinfo/Antarctica/Casey,sha256=VeaLOxTfDyjfGXq5Ul95JEIMXNWHSW-0N3yOoS7VK-c,423 +pytz/zoneinfo/Antarctica/Davis,sha256=XB12dEq0Q-3XkzBNTNC7G1fzH-WxxctIuZqI3zp8ypI,283 +pytz/zoneinfo/Antarctica/DumontDUrville,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Antarctica/Macquarie,sha256=ie7RlaU8RHTorVVj-MX8StKMqx_oXf4UH2PUqpzcwe0,2260 +pytz/zoneinfo/Antarctica/Mawson,sha256=EjIFbqRdr2ZJBaI1XvoWRptnnW1LFrlhydxDDuIQjSI,185 +pytz/zoneinfo/Antarctica/McMurdo,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 +pytz/zoneinfo/Antarctica/Palmer,sha256=HTZY0M8td7oUx5REPgRCHuqKg5V3fjJEi4lYBNL4Etg,1404 +pytz/zoneinfo/Antarctica/Rothera,sha256=_9NY-f8vkozQYrjbUHP5YjcICg0-LuyA9PnIeK123RU,150 +pytz/zoneinfo/Antarctica/South_Pole,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 +pytz/zoneinfo/Antarctica/Syowa,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151 +pytz/zoneinfo/Antarctica/Troll,sha256=fjcYppwr1FnjEssee-RLgGOANzoUyfjse-RGK46PR2E,1148 +pytz/zoneinfo/Antarctica/Vostok,sha256=KfftwdzK6PkMDz0d-D3z4HKIBgY9KqsqHnTnqsPMrUg,213 +pytz/zoneinfo/Arctic/Longyearbyen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Asia/Aden,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151 +pytz/zoneinfo/Asia/Almaty,sha256=lPLWXk2f1mWYRQZFkIrq_5HkhocsUBis0M-yhdDHcBQ,983 +pytz/zoneinfo/Asia/Amman,sha256=Qv4cXXw7KBQWE882cgj0kjQ3wh1vpV1orJ2v2Jjxr2U,1433 +pytz/zoneinfo/Asia/Anadyr,sha256=WqKnHo5IHSWZ08d2sS5ytHtv0MQMoczP3W9zbDDrbYU,1174 +pytz/zoneinfo/Asia/Aqtau,sha256=4n654FZtDssXSfhQszjZG5OmtbE2zo1KbiWcYrFJg00,969 +pytz/zoneinfo/Asia/Aqtobe,sha256=1oFHTb-ybcTqLXm0r1ZOVgdYMTHlGoNs-Pgvux50d3E,997 +pytz/zoneinfo/Asia/Ashgabat,sha256=-sfGnRumio7_Bs8w9YH4xRDWgjB3wBeW7c0C56Qqk64,605 +pytz/zoneinfo/Asia/Ashkhabad,sha256=-sfGnRumio7_Bs8w9YH4xRDWgjB3wBeW7c0C56Qqk64,605 +pytz/zoneinfo/Asia/Atyrau,sha256=_U8COUIE9nG_HKddZE1Q0sPuz3rMwfjwmfnVDY_vSmg,977 +pytz/zoneinfo/Asia/Baghdad,sha256=S-plKI4zCLqI0idGABEk3oRTazNyrIj2T98-EtWtZD8,969 +pytz/zoneinfo/Asia/Bahrain,sha256=wklGY3WPGp-z1OUwb_KOHzRTwBndt1RfDg9Uttt36G4,185 +pytz/zoneinfo/Asia/Baku,sha256=6_hq98SGG0j0JA8qYx96WcIMZSLW4w460QXh_OM_ccg,1213 +pytz/zoneinfo/Asia/Bangkok,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185 +pytz/zoneinfo/Asia/Barnaul,sha256=3zeUimLTMrIZE0vX6XHFvB3MoqExoVbE5CSm6GV0zf0,1207 +pytz/zoneinfo/Asia/Beirut,sha256=_Z_2ZAg_iL9vU51JDB8CB04uXBDrf1kLIis-JnXaS2o,2154 +pytz/zoneinfo/Asia/Bishkek,sha256=IOoUyjABILCkXu1rjCIqSwAufRYFklc5YAC4jdhVw6Q,969 +pytz/zoneinfo/Asia/Brunei,sha256=D5qtyWJ_SM8bTQeJJIYhqqojxlVKbrFC1EYMDU9GzXQ,469 +pytz/zoneinfo/Asia/Calcutta,sha256=6Qw0EDbLcgMgDik8s7UTJn4QSjmllPNeGVJU5rwKF88,285 +pytz/zoneinfo/Asia/Chita,sha256=LbSlS23swFkANUScg8zkNR0imANWNfOIaYd39HbLdIQ,1207 +pytz/zoneinfo/Asia/Choibalsan,sha256=atm7FmPwZGsftLM7vS1LltjcdaDC-DSg1cIdP2MF17I,935 +pytz/zoneinfo/Asia/Chongqing,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/Asia/Chungking,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/Asia/Colombo,sha256=w52L7bgT4m5hcgRuevIPY83xytfkBmkLhnKMwp16KsY,358 +pytz/zoneinfo/Asia/Dacca,sha256=-xulJ2KVhvKp6rlZLMydpw7oXVirk-riEH-181xPE54,323 
+pytz/zoneinfo/Asia/Damascus,sha256=EthGheaHWmy5IrLCc9NmM3jvASQFHt8TsBF07I1tgbg,1873 +pytz/zoneinfo/Asia/Dhaka,sha256=-xulJ2KVhvKp6rlZLMydpw7oXVirk-riEH-181xPE54,323 +pytz/zoneinfo/Asia/Dili,sha256=0mUs0Utk-uW9deZV3cBUTpfWMgFvl0DyN29JuKvKMyw,213 +pytz/zoneinfo/Asia/Dubai,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151 +pytz/zoneinfo/Asia/Dushanbe,sha256=koYnnYWuFsBXd1vJfZsGdpwnbFHEwvkGBmSrrx3KIss,577 +pytz/zoneinfo/Asia/Famagusta,sha256=CFrcygd8ude5x6OEtfM_Dw0KYHoxpPPzq46KoHVxjjc,2028 +pytz/zoneinfo/Asia/Gaza,sha256=t0YxcUQL53VNKnKbKijn0OE_MaryEynonabse-iTtzs,3844 +pytz/zoneinfo/Asia/Harbin,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/Asia/Hebron,sha256=6Y0USHKx-xoCxCr_WpCuM3olP1vUGnzrcnGiyQFcqdQ,3872 +pytz/zoneinfo/Asia/Ho_Chi_Minh,sha256=Lnv1vpUNAXBo8v0b9d9AQpy-AEyO5Qa2Ig0PvDkjrmU,337 +pytz/zoneinfo/Asia/Hong_Kong,sha256=al_O4kPlq5JpgkLYjEaZzrcgiiLul9NC0R5B69JVWhc,1233 +pytz/zoneinfo/Asia/Hovd,sha256=Zn4PLGlD-URJDsbChor5bqWTzuAil2tbrGJW0j5TLbs,877 +pytz/zoneinfo/Asia/Irkutsk,sha256=IVuoXCwdeI-KIUfFkEt6yBjqYP3V9GTrF-_WLnffFzk,1229 +pytz/zoneinfo/Asia/Istanbul,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933 +pytz/zoneinfo/Asia/Jakarta,sha256=TvEzBvSzfzFCdOsMAZ0QgR95JA5xf3kAZONhy5gEXRE,383 +pytz/zoneinfo/Asia/Jayapura,sha256=ihzUd-L8HUVqG-Na10MyPE-YYwjVFj-xerqjTN4EJZs,221 +pytz/zoneinfo/Asia/Jerusalem,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388 +pytz/zoneinfo/Asia/Kabul,sha256=JZEbo8bSj_L7HnXUm2gAUlNlCvJlRJhFkSHCg5o3ggk,194 +pytz/zoneinfo/Asia/Kamchatka,sha256=KY1PlJvRSNkY_5hyJBxj5DDweeYVQaBK05ZgL3kdcCY,1152 +pytz/zoneinfo/Asia/Karachi,sha256=iB-mWMTXUyfBwAkZdz8_UmEw0xsgxIub-KNI7akzhkk,379 +pytz/zoneinfo/Asia/Kashgar,sha256=F1ZOdZZDsVHwDJinksR-hjcqPzqOljvdreZIWFulJxY,151 +pytz/zoneinfo/Asia/Kathmandu,sha256=_RsfeSWbCr8kM4YRJi7Xv6hAEiHW14IFhsXsfhbPjoM,198 +pytz/zoneinfo/Asia/Katmandu,sha256=_RsfeSWbCr8kM4YRJi7Xv6hAEiHW14IFhsXsfhbPjoM,198 +pytz/zoneinfo/Asia/Khandyga,sha256=bKfmw6k5qYDQsEHG3Mv-VYis3YhCeV7qijDxfxQNn_g,1257 +pytz/zoneinfo/Asia/Kolkata,sha256=6Qw0EDbLcgMgDik8s7UTJn4QSjmllPNeGVJU5rwKF88,285 +pytz/zoneinfo/Asia/Krasnoyarsk,sha256=D5KE_1wWSD2YdixDy8n3LBNaAlE1_y3TWXw6NrxFKKA,1193 +pytz/zoneinfo/Asia/Kuala_Lumpur,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401 +pytz/zoneinfo/Asia/Kuching,sha256=D5qtyWJ_SM8bTQeJJIYhqqojxlVKbrFC1EYMDU9GzXQ,469 +pytz/zoneinfo/Asia/Kuwait,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151 +pytz/zoneinfo/Asia/Macao,sha256=MvAkRyRsrA2r052ItlyF5bh2FheRjI0jPwg0uIiH2Yk,1227 +pytz/zoneinfo/Asia/Macau,sha256=MvAkRyRsrA2r052ItlyF5bh2FheRjI0jPwg0uIiH2Yk,1227 +pytz/zoneinfo/Asia/Magadan,sha256=HccEEXBQvMmLoC_JE-zP_MlLAZ1WmNLQLfM3tJt55M4,1208 +pytz/zoneinfo/Asia/Makassar,sha256=OhJtCqSTEU-u5n0opBVO5Bu-wQzcYPy9S_6aAhJXgOw,254 +pytz/zoneinfo/Asia/Manila,sha256=ujfq0kl1EhxcYSOrG-FS750aNaYUt1TT4bFuK4EcL_c,328 +pytz/zoneinfo/Asia/Muscat,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151 +pytz/zoneinfo/Asia/Nicosia,sha256=0Unm0IFT7HyGeQ7F3vTa_-klfysCgrulqFO6BD1plZU,2002 +pytz/zoneinfo/Asia/Novokuznetsk,sha256=pyxxtSUtYDeVmFk0Cg-F33laZS0iKtde9_GJnL9f0KM,1151 +pytz/zoneinfo/Asia/Novosibirsk,sha256=5K2-Gx15ThlHfolyW85S5zREtAcMjeHBYWK4E8x2LdY,1207 +pytz/zoneinfo/Asia/Omsk,sha256=HyXIWItJXBKVHUzWcQPi1Mmd6ZLmZk-QhRUo9Kv2XOI,1193 +pytz/zoneinfo/Asia/Oral,sha256=WQT4qRmC9RI_ll8zB9FvkAL8ezGb8qoqWd75GTlC7kQ,991 +pytz/zoneinfo/Asia/Phnom_Penh,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185 +pytz/zoneinfo/Asia/Pontianak,sha256=inOXwuKtjKv1z_eliPZSIqjSt6whtuxhPeG1YpjU_BQ,353 
+pytz/zoneinfo/Asia/Pyongyang,sha256=_-g3GnDAtfDX4XAktXH9jFouLUDmOovnjoOfvRpUDsE,237 +pytz/zoneinfo/Asia/Qatar,sha256=wklGY3WPGp-z1OUwb_KOHzRTwBndt1RfDg9Uttt36G4,185 +pytz/zoneinfo/Asia/Qostanay,sha256=HIjln8QIPNRU6MkWzyPi6vDrjlmVZ4XzFxcUHtXMi7s,1025 +pytz/zoneinfo/Asia/Qyzylorda,sha256=JZLNN6NuLkqaWEeVaCZiW_gL6BrIFL9lr65iK7myVPg,1011 +pytz/zoneinfo/Asia/Rangoon,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254 +pytz/zoneinfo/Asia/Riyadh,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151 +pytz/zoneinfo/Asia/Saigon,sha256=Lnv1vpUNAXBo8v0b9d9AQpy-AEyO5Qa2Ig0PvDkjrmU,337 +pytz/zoneinfo/Asia/Sakhalin,sha256=xzAor82ihAe-yXEwC6OWiMzo9b6Z-oQl39NIkU5Hhbs,1188 +pytz/zoneinfo/Asia/Samarkand,sha256=zJKSRt3lEvd6Qvg9b49QAyO4cTJyVnTKyPYcyudpHxk,563 +pytz/zoneinfo/Asia/Seoul,sha256=LI9LsV3XcJC0l-KoQf8zI-y7rk-du57erS-N2Ptdi7Q,617 +pytz/zoneinfo/Asia/Shanghai,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/Asia/Singapore,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401 +pytz/zoneinfo/Asia/Srednekolymsk,sha256=efaaT8iFHrcccp-VZKNMvtTuPLNjG5V9JH5KKHhH3SI,1194 +pytz/zoneinfo/Asia/Taipei,sha256=DMmQwOpPql25ue3Nf8vAKKT4em06D1Z9rHbLIitxixk,761 +pytz/zoneinfo/Asia/Tashkent,sha256=apRPy251fSRy_ixsg3BOZNmUbHdO86P5-PdgC1Xws7U,577 +pytz/zoneinfo/Asia/Tbilisi,sha256=zQ-2bVq5_USUSbwN6q0qvWjD-HXkKaH4ifMVq1lEeIM,1021 +pytz/zoneinfo/Asia/Tehran,sha256=LQMch2TMA4wI23SQzoIrlZh0_KceXQegurwxCZ5YDlY,1248 +pytz/zoneinfo/Asia/Tel_Aviv,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388 +pytz/zoneinfo/Asia/Thimbu,sha256=G2nTQVEMmKlWt0B74_fUAL7KQ3YAu__J6HciiYs2IyU,189 +pytz/zoneinfo/Asia/Thimphu,sha256=G2nTQVEMmKlWt0B74_fUAL7KQ3YAu__J6HciiYs2IyU,189 +pytz/zoneinfo/Asia/Tokyo,sha256=oCueZgRNxcNcX3ZGdif9y6Su4cyVhga4XHdwlcrYLOs,309 +pytz/zoneinfo/Asia/Tomsk,sha256=cr0ULZgWBnQfzDiJeYmqpA7Xo5QRzurvrHsrbZsnhOQ,1207 +pytz/zoneinfo/Asia/Ujung_Pandang,sha256=OhJtCqSTEU-u5n0opBVO5Bu-wQzcYPy9S_6aAhJXgOw,254 +pytz/zoneinfo/Asia/Ulaanbaatar,sha256=qUkXRsTc_u7B90JxULSu7yzKbGtGfKcfEFIasGPC2ec,877 +pytz/zoneinfo/Asia/Ulan_Bator,sha256=qUkXRsTc_u7B90JxULSu7yzKbGtGfKcfEFIasGPC2ec,877 +pytz/zoneinfo/Asia/Urumqi,sha256=F1ZOdZZDsVHwDJinksR-hjcqPzqOljvdreZIWFulJxY,151 +pytz/zoneinfo/Asia/Ust-Nera,sha256=zsG8kgnw0Fcs5N2WwNTVmvWkTlpwf7Oo8y68HcXjYyw,1238 +pytz/zoneinfo/Asia/Vientiane,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185 +pytz/zoneinfo/Asia/Vladivostok,sha256=XMQLMh5SPbI6C4R3UO4KhbnG4hWVkHNedzCQeqxFk6A,1194 +pytz/zoneinfo/Asia/Yakutsk,sha256=PPNrRGgg9jefOUM-6M8XqaIm-ElfmRZSWAtSGKLzNXQ,1193 +pytz/zoneinfo/Asia/Yangon,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254 +pytz/zoneinfo/Asia/Yekaterinburg,sha256=4NyEW6Xjr4UsWPh63HIPI4G6GT_tVG1Xkgc2xbwGjzA,1229 +pytz/zoneinfo/Asia/Yerevan,sha256=FM0pUA4NbTWBb_CsJ5KCLVrLoNmad7njBKqFrJBDoxE,1137 +pytz/zoneinfo/Atlantic/Azores,sha256=NyNrE2YIwL9yVddpECcYWwci5JzrfjxiIXP7RP0MrL8,3498 +pytz/zoneinfo/Atlantic/Bermuda,sha256=LNGKfMsnYvwImjTyzXrLhMOHHDu7qI67RbYNKvvI15I,2396 +pytz/zoneinfo/Atlantic/Canary,sha256=ymK9ufqphvNjDK3hzikN4GfkcR3QeCBiPKyVc6FjlbA,1897 +pytz/zoneinfo/Atlantic/Cape_Verde,sha256=o92pLdLFX_b9vUiq3rNpca4tupIO3dx9rNrnPcA8474,256 +pytz/zoneinfo/Atlantic/Faeroe,sha256=NibdZPZtapnYR_myIZnMdTaSKGsOBGgujj0_T2NvAzs,1815 +pytz/zoneinfo/Atlantic/Faroe,sha256=NibdZPZtapnYR_myIZnMdTaSKGsOBGgujj0_T2NvAzs,1815 +pytz/zoneinfo/Atlantic/Jan_Mayen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Atlantic/Madeira,sha256=21Zcy0xRqDN3oY8jmjjO-LI7aC3G9mcS9ytaYg0g7ik,3503 
+pytz/zoneinfo/Atlantic/Reykjavik,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Atlantic/South_Georgia,sha256=I9SAcPPumy6Xf9P7dg2aE16oxwDIqyKFqinJTC-XsgM,150 +pytz/zoneinfo/Atlantic/St_Helena,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Atlantic/Stanley,sha256=siEjXTAuTum_4XGtS98MBE34XW_5xgXShEX5OMnSFjo,1200 +pytz/zoneinfo/Australia/ACT,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190 +pytz/zoneinfo/Australia/Adelaide,sha256=ld2EbxU75oVgmPe703z-I6aqLg0Kmv62ZcCGzkT5R20,2208 +pytz/zoneinfo/Australia/Brisbane,sha256=eW6Qzze2t0-speJmmvt1JMzbkSadIKdE84XHc7JUtGc,419 +pytz/zoneinfo/Australia/Broken_Hill,sha256=3k_3ljTvS5GSfo7Xh6w71UgR3aAwYPBsnCJ-mlEYCqQ,2229 +pytz/zoneinfo/Australia/Canberra,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190 +pytz/zoneinfo/Australia/Currie,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358 +pytz/zoneinfo/Australia/Darwin,sha256=fn0IZhIW98FAnzLig-_GBtW5LA54jajdeeUzg4tCGvo,325 +pytz/zoneinfo/Australia/Eucla,sha256=i1-XGG8I6E0dXIdWGF4DlkfDLWhiAxJ_3gMpt-nm_u4,456 +pytz/zoneinfo/Australia/Hobart,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358 +pytz/zoneinfo/Australia/LHI,sha256=oyPFQzmRqWPrSXt9pNHQmEi_PvX11k2clknziOS6ud8,1846 +pytz/zoneinfo/Australia/Lindeman,sha256=xM6Udx22oLNoLR1Y7GQhHOYov8nw3xQNqgc_NVQ2JK4,475 +pytz/zoneinfo/Australia/Lord_Howe,sha256=oyPFQzmRqWPrSXt9pNHQmEi_PvX11k2clknziOS6ud8,1846 +pytz/zoneinfo/Australia/Melbourne,sha256=lvx_MQcunMc6u2smIrl8X427bLsXvjkgpCSdjYCTNBM,2190 +pytz/zoneinfo/Australia/NSW,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190 +pytz/zoneinfo/Australia/North,sha256=fn0IZhIW98FAnzLig-_GBtW5LA54jajdeeUzg4tCGvo,325 +pytz/zoneinfo/Australia/Perth,sha256=Al1DOUh4U_ofMUQSeVlzSyD3x7SUjP9dchSaBUGmeWg,446 +pytz/zoneinfo/Australia/Queensland,sha256=eW6Qzze2t0-speJmmvt1JMzbkSadIKdE84XHc7JUtGc,419 +pytz/zoneinfo/Australia/South,sha256=ld2EbxU75oVgmPe703z-I6aqLg0Kmv62ZcCGzkT5R20,2208 +pytz/zoneinfo/Australia/Sydney,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190 +pytz/zoneinfo/Australia/Tasmania,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358 +pytz/zoneinfo/Australia/Victoria,sha256=lvx_MQcunMc6u2smIrl8X427bLsXvjkgpCSdjYCTNBM,2190 +pytz/zoneinfo/Australia/West,sha256=Al1DOUh4U_ofMUQSeVlzSyD3x7SUjP9dchSaBUGmeWg,446 +pytz/zoneinfo/Australia/Yancowinna,sha256=3k_3ljTvS5GSfo7Xh6w71UgR3aAwYPBsnCJ-mlEYCqQ,2229 +pytz/zoneinfo/Brazil/Acre,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614 +pytz/zoneinfo/Brazil/DeNoronha,sha256=feeRAijQqKylZgqe84nKhsFLycT5zIBm7mLIvdyGw4w,702 +pytz/zoneinfo/Brazil/East,sha256=BMBnRO4_4HjvO4t3njjrMGZr-ZPmegkvyvL8KPY6ZM4,1430 +pytz/zoneinfo/Brazil/West,sha256=F6RLOOeOi9lymZiQmQ9pR8tFpPZ6EguNdPfOc6BhXDE,590 +pytz/zoneinfo/CET,sha256=o4omkrM_IsITxooUo8krM921XfBdvRs9JhwGXGd-Ypg,2094 +pytz/zoneinfo/CST6CDT,sha256=WGbtZ1FwjRX6Jeo_TCXKsfeDs4V9uhXGJfcnLJhk3s0,2310 +pytz/zoneinfo/Canada/Atlantic,sha256=TZpmc5PwWoLfTfQoQ_b3U17BE2iVKSeNkR0Ho8mbTn8,3424 +pytz/zoneinfo/Canada/Central,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868 +pytz/zoneinfo/Canada/Eastern,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/Canada/Mountain,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332 +pytz/zoneinfo/Canada/Newfoundland,sha256=r1-17uKv27eZ3JsVkw_DLZQbo6wvjuuVu7C2pDsmOgI,3655 +pytz/zoneinfo/Canada/Pacific,sha256=sknKH0jSPWam-DHfM35qXs8Nam7d5TFlkUI9Sgxryyg,2892 +pytz/zoneinfo/Canada/Saskatchewan,sha256=yjqT08pHbICYe83H8JmtaDBvCFqRv7Tfze3Y8xuXukw,980 
+pytz/zoneinfo/Canada/Yukon,sha256=TrR6PCnYG-mSClBMohqlP8qnYhXMUsydI-L-quXFxyM,1614 +pytz/zoneinfo/Chile/Continental,sha256=0CDw13dCMUsoquMupoJgupkzAUNhDK6E0lVxURA7osA,2515 +pytz/zoneinfo/Chile/EasterIsland,sha256=QbubBs_xQlvKweAnurhyHjIK4ji77Gh4G-usXul6XVM,2219 +pytz/zoneinfo/Cuba,sha256=HUQeAuKBsEkI5SLZjqynXICOUVOajkKzKH5r-Ov5Odc,2416 +pytz/zoneinfo/EET,sha256=gGVsW5-qnI7ty8vqVK1ADWhunrvAT8kUC79GUf-_7G8,1908 +pytz/zoneinfo/EST,sha256=uKE_VPKfxGyYEsyqV_DdE2MW55vs_qUioOdIn5Goobc,114 +pytz/zoneinfo/EST5EDT,sha256=fwzEMT1jgnY2dDjd0EqDl26_7LC-oF48Bd4ng5311H0,2310 +pytz/zoneinfo/Egypt,sha256=Lft-GCLQhaSJm9VqUmsEFoHIS1Vhfa7pFJn9GZCpifs,2399 +pytz/zoneinfo/Eire,sha256=QOjSocO1cihNo59vQkWxvIFPRSxE9apz0KARVx1czEM,3492 +pytz/zoneinfo/Etc/GMT,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/GMT+0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/GMT+1,sha256=1Qzl2X9rQ_RXEf11yH09wQZCr_ph6UdFP7E0yu9s-IQ,116 +pytz/zoneinfo/Etc/GMT+10,sha256=JEQyQyQlkC0o6ZTdeVjZhCIOh6cK5TF7H00Pkls-sUI,117 +pytz/zoneinfo/Etc/GMT+11,sha256=tWvcvYMFCaE60nJVvDrrov7stJvs1KQYOyrhl3dzcUs,117 +pytz/zoneinfo/Etc/GMT+12,sha256=b70HEhErq8IJmq8x7cOZy4eR__3fq5uHHpjvPBEHqMA,117 +pytz/zoneinfo/Etc/GMT+2,sha256=T6Ep5zhslBKbYaECFUB6gUKh3iTZPyMoW1kjhonxrUo,116 +pytz/zoneinfo/Etc/GMT+3,sha256=QGoYrE04bUJ-OzL37dt2MZT5FxWNLpJDPVXgJbstYZA,116 +pytz/zoneinfo/Etc/GMT+4,sha256=RWrkNki-wV7X-coe0VvufBe6LrWVpkPJgia5QQYEnBo,116 +pytz/zoneinfo/Etc/GMT+5,sha256=oRmeC41dgYXT-zzyZIRKXN9IvdL2Da5nTuwmG2_prIA,116 +pytz/zoneinfo/Etc/GMT+6,sha256=d6dAnwiejyFI2n7AzFlFW0aFAT6zYNEjBIEG0uu0sbQ,116 +pytz/zoneinfo/Etc/GMT+7,sha256=TqjYbzd0YHpx1wisFg08J19wTpg6ztJLLongZY_lozs,116 +pytz/zoneinfo/Etc/GMT+8,sha256=th_8bIMmYgRPCesBrbmBhRr0jQO7whd70LiY9HfwJyk,116 +pytz/zoneinfo/Etc/GMT+9,sha256=Qq5E6iUS7JMJIymT7YoqlI8MtqtVy0mr9t6zWFtWc9Y,116 +pytz/zoneinfo/Etc/GMT-0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/GMT-1,sha256=73F1eU8uAQGP3mcoB2q99CjfManGFHk3fefljp9pYC4,117 +pytz/zoneinfo/Etc/GMT-10,sha256=fKWWNwLBOp1OkKjtc1w9LIXJR1mTTD-JdvYflRy1IrU,118 +pytz/zoneinfo/Etc/GMT-11,sha256=D2S79n6psa9t9_2vj5wIrFpHH2OJLcCKP6vtwzFZINY,118 +pytz/zoneinfo/Etc/GMT-12,sha256=me4V6lmWI8gSr8H7N41WAD0Eww1anh_EF34Qr9UoSnI,118 +pytz/zoneinfo/Etc/GMT-13,sha256=xbmbG1BQA6Dlpa_iUwEGyJxW4a3t6lmawdPKAE8vbR8,118 +pytz/zoneinfo/Etc/GMT-14,sha256=PpXoREBh02qFpvxVMj2pV9IAzSQvBE7XPvnN9qSZ-Kc,118 +pytz/zoneinfo/Etc/GMT-2,sha256=ve6hWLdeuiLhqagaWLqMD6HNybS1chRwjudfTZ2bYBE,117 +pytz/zoneinfo/Etc/GMT-3,sha256=N77jILanuLDVkLsdujXZSu-dsHiwN5MIpwh7fMUifso,117 +pytz/zoneinfo/Etc/GMT-4,sha256=LSko5fVHqPl5zfwjGqkbMa_OFnvtpT6o_4xYxNz9n5o,117 +pytz/zoneinfo/Etc/GMT-5,sha256=uLaSR5Mb18HRTsAA5SveY9PAJ97dO8QzIWqNXe3wZb4,117 +pytz/zoneinfo/Etc/GMT-6,sha256=JSN-RUAphJ50fpIv7cYC6unrtrz9S1Wma-piDHlGe7c,117 +pytz/zoneinfo/Etc/GMT-7,sha256=vVAOF8xU9T9ESnw68c0SFXpcvkoopaiwTR0zbefHHSU,117 +pytz/zoneinfo/Etc/GMT-8,sha256=S7xFQbFMpiDZy4v5L4D9fCrjRIzzoLC5p8Se23xi7us,117 +pytz/zoneinfo/Etc/GMT-9,sha256=I5vHNmUK-Yyg_S1skFN44VGVzBgktjFgVQiDIKO4aMI,117 +pytz/zoneinfo/Etc/GMT0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/Greenwich,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/UCT,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/Etc/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/Etc/Universal,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/Etc/Zulu,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 
+pytz/zoneinfo/Europe/Amsterdam,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933 +pytz/zoneinfo/Europe/Andorra,sha256=gTB5jCQmvIw3JJi1_vAcOYuhtzPBR6RXUx9gVV6p6ug,1742 +pytz/zoneinfo/Europe/Astrakhan,sha256=ZeGDZjwVVRoeR-J642zEnN26BPL58ViTJLbwnk7pLXk,1151 +pytz/zoneinfo/Europe/Athens,sha256=XDY-FBUddRyQHN8GxQLZ4awjuOlWlzlUdjv7OdXFNzA,2262 +pytz/zoneinfo/Europe/Belfast,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Belgrade,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Berlin,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Europe/Bratislava,sha256=G9fdhUXmzx651BnyZ6V7AOYIV9EV5aMJMm44eJaLLZw,2301 +pytz/zoneinfo/Europe/Brussels,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933 +pytz/zoneinfo/Europe/Bucharest,sha256=nfg6-bU2D6DMEWb9EMIBR5kxnNsbDSx0UKfHH_ZzqFc,2184 +pytz/zoneinfo/Europe/Budapest,sha256=lNwqxWciBvw9ei81VQwIKHbC_ZDJjpgHU6HFg4wCUkY,2368 +pytz/zoneinfo/Europe/Busingen,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 +pytz/zoneinfo/Europe/Chisinau,sha256=p1J_rqFE13pL8cpBRrEFe-teCI8f0fKK4uTUy_4diF4,2390 +pytz/zoneinfo/Europe/Copenhagen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Europe/Dublin,sha256=QOjSocO1cihNo59vQkWxvIFPRSxE9apz0KARVx1czEM,3492 +pytz/zoneinfo/Europe/Gibraltar,sha256=a87WpaBlvxI4gAU9OpQOkN8VUJbirVWYf-VfFLTIoS4,3068 +pytz/zoneinfo/Europe/Guernsey,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Helsinki,sha256=GEkB7LsVhmegt7YuuWheCDvDGC7b7Nw9bTdDGS9qkJc,1900 +pytz/zoneinfo/Europe/Isle_of_Man,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Istanbul,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933 +pytz/zoneinfo/Europe/Jersey,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Kaliningrad,sha256=s7GXSe1YvMcs7AiUhHNTA6I4nAOQn_Kmz_ZqJYO-LMM,1493 +pytz/zoneinfo/Europe/Kiev,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120 +pytz/zoneinfo/Europe/Kirov,sha256=P7T2Zf5Eo6o4L4Dbg_BfiFjUgTj0dQXlrwY-QZ1eBVk,1185 +pytz/zoneinfo/Europe/Kyiv,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120 +pytz/zoneinfo/Europe/Lisbon,sha256=mpUpxGexMhbOBImDLSQs5-GAk7pm7tg4qYW044Kkle0,3497 +pytz/zoneinfo/Europe/Ljubljana,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/London,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Luxembourg,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933 +pytz/zoneinfo/Europe/Madrid,sha256=mkLX03rW3t0tmzKBIPe_noUvaFDErwC6_5ZPZZsWHOo,2614 +pytz/zoneinfo/Europe/Malta,sha256=EhKcbPL47765tWAiQ57cusaK2TaIQqZCgtJoEZs3Ud0,2620 +pytz/zoneinfo/Europe/Mariehamn,sha256=GEkB7LsVhmegt7YuuWheCDvDGC7b7Nw9bTdDGS9qkJc,1900 +pytz/zoneinfo/Europe/Minsk,sha256=KgPm0fHycntgd3xbTmmDl4O13Xh_9e2zUnd8XFSU29o,1307 +pytz/zoneinfo/Europe/Monaco,sha256=q3ehSIot1GZ6TyMHIjbg0oRf4ghAXuwbSDSYVim6evg,2962 +pytz/zoneinfo/Europe/Moscow,sha256=KmkofRcj6T8Ph28PJChm8JVp13uRvef6TZ0GuPzUiDw,1535 +pytz/zoneinfo/Europe/Nicosia,sha256=0Unm0IFT7HyGeQ7F3vTa_-klfysCgrulqFO6BD1plZU,2002 +pytz/zoneinfo/Europe/Oslo,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Europe/Paris,sha256=q3ehSIot1GZ6TyMHIjbg0oRf4ghAXuwbSDSYVim6evg,2962 +pytz/zoneinfo/Europe/Podgorica,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Prague,sha256=G9fdhUXmzx651BnyZ6V7AOYIV9EV5aMJMm44eJaLLZw,2301 +pytz/zoneinfo/Europe/Riga,sha256=hJ2_0m1taW9IuA-hMyP5n-WX7YOrR0heKszJhgljRWk,2198 
+pytz/zoneinfo/Europe/Rome,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641 +pytz/zoneinfo/Europe/Samara,sha256=nXL0IxbT6qu10CNuaDHxx4W1OaAnaaKTtIJ9N9URMoU,1201 +pytz/zoneinfo/Europe/San_Marino,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641 +pytz/zoneinfo/Europe/Sarajevo,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Saratov,sha256=ygwjvXN13TgaWxjg6ysWEnHWNxwrVtkEbrk8t9bzVVw,1169 +pytz/zoneinfo/Europe/Simferopol,sha256=tzl7xdNVSZprNCul4YE5LSpoR9JoujmOq8VbbB8wHic,1469 +pytz/zoneinfo/Europe/Skopje,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Sofia,sha256=hCQKXfMNrnA5xHNw_uzTjKzVw4-Bvsq5oGO4yUCv5tY,2077 +pytz/zoneinfo/Europe/Stockholm,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Europe/Tallinn,sha256=4a6JC0aIpMzqIV7O35zoG0LLJwkQq5AoXZ2ivkic6-w,2148 +pytz/zoneinfo/Europe/Tirane,sha256=ztlZyCS9WCXeVW8nBun3Tyi5HUY0EtFbiBbEc1gucuw,2084 +pytz/zoneinfo/Europe/Tiraspol,sha256=p1J_rqFE13pL8cpBRrEFe-teCI8f0fKK4uTUy_4diF4,2390 +pytz/zoneinfo/Europe/Ulyanovsk,sha256=c8Ad5p7CKj_1cCA7lVRpcPqbQXGYaX83cuu6uIFx-Bg,1253 +pytz/zoneinfo/Europe/Uzhgorod,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120 +pytz/zoneinfo/Europe/Vaduz,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 +pytz/zoneinfo/Europe/Vatican,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641 +pytz/zoneinfo/Europe/Vienna,sha256=ZmI3kADE6bnrJEccqh73XXBY36L1G4DkpiTQImtNrUk,2200 +pytz/zoneinfo/Europe/Vilnius,sha256=UFzRX3orCTB8d9IzlxJPy5eUA2oBPuCu1UJl-2D7C3U,2162 +pytz/zoneinfo/Europe/Volgograd,sha256=RgFvt7mzZ-TtIKL9BVHmoNZLIeLIuiDdXeY10g2_vks,1193 +pytz/zoneinfo/Europe/Warsaw,sha256=TiLDPbeVF0ckgLVEkaSeDaKZ8wctdJDOl_HE_Wd5rKs,2654 +pytz/zoneinfo/Europe/Zagreb,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Zaporozhye,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120 +pytz/zoneinfo/Europe/Zurich,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 +pytz/zoneinfo/Factory,sha256=aFFlKx93HXoJoF4SSuTlD8cZtJA-ne5oKzAa6eX2V4k,116 +pytz/zoneinfo/GB,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/GB-Eire,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/GMT,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/GMT+0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/GMT-0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/GMT0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Greenwich,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/HST,sha256=1YkCncvgL9Z5CmUo4Vk8VbQmgA7ZAQ0PtE37j1yOli8,115 +pytz/zoneinfo/Hongkong,sha256=al_O4kPlq5JpgkLYjEaZzrcgiiLul9NC0R5B69JVWhc,1233 +pytz/zoneinfo/Iceland,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Indian/Antananarivo,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Indian/Chagos,sha256=2errXzKdFIcpU0L-XRhSHxhNabIzbI5lXV3Pq6lt40Y,185 +pytz/zoneinfo/Indian/Christmas,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185 +pytz/zoneinfo/Indian/Cocos,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254 +pytz/zoneinfo/Indian/Comoro,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Indian/Kerguelen,sha256=F73ffVfBoUoHre0-DwsiQrYJcLpPOW-JJGk3n88lM5U,185 +pytz/zoneinfo/Indian/Mahe,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151 +pytz/zoneinfo/Indian/Maldives,sha256=F73ffVfBoUoHre0-DwsiQrYJcLpPOW-JJGk3n88lM5U,185 
+pytz/zoneinfo/Indian/Mauritius,sha256=Znqrc1chimlciJsYBOl0NvIHnrNdCxncGxWczq1PBeI,227 +pytz/zoneinfo/Indian/Mayotte,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Indian/Reunion,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151 +pytz/zoneinfo/Iran,sha256=LQMch2TMA4wI23SQzoIrlZh0_KceXQegurwxCZ5YDlY,1248 +pytz/zoneinfo/Israel,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388 +pytz/zoneinfo/Jamaica,sha256=wlagieUPRf5-beie-h7QsONbNzjGsm8vMs8uf28pw28,482 +pytz/zoneinfo/Japan,sha256=oCueZgRNxcNcX3ZGdif9y6Su4cyVhga4XHdwlcrYLOs,309 +pytz/zoneinfo/Kwajalein,sha256=TmZ_0f-ySQ-saBAlRXV0f49Itwne51VBXn6rWcrWqHQ,302 +pytz/zoneinfo/Libya,sha256=W1dptGD70T7ppGoo0fczFQeDiIp0nultLNPV66MwB2c,625 +pytz/zoneinfo/MET,sha256=i3CKSuP4N_PAj7o-Cbk8zPEdFs0CWWBCAfg2JXDx5V8,2094 +pytz/zoneinfo/MST,sha256=6IQwvtT12Bz1pTiqFuoVxNY-4ViS7ZrYHo5nPWwzKPw,114 +pytz/zoneinfo/MST7MDT,sha256=910Ek32FKoSyZWY_H19VHaVvqb-JsvnWTOOHvhrKsE0,2310 +pytz/zoneinfo/Mexico/BajaNorte,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374 +pytz/zoneinfo/Mexico/BajaSur,sha256=RQQVwlEVHRp2X-c_0hJ46y54abTlqUuLkyrUUicyc5g,1128 +pytz/zoneinfo/Mexico/General,sha256=A5MlfDUZ4O1-jMTRt0WPem7qqcW0Nrslls1hlc8C4-Q,1222 +pytz/zoneinfo/NZ,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 +pytz/zoneinfo/NZ-CHAT,sha256=xhexVc5lfJ_qAv2d3HrII6lfRSxKZYBAjY2zpYkCGE8,2054 +pytz/zoneinfo/Navajo,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460 +pytz/zoneinfo/PRC,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/PST8PDT,sha256=Q7TCLkE69a6g7mPoPAkqhg-0dStyiAC0jVlM72KG_R8,2310 +pytz/zoneinfo/Pacific/Apia,sha256=M3QKsp75Q7H1X3aeE_9ZqQli9aEkNCCQctZQ5sEKu00,598 +pytz/zoneinfo/Pacific/Auckland,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 +pytz/zoneinfo/Pacific/Bougainville,sha256=hWE86eXnNx-vABbp7-YSIqWyecHPMIWLftVloAoPhL8,254 +pytz/zoneinfo/Pacific/Chatham,sha256=xhexVc5lfJ_qAv2d3HrII6lfRSxKZYBAjY2zpYkCGE8,2054 +pytz/zoneinfo/Pacific/Chuuk,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Pacific/Easter,sha256=QbubBs_xQlvKweAnurhyHjIK4ji77Gh4G-usXul6XVM,2219 +pytz/zoneinfo/Pacific/Efate,sha256=oSxNcQYx5-1FU2_yHzHI-hT-dMJcPxzy4XmdI1UxXAo,524 +pytz/zoneinfo/Pacific/Enderbury,sha256=HNTAKrsH_R2W3QRlKcmNld5KcXdP0ygXCjEovc1i-6Q,220 +pytz/zoneinfo/Pacific/Fakaofo,sha256=qOodpTMKjztvZIXVLe_f_kZ6WcHl9fCLE9ZsyvdFKLI,186 +pytz/zoneinfo/Pacific/Fiji,sha256=jB5FbOsCnHVQQ2ohPiWEQUPhG6JybB3Nog3qT6WJQ0I,564 +pytz/zoneinfo/Pacific/Funafuti,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 +pytz/zoneinfo/Pacific/Galapagos,sha256=_GJUYOjSiIjoNBO2qdq23isLMJ4NCVk3DKIRGeDc8BA,224 +pytz/zoneinfo/Pacific/Gambier,sha256=gAS7gr1HH_re0uYnL6eWo5KGJ-B5QaiM8mV2cY5mQxE,150 +pytz/zoneinfo/Pacific/Guadalcanal,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152 +pytz/zoneinfo/Pacific/Guam,sha256=Ex9znmf6rNfGze6gNpZJCMr1TT4rkl2SnrhecrdJufI,494 +pytz/zoneinfo/Pacific/Honolulu,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 +pytz/zoneinfo/Pacific/Johnston,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 +pytz/zoneinfo/Pacific/Kanton,sha256=HNTAKrsH_R2W3QRlKcmNld5KcXdP0ygXCjEovc1i-6Q,220 +pytz/zoneinfo/Pacific/Kiritimati,sha256=hYk1Ooz-Lj1PuZCbNV2WJIvOLtCwSwq2u63cb1Z-3NQ,224 +pytz/zoneinfo/Pacific/Kosrae,sha256=Q0jrb4zeDrd61bU4V8TqjMc0Iep8rWZyZqJ0uqsunxs,337 +pytz/zoneinfo/Pacific/Kwajalein,sha256=TmZ_0f-ySQ-saBAlRXV0f49Itwne51VBXn6rWcrWqHQ,302 +pytz/zoneinfo/Pacific/Majuro,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 
+pytz/zoneinfo/Pacific/Marquesas,sha256=FTxPJTWtk48LVb3N2U64KLpLsmvu0DQBubTCg-dvyGM,159 +pytz/zoneinfo/Pacific/Midway,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 +pytz/zoneinfo/Pacific/Nauru,sha256=9ASKgLHB-8nsTEK1ApzfTH0yQtbNAmGX-JI7uHZiqnA,238 +pytz/zoneinfo/Pacific/Niue,sha256=OllXxukncR7a-SMmdFox5az1xpIPMhbahQhtObmpuDM,189 +pytz/zoneinfo/Pacific/Norfolk,sha256=DMdX1Bm18lzNuiCWzwfeHUMRGXPS8v5AWnh-_EX_AZw,866 +pytz/zoneinfo/Pacific/Noumea,sha256=tkHxxnxsXTOqz3YzWi0mkhTCIONzg-W7EpSRMdPjKdQ,290 +pytz/zoneinfo/Pacific/Pago_Pago,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 +pytz/zoneinfo/Pacific/Palau,sha256=aN2HbT0reqwKrtLKDK9M2zb0d0ikdNlTrrntVxdH66o,166 +pytz/zoneinfo/Pacific/Pitcairn,sha256=U4jAUuvsRNoy8XrPa16YpcXCcqHJY0u6JvCNgPEWO1c,188 +pytz/zoneinfo/Pacific/Pohnpei,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152 +pytz/zoneinfo/Pacific/Ponape,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152 +pytz/zoneinfo/Pacific/Port_Moresby,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Pacific/Rarotonga,sha256=wPEsoXbyDnuhfzkgLvUqhSzrMx_FD42uAPluSPMh3Bc,589 +pytz/zoneinfo/Pacific/Saipan,sha256=Ex9znmf6rNfGze6gNpZJCMr1TT4rkl2SnrhecrdJufI,494 +pytz/zoneinfo/Pacific/Samoa,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 +pytz/zoneinfo/Pacific/Tahiti,sha256=BRff9G3E-iWKhOWR1Wu02Z0iMgjrwDXV-XNrqItXdTY,151 +pytz/zoneinfo/Pacific/Tarawa,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 +pytz/zoneinfo/Pacific/Tongatapu,sha256=OppBZqTAZib9HY7U9AC-JavO7m6NxPGUtUfPQAl9oBY,358 +pytz/zoneinfo/Pacific/Truk,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Pacific/Wake,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 +pytz/zoneinfo/Pacific/Wallis,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 +pytz/zoneinfo/Pacific/Yap,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Poland,sha256=TiLDPbeVF0ckgLVEkaSeDaKZ8wctdJDOl_HE_Wd5rKs,2654 +pytz/zoneinfo/Portugal,sha256=mpUpxGexMhbOBImDLSQs5-GAk7pm7tg4qYW044Kkle0,3497 +pytz/zoneinfo/ROC,sha256=DMmQwOpPql25ue3Nf8vAKKT4em06D1Z9rHbLIitxixk,761 +pytz/zoneinfo/ROK,sha256=LI9LsV3XcJC0l-KoQf8zI-y7rk-du57erS-N2Ptdi7Q,617 +pytz/zoneinfo/Singapore,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401 +pytz/zoneinfo/Turkey,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933 +pytz/zoneinfo/UCT,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/US/Alaska,sha256=oZA1NSPS2BWdymYpnCHFO8BlYVS-ll5KLg2Ez9CbETs,2371 +pytz/zoneinfo/US/Aleutian,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 +pytz/zoneinfo/US/Arizona,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360 +pytz/zoneinfo/US/Central,sha256=_roybr6I6sIAF6cYdIxGxoRpoef153Fty48dQ6bm9oY,3592 +pytz/zoneinfo/US/East-Indiana,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682 +pytz/zoneinfo/US/Eastern,sha256=6e0H177gx2qdRC0JHvHwFmj-58TyYBTAqGixn-bBipU,3552 +pytz/zoneinfo/US/Hawaii,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 +pytz/zoneinfo/US/Indiana-Starke,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444 +pytz/zoneinfo/US/Michigan,sha256=hecz8yqY2Cj5B61G3gLZdAVZvRgK9l0P90c_gN-uD5g,2230 +pytz/zoneinfo/US/Mountain,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460 +pytz/zoneinfo/US/Pacific,sha256=aJd7ua1tGG_vxser02AQpm4wAI3LLTdgh6QcSYYecmg,2852 +pytz/zoneinfo/US/Samoa,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 +pytz/zoneinfo/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 
+pytz/zoneinfo/Universal,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/W-SU,sha256=KmkofRcj6T8Ph28PJChm8JVp13uRvef6TZ0GuPzUiDw,1535 +pytz/zoneinfo/WET,sha256=Sc0l03EfVs_aIi17I4KyZJFkwiAHat5BgpjuuFDhgQ0,1905 +pytz/zoneinfo/Zulu,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/iso3166.tab,sha256=oBpdFY8x1GrY5vjMKgbGQYEGgqk5fUYDIPaNVCG2XnE,4791 +pytz/zoneinfo/leapseconds,sha256=fjC39Eu3wB6I4g7x_VL7HzvDVbiKbLUjfQAEgo7442I,3257 +pytz/zoneinfo/tzdata.zi,sha256=8PWtzwDNZfLJU8Wa6Ktci7tg9V5mpvh26Vb0P8jBU0w,109390 +pytz/zoneinfo/zone.tab,sha256=qSLfeCWE3tsCDIIQbr71DMkmCUXTIUEgNZgfN-60d-Y,18846 +pytz/zoneinfo/zone1970.tab,sha256=FJErvL9wggoFluO2WceYn8ZQ-nA9A073Lub1x2Pzg40,17582 +pytz/zoneinfo/zonenow.tab,sha256=YoPd7huhHsKlJliOO-eMIBE5-bHBKpbfjkSJQFAto6I,8311 diff --git a/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..0b18a281107a0448a9980396d9d324ea2aa7a7f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..af44f198c687e245aada835efbab2f75ed2c9baf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pytz diff --git a/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/zip-safe b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/zip-safe new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytz-2024.1.dist-info/zip-safe @@ -0,0 +1 @@ + diff --git a/llmeval-env/lib/python3.10/site-packages/regex/__init__.py b/llmeval-env/lib/python3.10/site-packages/regex/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb06564ab033a2b0b501f7f41efb169dacd1f801 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/regex/__init__.py @@ -0,0 +1,3 @@ +from .regex import * +from . import regex +__all__ = regex.__all__ diff --git a/llmeval-env/lib/python3.10/site-packages/regex/regex.py b/llmeval-env/lib/python3.10/site-packages/regex/regex.py new file mode 100644 index 0000000000000000000000000000000000000000..428a159af71929242d9b5bd1de9524094cc1379d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/regex/regex.py @@ -0,0 +1,746 @@ +# +# Secret Labs' Regular Expression Engine +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# This version of the SRE library can be redistributed under CNRI's +# Python 1.6 license. For any other use, please contact Secret Labs +# AB (info@pythonware.com). +# +# Portions of this engine have been developed in cooperation with +# CNRI. Hewlett-Packard provided funding for 1.6 integration and +# other compatibility work. +# +# 2010-01-16 mrab Python front-end re-written and extended + +r"""Support for regular expressions (RE). + +This module provides regular expression matching operations similar to those +found in Perl. 
It supports both 8-bit and Unicode strings; both the pattern and +the strings being processed can contain null bytes and characters outside the +US ASCII range. + +Regular expressions can contain both special and ordinary characters. Most +ordinary characters, like "A", "a", or "0", are the simplest regular +expressions; they simply match themselves. You can concatenate ordinary +characters, so last matches the string 'last'. + +There are a few differences between the old (legacy) behaviour and the new +(enhanced) behaviour, which are indicated by VERSION0 or VERSION1. + +The special characters are: + "." Matches any character except a newline. + "^" Matches the start of the string. + "$" Matches the end of the string or just before the + newline at the end of the string. + "*" Matches 0 or more (greedy) repetitions of the preceding + RE. Greedy means that it will match as many repetitions + as possible. + "+" Matches 1 or more (greedy) repetitions of the preceding + RE. + "?" Matches 0 or 1 (greedy) of the preceding RE. + *?,+?,?? Non-greedy versions of the previous three special + characters. + *+,++,?+ Possessive versions of the previous three special + characters. + {m,n} Matches from m to n repetitions of the preceding RE. + {m,n}? Non-greedy version of the above. + {m,n}+ Possessive version of the above. + {...} Fuzzy matching constraints. + "\\" Either escapes special characters or signals a special + sequence. + [...] Indicates a set of characters. A "^" as the first + character indicates a complementing set. + "|" A|B, creates an RE that will match either A or B. + (...) Matches the RE inside the parentheses. The contents are + captured and can be retrieved or matched later in the + string. + (?flags-flags) VERSION1: Sets/clears the flags for the remainder of + the group or pattern; VERSION0: Sets the flags for the + entire pattern. + (?:...) Non-capturing version of regular parentheses. + (?>...) Atomic non-capturing version of regular parentheses. + (?flags-flags:...) Non-capturing version of regular parentheses with local + flags. + (?P<name>...) The substring matched by the group is accessible by + name. + (?<name>...) The substring matched by the group is accessible by + name. + (?P=name) Matches the text matched earlier by the group named + name. + (?#...) A comment; ignored. + (?=...) Matches if ... matches next, but doesn't consume the + string. + (?!...) Matches if ... doesn't match next. + (?<=...) Matches if preceded by .... + (?<!...) Matches if not preceded by .... + \g<name> Matches the text matched by the group named name. + \G Matches the empty string, but only at the position where + the search started. + \h Matches horizontal whitespace. + \K Keeps only what follows for the entire match. + \L Named list. The list is provided as a keyword argument. + \m Matches the empty string, but only at the start of a word. + \M Matches the empty string, but only at the end of a word. + \n Matches the newline character. + \N{name} Matches the named character. + \p{name=value} Matches the character if its property has the specified + value. + \P{name=value} Matches the character if its property hasn't the specified + value. + \r Matches the carriage-return character. + \s Matches any whitespace character; equivalent to + [ \t\n\r\f\v]. + \S Matches any non-whitespace character; equivalent to [^\s]. + \t Matches the tab character. + \uXXXX Matches the Unicode codepoint with 4-digit hex code XXXX. + \UXXXXXXXX Matches the Unicode codepoint with 8-digit hex code + XXXXXXXX. + \v Matches the vertical tab character. 
+ \w Matches any alphanumeric character; equivalent to + [a-zA-Z0-9_] when matching a bytestring or a Unicode string + with the ASCII flag, or the whole range of Unicode + alphanumeric characters (letters plus digits plus + underscore) when matching a Unicode string. With LOCALE, it + will match the set [0-9_] plus characters defined as + letters for the current locale. + \W Matches the complement of \w; equivalent to [^\w]. + \xXX Matches the character with 2-digit hex code XX. + \X Matches a grapheme. + \Z Matches only at the end of the string. + \\ Matches a literal backslash. + +This module exports the following functions: + match Match a regular expression pattern at the beginning of a string. + fullmatch Match a regular expression pattern against all of a string. + search Search a string for the presence of a pattern. + sub Substitute occurrences of a pattern found in a string using a + template string. + subf Substitute occurrences of a pattern found in a string using a + format string. + subn Same as sub, but also return the number of substitutions made. + subfn Same as subf, but also return the number of substitutions made. + split Split a string by the occurrences of a pattern. VERSION1: will + split at zero-width match; VERSION0: won't split at zero-width + match. + splititer Return an iterator yielding the parts of a split string. + findall Find all occurrences of a pattern in a string. + finditer Return an iterator yielding a match object for each match. + compile Compile a pattern into a Pattern object. + purge Clear the regular expression cache. + escape Backslash all non-alphanumerics or special characters in a + string. + +Most of the functions support a concurrent parameter: if True, the GIL will be +released during matching, allowing other Python threads to run concurrently. If +the string changes during matching, the behaviour is undefined. This parameter +is not needed when working on the builtin (immutable) string classes. + +Some of the functions in this module take flags as optional parameters. Most of +these flags can also be set within an RE: + A a ASCII Make \w, \W, \b, \B, \d, and \D match the + corresponding ASCII character categories. Default + when matching a bytestring. + B b BESTMATCH Find the best fuzzy match (default is first). + D DEBUG Print the parsed pattern. + E e ENHANCEMATCH Attempt to improve the fit after finding the first + fuzzy match. + F f FULLCASE Use full case-folding when performing + case-insensitive matching in Unicode. + I i IGNORECASE Perform case-insensitive matching. + L L LOCALE Make \w, \W, \b, \B, \d, and \D dependent on the + current locale. (One byte per character only.) + M m MULTILINE "^" matches the beginning of lines (after a newline) + as well as the string. "$" matches the end of lines + (before a newline) as well as the end of the string. + P p POSIX Perform POSIX-standard matching (leftmost longest). + R r REVERSE Searches backwards. + S s DOTALL "." matches any character at all, including the + newline. + U u UNICODE Make \w, \W, \b, \B, \d, and \D dependent on the + Unicode locale. Default when matching a Unicode + string. + V0 V0 VERSION0 Turn on the old legacy behaviour. + V1 V1 VERSION1 Turn on the new enhanced behaviour. This flag + includes the FULLCASE flag. + W w WORD Make \b and \B work with default Unicode word breaks + and make ".", "^" and "$" work with Unicode line + breaks. + X x VERBOSE Ignore whitespace and comments for nicer looking REs. + +This module also defines an exception 'error'. 
+ +""" + +# Public symbols. +__all__ = ["cache_all", "compile", "DEFAULT_VERSION", "escape", "findall", + "finditer", "fullmatch", "match", "purge", "search", "split", "splititer", + "sub", "subf", "subfn", "subn", "template", "Scanner", "A", "ASCII", "B", + "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH", "S", "DOTALL", "F", + "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "P", "POSIX", + "R", "REVERSE", "T", "TEMPLATE", "U", "UNICODE", "V0", "VERSION0", "V1", + "VERSION1", "X", "VERBOSE", "W", "WORD", "error", "Regex", "__version__", + "__doc__", "RegexFlag"] + +__version__ = "2.5.142" + +# -------------------------------------------------------------------- +# Public interface. + +def match(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Try to apply the pattern at the start of the string, returning a match + object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.match(string, pos, endpos, concurrent, partial, timeout) + +def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Try to apply the pattern against all of the string, returning a match + object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.fullmatch(string, pos, endpos, concurrent, partial, timeout) + +def search(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Search through string looking for a match to the pattern, returning a + match object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.search(string, pos, endpos, concurrent, partial, timeout) + +def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return the string obtained by replacing the leftmost (or rightmost with a + reverse pattern) non-overlapping occurrences of the pattern in string by the + replacement repl. repl can be either a string or a callable; if a string, + backslash escapes in it are processed; if a callable, it's passed the match + object and must return a replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.sub(repl, string, count, pos, endpos, concurrent, timeout) + +def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return the string obtained by replacing the leftmost (or rightmost with a + reverse pattern) non-overlapping occurrences of the pattern in string by the + replacement format. format can be either a string or a callable; if a string, + it's treated as a format string; if a callable, it's passed the match object + and must return a replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subf(format, string, count, pos, endpos, concurrent, timeout) + +def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return a 2-tuple containing (new_string, number). 
new_string is the string + obtained by replacing the leftmost (or rightmost with a reverse pattern) + non-overlapping occurrences of the pattern in the source string by the + replacement repl. number is the number of substitutions that were made. repl + can be either a string or a callable; if a string, backslash escapes in it + are processed; if a callable, it's passed the match object and must return a + replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subn(repl, string, count, pos, endpos, concurrent, timeout) + +def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return a 2-tuple containing (new_string, number). new_string is the string + obtained by replacing the leftmost (or rightmost with a reverse pattern) + non-overlapping occurrences of the pattern in the source string by the + replacement format. number is the number of substitutions that were made. format + can be either a string or a callable; if a string, it's treated as a format + string; if a callable, it's passed the match object and must return a + replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subfn(format, string, count, pos, endpos, concurrent, timeout) + +def split(pattern, string, maxsplit=0, flags=0, concurrent=None, timeout=None, + ignore_unused=False, **kwargs): + """Split the source string by the occurrences of the pattern, returning a + list containing the resulting substrings. If capturing parentheses are used + in pattern, then the text of all groups in the pattern are also returned as + part of the resulting list. If maxsplit is nonzero, at most maxsplit splits + occur, and the remainder of the string is returned as the final element of + the list.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.split(string, maxsplit, concurrent, timeout) + +def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None, + timeout=None, ignore_unused=False, **kwargs): + "Return an iterator yielding the parts of a split string." + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.splititer(string, maxsplit, concurrent, timeout) + +def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return a list of all matches in the string. The matches may be overlapped + if overlapped is True. If one or more groups are present in the pattern, + return a list of groups; this will be a list of tuples if the pattern has + more than one group. Empty matches are included in the result.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.findall(string, pos, endpos, overlapped, concurrent, timeout) + +def finditer(pattern, string, flags=0, pos=None, endpos=None, overlapped=False, + partial=False, concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return an iterator over all matches in the string. The matches may be + overlapped if overlapped is True. For each match, the iterator returns a + match object. 
Empty matches are included in the result.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.finditer(string, pos, endpos, overlapped, concurrent, partial, + timeout) + +def compile(pattern, flags=0, ignore_unused=False, cache_pattern=None, **kwargs): + "Compile a regular expression pattern, returning a pattern object." + if cache_pattern is None: + cache_pattern = _cache_all + return _compile(pattern, flags, ignore_unused, kwargs, cache_pattern) + +def purge(): + "Clear the regular expression cache" + _cache.clear() + _locale_sensitive.clear() + +# Whether to cache all patterns. +_cache_all = True + +def cache_all(value=True): + """Sets whether to cache all patterns, even those are compiled explicitly. + Passing None has no effect, but returns the current setting.""" + global _cache_all + + if value is None: + return _cache_all + + _cache_all = value + +def template(pattern, flags=0): + "Compile a template pattern, returning a pattern object." + return _compile(pattern, flags | TEMPLATE, False, {}, False) + +def escape(pattern, special_only=True, literal_spaces=False): + """Escape a string for use as a literal in a pattern. If special_only is + True, escape only special characters, else escape all non-alphanumeric + characters. If literal_spaces is True, don't escape spaces.""" + # Convert it to Unicode. + if isinstance(pattern, bytes): + p = pattern.decode("latin-1") + else: + p = pattern + + s = [] + if special_only: + for c in p: + if c == " " and literal_spaces: + s.append(c) + elif c in _METACHARS or c.isspace(): + s.append("\\") + s.append(c) + else: + s.append(c) + else: + for c in p: + if c == " " and literal_spaces: + s.append(c) + elif c in _ALNUM: + s.append(c) + else: + s.append("\\") + s.append(c) + + r = "".join(s) + # Convert it back to bytes if necessary. + if isinstance(pattern, bytes): + r = r.encode("latin-1") + + return r + +# -------------------------------------------------------------------- +# Internals. + +import regex._regex_core as _regex_core +import regex._regex as _regex +from threading import RLock as _RLock +from locale import getpreferredencoding as _getpreferredencoding +from regex._regex_core import * +from regex._regex_core import (_ALL_VERSIONS, _ALL_ENCODINGS, _FirstSetError, + _UnscopedFlagSet, _check_group_features, _compile_firstset, + _compile_replacement, _flatten_code, _fold_case, _get_required_string, + _parse_pattern, _shrink_cache) +from regex._regex_core import (ALNUM as _ALNUM, Info as _Info, OP as _OP, Source + as _Source, Fuzzy as _Fuzzy) + +# Version 0 is the old behaviour, compatible with the original 're' module. +# Version 1 is the new behaviour, which differs slightly. + +DEFAULT_VERSION = VERSION0 + +_METACHARS = frozenset("()[]{}?*+|^$\\.-#&~") + +_regex_core.DEFAULT_VERSION = DEFAULT_VERSION + +# Caches for the patterns and replacements. +_cache = {} +_cache_lock = _RLock() +_named_args = {} +_replacement_cache = {} +_locale_sensitive = {} + +# Maximum size of the cache. +_MAXCACHE = 500 +_MAXREPCACHE = 500 + +def _compile(pattern, flags, ignore_unused, kwargs, cache_it): + "Compiles a regular expression to a PatternObject." + + global DEFAULT_VERSION + try: + from regex import DEFAULT_VERSION + except ImportError: + pass + + # We won't bother to cache the pattern if we're debugging. + if (flags & DEBUG) != 0: + cache_it = False + + # What locale is this pattern using? 
+ locale_key = (type(pattern), pattern) + if _locale_sensitive.get(locale_key, True) or (flags & LOCALE) != 0: + # This pattern is, or might be, locale-sensitive. + pattern_locale = _getpreferredencoding() + else: + # This pattern is definitely not locale-sensitive. + pattern_locale = None + + def complain_unused_args(): + if ignore_unused: + return + + # Complain about any unused keyword arguments, possibly resulting from a typo. + unused_kwargs = set(kwargs) - {k for k, v in args_needed} + if unused_kwargs: + any_one = next(iter(unused_kwargs)) + raise ValueError('unused keyword argument {!a}'.format(any_one)) + + if cache_it: + try: + # Do we know what keyword arguments are needed? + args_key = pattern, type(pattern), flags + args_needed = _named_args[args_key] + + # Are we being provided with its required keyword arguments? + args_supplied = set() + if args_needed: + for k, v in args_needed: + try: + args_supplied.add((k, frozenset(kwargs[k]))) + except KeyError: + raise error("missing named list: {!r}".format(k)) + + complain_unused_args() + + args_supplied = frozenset(args_supplied) + + # Have we already seen this regular expression and named list? + pattern_key = (pattern, type(pattern), flags, args_supplied, + DEFAULT_VERSION, pattern_locale) + return _cache[pattern_key] + except KeyError: + # It's a new pattern, or new named list for a known pattern. + pass + + # Guess the encoding from the class of the pattern string. + if isinstance(pattern, str): + guess_encoding = UNICODE + elif isinstance(pattern, bytes): + guess_encoding = ASCII + elif isinstance(pattern, Pattern): + if flags: + raise ValueError("cannot process flags argument with a compiled pattern") + + return pattern + else: + raise TypeError("first argument must be a string or compiled pattern") + + # Set the default version in the core code in case it has been changed. + _regex_core.DEFAULT_VERSION = DEFAULT_VERSION + + global_flags = flags + + while True: + caught_exception = None + try: + source = _Source(pattern) + info = _Info(global_flags, source.char_type, kwargs) + info.guess_encoding = guess_encoding + source.ignore_space = bool(info.flags & VERBOSE) + parsed = _parse_pattern(source, info) + break + except _UnscopedFlagSet: + # Remember the global flags for the next attempt. + global_flags = info.global_flags + except error as e: + caught_exception = e + + if caught_exception: + raise error(caught_exception.msg, caught_exception.pattern, + caught_exception.pos) + + if not source.at_end(): + raise error("unbalanced parenthesis", pattern, source.pos) + + # Check the global flags for conflicts. + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + if version not in (0, VERSION0, VERSION1): + raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible") + + if (info.flags & _ALL_ENCODINGS) not in (0, ASCII, LOCALE, UNICODE): + raise ValueError("ASCII, LOCALE and UNICODE flags are mutually incompatible") + + if isinstance(pattern, bytes) and (info.flags & UNICODE): + raise ValueError("cannot use UNICODE flag with a bytes pattern") + + if not (info.flags & _ALL_ENCODINGS): + if isinstance(pattern, str): + info.flags |= UNICODE + else: + info.flags |= ASCII + + reverse = bool(info.flags & REVERSE) + fuzzy = isinstance(parsed, _Fuzzy) + + # Remember whether this pattern as an inline locale flag. + _locale_sensitive[locale_key] = info.inline_locale + + # Fix the group references. 
+ caught_exception = None + try: + parsed.fix_groups(pattern, reverse, False) + except error as e: + caught_exception = e + + if caught_exception: + raise error(caught_exception.msg, caught_exception.pattern, + caught_exception.pos) + + # Should we print the parsed pattern? + if flags & DEBUG: + parsed.dump(indent=0, reverse=reverse) + + # Optimise the parsed pattern. + parsed = parsed.optimise(info, reverse) + parsed = parsed.pack_characters(info) + + # Get the required string. + req_offset, req_chars, req_flags = _get_required_string(parsed, info.flags) + + # Build the named lists. + named_lists = {} + named_list_indexes = [None] * len(info.named_lists_used) + args_needed = set() + for key, index in info.named_lists_used.items(): + name, case_flags = key + values = frozenset(kwargs[name]) + if case_flags: + items = frozenset(_fold_case(info, v) for v in values) + else: + items = values + named_lists[name] = values + named_list_indexes[index] = items + args_needed.add((name, values)) + + complain_unused_args() + + # Check the features of the groups. + _check_group_features(info, parsed) + + # Compile the parsed pattern. The result is a list of tuples. + code = parsed.compile(reverse) + + # Is there a group call to the pattern as a whole? + key = (0, reverse, fuzzy) + ref = info.call_refs.get(key) + if ref is not None: + code = [(_OP.CALL_REF, ref)] + code + [(_OP.END, )] + + # Add the final 'success' opcode. + code += [(_OP.SUCCESS, )] + + # Compile the additional copies of the groups that we need. + for group, rev, fuz in info.additional_groups: + code += group.compile(rev, fuz) + + # Flatten the code into a list of ints. + code = _flatten_code(code) + + if not parsed.has_simple_start(): + # Get the first set, if possible. + try: + fs_code = _compile_firstset(info, parsed.get_firstset(reverse)) + fs_code = _flatten_code(fs_code) + code = fs_code + code + except _FirstSetError: + pass + + # The named capture groups. + index_group = dict((v, n) for n, v in info.group_index.items()) + + # Create the PatternObject. + # + # Local flags like IGNORECASE affect the code generation, but aren't needed + # by the PatternObject itself. Conversely, global flags like LOCALE _don't_ + # affect the code generation but _are_ needed by the PatternObject. + compiled_pattern = _regex.compile(pattern, info.flags | version, code, + info.group_index, index_group, named_lists, named_list_indexes, + req_offset, req_chars, req_flags, info.group_count) + + # Do we need to reduce the size of the cache? + if len(_cache) >= _MAXCACHE: + with _cache_lock: + _shrink_cache(_cache, _named_args, _locale_sensitive, _MAXCACHE) + + if cache_it: + if (info.flags & LOCALE) == 0: + pattern_locale = None + + args_needed = frozenset(args_needed) + + # Store this regular expression and named list. + pattern_key = (pattern, type(pattern), flags, args_needed, + DEFAULT_VERSION, pattern_locale) + _cache[pattern_key] = compiled_pattern + + # Store what keyword arguments are needed. + _named_args[args_key] = args_needed + + return compiled_pattern + +def _compile_replacement_helper(pattern, template): + "Compiles a replacement template." + # This function is called by the _regex module. + + # Have we seen this before? 
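+ # Compiled replacement templates are cached per (pattern, flags, template).
+ # When the cache grows past _MAXREPCACHE it is simply cleared rather than
+ # pruned entry by entry.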
+ key = pattern.pattern, pattern.flags, template + compiled = _replacement_cache.get(key) + if compiled is not None: + return compiled + + if len(_replacement_cache) >= _MAXREPCACHE: + _replacement_cache.clear() + + is_unicode = isinstance(template, str) + source = _Source(template) + if is_unicode: + def make_string(char_codes): + return "".join(chr(c) for c in char_codes) + else: + def make_string(char_codes): + return bytes(char_codes) + + compiled = [] + literal = [] + while True: + ch = source.get() + if not ch: + break + if ch == "\\": + # '_compile_replacement' will return either an int group reference + # or a string literal. It returns items (plural) in order to handle + # a 2-character literal (an invalid escape sequence). + is_group, items = _compile_replacement(source, pattern, is_unicode) + if is_group: + # It's a group, so first flush the literal. + if literal: + compiled.append(make_string(literal)) + literal = [] + compiled.extend(items) + else: + literal.extend(items) + else: + literal.append(ord(ch)) + + # Flush the literal. + if literal: + compiled.append(make_string(literal)) + + _replacement_cache[key] = compiled + + return compiled + +# We define Pattern here after all the support objects have been defined. +_pat = _compile('', 0, False, {}, False) +Pattern = type(_pat) +Match = type(_pat.match('')) +del _pat + +# Make Pattern public for typing annotations. +__all__.append("Pattern") +__all__.append("Match") + +# We'll define an alias for the 'compile' function so that the repr of a +# pattern object is eval-able. +Regex = compile + +# Register myself for pickling. +import copyreg as _copy_reg + +def _pickle(pattern): + return _regex.compile, pattern._pickled_data + +_copy_reg.pickle(Pattern, _pickle) diff --git a/llmeval-env/lib/python3.10/site-packages/regex/test_regex.py b/llmeval-env/lib/python3.10/site-packages/regex/test_regex.py new file mode 100644 index 0000000000000000000000000000000000000000..8a02751cd2cb83b2595cdb7ceb49e7ae791fd4a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/regex/test_regex.py @@ -0,0 +1,4465 @@ +from weakref import proxy +import copy +import pickle +import regex +import string +import sys +import unittest + +# String subclasses for issue 18468. 
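+# They override __getitem__ so that slicing returns the subclass; the tests
+# below use them to exercise str/bytes subclasses as inputs.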
+class StrSubclass(str): + def __getitem__(self, index): + return StrSubclass(super().__getitem__(index)) + +class BytesSubclass(bytes): + def __getitem__(self, index): + return BytesSubclass(super().__getitem__(index)) + +class RegexTests(unittest.TestCase): + PATTERN_CLASS = "" + FLAGS_WITH_COMPILED_PAT = "cannot process flags argument with a compiled pattern" + INVALID_GROUP_REF = "invalid group reference" + MISSING_GT = "missing >" + BAD_GROUP_NAME = "bad character in group name" + MISSING_GROUP_NAME = "missing group name" + MISSING_LT = "missing <" + UNKNOWN_GROUP_I = "unknown group" + UNKNOWN_GROUP = "unknown group" + BAD_ESCAPE = r"bad escape \(end of pattern\)" + BAD_OCTAL_ESCAPE = r"bad escape \\" + BAD_SET = "unterminated character set" + STR_PAT_ON_BYTES = "cannot use a string pattern on a bytes-like object" + BYTES_PAT_ON_STR = "cannot use a bytes pattern on a string-like object" + STR_PAT_BYTES_TEMPL = "expected str instance, bytes found" + BYTES_PAT_STR_TEMPL = "expected a bytes-like object, str found" + BYTES_PAT_UNI_FLAG = "cannot use UNICODE flag with a bytes pattern" + MIXED_FLAGS = "ASCII, LOCALE and UNICODE flags are mutually incompatible" + MISSING_RPAREN = "missing \\)" + TRAILING_CHARS = "unbalanced parenthesis" + BAD_CHAR_RANGE = "bad character range" + NOTHING_TO_REPEAT = "nothing to repeat" + MULTIPLE_REPEAT = "multiple repeat" + OPEN_GROUP = "cannot refer to an open group" + DUPLICATE_GROUP = "duplicate group" + CANT_TURN_OFF = "bad inline flags: cannot turn flags off" + UNDEF_CHAR_NAME = "undefined character name" + + def assertTypedEqual(self, actual, expect, msg=None): + self.assertEqual(actual, expect, msg) + + def recurse(actual, expect): + if isinstance(expect, (tuple, list)): + for x, y in zip(actual, expect): + recurse(x, y) + else: + self.assertIs(type(actual), type(expect), msg) + + recurse(actual, expect) + + def test_weakref(self): + s = 'QabbbcR' + x = regex.compile('ab+c') + y = proxy(x) + if x.findall('QabbbcR') != y.findall('QabbbcR'): + self.fail() + + def test_search_star_plus(self): + self.assertEqual(regex.search('a*', 'xxx').span(0), (0, 0)) + self.assertEqual(regex.search('x*', 'axx').span(), (0, 0)) + self.assertEqual(regex.search('x+', 'axx').span(0), (1, 3)) + self.assertEqual(regex.search('x+', 'axx').span(), (1, 3)) + self.assertEqual(regex.search('x', 'aaa'), None) + self.assertEqual(regex.match('a*', 'xxx').span(0), (0, 0)) + self.assertEqual(regex.match('a*', 'xxx').span(), (0, 0)) + self.assertEqual(regex.match('x*', 'xxxa').span(0), (0, 3)) + self.assertEqual(regex.match('x*', 'xxxa').span(), (0, 3)) + self.assertEqual(regex.match('a+', 'xxx'), None) + + def bump_num(self, matchobj): + int_value = int(matchobj[0]) + return str(int_value + 1) + + def test_basic_regex_sub(self): + self.assertEqual(regex.sub("(?i)b+", "x", "bbbb BBBB"), 'x x') + self.assertEqual(regex.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'), + '9.3 -3 24x100y') + self.assertEqual(regex.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3), + '9.3 -3 23x99y') + + self.assertEqual(regex.sub('.', lambda m: r"\n", 'x'), "\\n") + self.assertEqual(regex.sub('.', r"\n", 'x'), "\n") + + self.assertEqual(regex.sub('(?Px)', r'\g\g', 'xx'), 'xxxx') + self.assertEqual(regex.sub('(?Px)', r'\g\g<1>', 'xx'), 'xxxx') + self.assertEqual(regex.sub('(?Px)', r'\g\g', 'xx'), + 'xxxx') + self.assertEqual(regex.sub('(?Px)', r'\g<1>\g<1>', 'xx'), 'xxxx') + + self.assertEqual(regex.sub('a', r'\t\n\v\r\f\a\b', 'a'), "\t\n\v\r\f\a\b") + self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'), 
"\t\n\v\r\f\a") + self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'), chr(9) + chr(10) + + chr(11) + chr(13) + chr(12) + chr(7)) + + self.assertEqual(regex.sub(r'^\s*', 'X', 'test'), 'Xtest') + + self.assertEqual(regex.sub(r"x", r"\x0A", "x"), "\n") + self.assertEqual(regex.sub(r"x", r"\u000A", "x"), "\n") + self.assertEqual(regex.sub(r"x", r"\U0000000A", "x"), "\n") + self.assertEqual(regex.sub(r"x", r"\N{LATIN CAPITAL LETTER A}", + "x"), "A") + + self.assertEqual(regex.sub(br"x", br"\x0A", b"x"), b"\n") + + def test_bug_449964(self): + # Fails for group followed by other escape. + self.assertEqual(regex.sub(r'(?Px)', r'\g<1>\g<1>\b', 'xx'), + "xx\bxx\b") + + def test_bug_449000(self): + # Test for sub() on escaped characters. + self.assertEqual(regex.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'), + "abc\ndef\n") + self.assertEqual(regex.sub('\r\n', r'\n', 'abc\r\ndef\r\n'), + "abc\ndef\n") + self.assertEqual(regex.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'), + "abc\ndef\n") + self.assertEqual(regex.sub('\r\n', '\n', 'abc\r\ndef\r\n'), + "abc\ndef\n") + + def test_bug_1661(self): + # Verify that flags do not get silently ignored with compiled patterns + pattern = regex.compile('.') + self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT, + lambda: regex.match(pattern, 'A', regex.I)) + self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT, + lambda: regex.search(pattern, 'A', regex.I)) + self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT, + lambda: regex.findall(pattern, 'A', regex.I)) + self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT, + lambda: regex.compile(pattern, regex.I)) + + def test_bug_3629(self): + # A regex that triggered a bug in the sre-code validator + self.assertEqual(repr(type(regex.compile("(?P)(?(quote))"))), + self.PATTERN_CLASS) + + def test_sub_template_numeric_escape(self): + # Bug 776311 and friends. 
+ self.assertEqual(regex.sub('x', r'\0', 'x'), "\0") + self.assertEqual(regex.sub('x', r'\000', 'x'), "\000") + self.assertEqual(regex.sub('x', r'\001', 'x'), "\001") + self.assertEqual(regex.sub('x', r'\008', 'x'), "\0" + "8") + self.assertEqual(regex.sub('x', r'\009', 'x'), "\0" + "9") + self.assertEqual(regex.sub('x', r'\111', 'x'), "\111") + self.assertEqual(regex.sub('x', r'\117', 'x'), "\117") + + self.assertEqual(regex.sub('x', r'\1111', 'x'), "\1111") + self.assertEqual(regex.sub('x', r'\1111', 'x'), "\111" + "1") + + self.assertEqual(regex.sub('x', r'\00', 'x'), '\x00') + self.assertEqual(regex.sub('x', r'\07', 'x'), '\x07') + self.assertEqual(regex.sub('x', r'\08', 'x'), "\0" + "8") + self.assertEqual(regex.sub('x', r'\09', 'x'), "\0" + "9") + self.assertEqual(regex.sub('x', r'\0a', 'x'), "\0" + "a") + + self.assertEqual(regex.sub('x', r'\400', 'x'), "\u0100") + self.assertEqual(regex.sub('x', r'\777', 'x'), "\u01FF") + self.assertEqual(regex.sub(b'x', br'\400', b'x'), b"\x00") + self.assertEqual(regex.sub(b'x', br'\777', b'x'), b"\xFF") + + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\1', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\8', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\9', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\11', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\18', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\1a', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\90', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\99', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\118', 'x')) # r'\11' + '8' + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\11a', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\181', 'x')) # r'\18' + '1' + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\800', 'x')) # r'\80' + '0' + + # In Python 2.3 (etc), these loop endlessly in sre_parser.py. + self.assertEqual(regex.sub('(((((((((((x)))))))))))', r'\11', 'x'), + 'x') + self.assertEqual(regex.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'), + 'xz8') + self.assertEqual(regex.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'), + 'xza') + + def test_qualified_re_sub(self): + self.assertEqual(regex.sub('a', 'b', 'aaaaa'), 'bbbbb') + self.assertEqual(regex.sub('a', 'b', 'aaaaa', 1), 'baaaa') + + def test_bug_114660(self): + self.assertEqual(regex.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'), + 'hello there') + + def test_bug_462270(self): + # Test for empty sub() behaviour, see SF bug #462270 + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0)x*', '-', 'abxd'), '-a-b--d-') + else: + self.assertEqual(regex.sub('(?V0)x*', '-', 'abxd'), '-a-b-d-') + self.assertEqual(regex.sub('(?V1)x*', '-', 'abxd'), '-a-b--d-') + self.assertEqual(regex.sub('x+', '-', 'abxd'), 'ab-d') + + def test_bug_14462(self): + # chr(255) is a valid identifier in Python 3. 
+ group_name = '\xFF' + self.assertEqual(regex.search(r'(?P<' + group_name + '>a)', + 'abc').group(group_name), 'a') + + def test_symbolic_refs(self): + self.assertRaisesRegex(regex.error, self.MISSING_GT, lambda: + regex.sub('(?Px)', r'\gx)', r'\g<', 'xx')) + self.assertRaisesRegex(regex.error, self.MISSING_LT, lambda: + regex.sub('(?Px)', r'\g', 'xx')) + self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda: + regex.sub('(?Px)', r'\g', 'xx')) + self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda: + regex.sub('(?Px)', r'\g<1a1>', 'xx')) + self.assertRaisesRegex(IndexError, self.UNKNOWN_GROUP_I, lambda: + regex.sub('(?Px)', r'\g', 'xx')) + + # The new behaviour of unmatched but valid groups is to treat them like + # empty matches in the replacement template, like in Perl. + self.assertEqual(regex.sub('(?Px)|(?Py)', r'\g', 'xx'), '') + self.assertEqual(regex.sub('(?Px)|(?Py)', r'\2', 'xx'), '') + + # The old behaviour was to raise it as an IndexError. + self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda: + regex.sub('(?Px)', r'\g<-1>', 'xx')) + + def test_re_subn(self): + self.assertEqual(regex.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2)) + self.assertEqual(regex.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1)) + self.assertEqual(regex.subn("b+", "x", "xyz"), ('xyz', 0)) + self.assertEqual(regex.subn("b*", "x", "xyz"), ('xxxyxzx', 4)) + self.assertEqual(regex.subn("b*", "x", "xyz", 2), ('xxxyz', 2)) + + def test_re_split(self): + self.assertEqual(regex.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c']) + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split(":*", ":a:b::c"), ['', '', 'a', '', + 'b', '', 'c', '']) + self.assertEqual(regex.split("(:*)", ":a:b::c"), ['', ':', '', '', + 'a', ':', '', '', 'b', '::', '', '', 'c', '', '']) + self.assertEqual(regex.split("(?::*)", ":a:b::c"), ['', '', 'a', + '', 'b', '', 'c', '']) + self.assertEqual(regex.split("(:)*", ":a:b::c"), ['', ':', '', + None, 'a', ':', '', None, 'b', ':', '', None, 'c', None, '']) + else: + self.assertEqual(regex.split(":*", ":a:b::c"), ['', 'a', 'b', 'c']) + self.assertEqual(regex.split("(:*)", ":a:b::c"), ['', ':', 'a', + ':', 'b', '::', 'c']) + self.assertEqual(regex.split("(?::*)", ":a:b::c"), ['', 'a', 'b', + 'c']) + self.assertEqual(regex.split("(:)*", ":a:b::c"), ['', ':', 'a', + ':', 'b', ':', 'c']) + self.assertEqual(regex.split("([b:]+)", ":a:b::c"), ['', ':', 'a', + ':b::', 'c']) + self.assertEqual(regex.split("(b)|(:+)", ":a:b::c"), ['', None, ':', + 'a', None, ':', '', 'b', None, '', None, '::', 'c']) + self.assertEqual(regex.split("(?:b)|(?::+)", ":a:b::c"), ['', 'a', '', + '', 'c']) + + self.assertEqual(regex.split("x", "xaxbxc"), ['', 'a', 'b', 'c']) + self.assertEqual([m for m in regex.splititer("x", "xaxbxc")], ['', 'a', + 'b', 'c']) + + self.assertEqual(regex.split("(?r)x", "xaxbxc"), ['c', 'b', 'a', '']) + self.assertEqual([m for m in regex.splititer("(?r)x", "xaxbxc")], ['c', + 'b', 'a', '']) + + self.assertEqual(regex.split("(x)|(y)", "xaxbxc"), ['', 'x', None, 'a', + 'x', None, 'b', 'x', None, 'c']) + self.assertEqual([m for m in regex.splititer("(x)|(y)", "xaxbxc")], + ['', 'x', None, 'a', 'x', None, 'b', 'x', None, 'c']) + + self.assertEqual(regex.split("(?r)(x)|(y)", "xaxbxc"), ['c', 'x', None, + 'b', 'x', None, 'a', 'x', None, '']) + self.assertEqual([m for m in regex.splititer("(?r)(x)|(y)", "xaxbxc")], + ['c', 'x', None, 'b', 'x', None, 'a', 'x', None, '']) + + self.assertEqual(regex.split(r"(?V1)\b", "a b c"), ['', 'a', ' ', 'b', + ' ', 'c', '']) + 
self.assertEqual(regex.split(r"(?V1)\m", "a b c"), ['', 'a ', 'b ', + 'c']) + self.assertEqual(regex.split(r"(?V1)\M", "a b c"), ['a', ' b', ' c', + '']) + + def test_qualified_re_split(self): + self.assertEqual(regex.split(":", ":a:b::c", 2), ['', 'a', 'b::c']) + self.assertEqual(regex.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d']) + self.assertEqual(regex.split("(:)", ":a:b::c", 2), ['', ':', 'a', ':', + 'b::c']) + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split("(:*)", ":a:b::c", 2), ['', ':', '', + '', 'a:b::c']) + else: + self.assertEqual(regex.split("(:*)", ":a:b::c", 2), ['', ':', 'a', + ':', 'b::c']) + + def test_re_findall(self): + self.assertEqual(regex.findall(":+", "abc"), []) + self.assertEqual(regex.findall(":+", "a:b::c:::d"), [':', '::', ':::']) + self.assertEqual(regex.findall("(:+)", "a:b::c:::d"), [':', '::', + ':::']) + self.assertEqual(regex.findall("(:)(:*)", "a:b::c:::d"), [(':', ''), + (':', ':'), (':', '::')]) + + self.assertEqual(regex.findall(r"\((?P.{0,5}?TEST)\)", + "(MY TEST)"), ["MY TEST"]) + self.assertEqual(regex.findall(r"\((?P.{0,3}?TEST)\)", + "(MY TEST)"), ["MY TEST"]) + self.assertEqual(regex.findall(r"\((?P.{0,3}?T)\)", "(MY T)"), + ["MY T"]) + + self.assertEqual(regex.findall(r"[^a]{2}[A-Z]", "\n S"), [' S']) + self.assertEqual(regex.findall(r"[^a]{2,3}[A-Z]", "\n S"), ['\n S']) + self.assertEqual(regex.findall(r"[^a]{2,3}[A-Z]", "\n S"), [' S']) + + self.assertEqual(regex.findall(r"X(Y[^Y]+?){1,2}( |Q)+DEF", + "XYABCYPPQ\nQ DEF"), [('YPPQ\n', ' ')]) + + self.assertEqual(regex.findall(r"(\nTest(\n+.+?){0,2}?)?\n+End", + "\nTest\nxyz\nxyz\nEnd"), [('\nTest\nxyz\nxyz', '\nxyz')]) + + def test_bug_117612(self): + self.assertEqual(regex.findall(r"(a|(b))", "aba"), [('a', ''), ('b', + 'b'), ('a', '')]) + + def test_re_match(self): + self.assertEqual(regex.match('a', 'a')[:], ('a',)) + self.assertEqual(regex.match('(a)', 'a')[:], ('a', 'a')) + self.assertEqual(regex.match(r'(a)', 'a')[0], 'a') + self.assertEqual(regex.match(r'(a)', 'a')[1], 'a') + self.assertEqual(regex.match(r'(a)', 'a').group(1, 1), ('a', 'a')) + + pat = regex.compile('((a)|(b))(c)?') + self.assertEqual(pat.match('a')[:], ('a', 'a', 'a', None, None)) + self.assertEqual(pat.match('b')[:], ('b', 'b', None, 'b', None)) + self.assertEqual(pat.match('ac')[:], ('ac', 'a', 'a', None, 'c')) + self.assertEqual(pat.match('bc')[:], ('bc', 'b', None, 'b', 'c')) + self.assertEqual(pat.match('bc')[:], ('bc', 'b', None, 'b', 'c')) + + # A single group. 
+ m = regex.match('(a)', 'a') + self.assertEqual(m.group(), 'a') + self.assertEqual(m.group(0), 'a') + self.assertEqual(m.group(1), 'a') + self.assertEqual(m.group(1, 1), ('a', 'a')) + + pat = regex.compile('(?:(?Pa)|(?Pb))(?Pc)?') + self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None)) + self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'), (None, 'b', + None)) + self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c')) + + def test_re_groupref_exists(self): + self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', '(a)')[:], + ('(a)', '(', 'a')) + self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', 'a')[:], ('a', + None, 'a')) + self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', 'a)'), None) + self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', '(a'), None) + self.assertEqual(regex.match('^(?:(a)|c)((?(1)b|d))$', 'ab')[:], ('ab', + 'a', 'b')) + self.assertEqual(regex.match('^(?:(a)|c)((?(1)b|d))$', 'cd')[:], ('cd', + None, 'd')) + self.assertEqual(regex.match('^(?:(a)|c)((?(1)|d))$', 'cd')[:], ('cd', + None, 'd')) + self.assertEqual(regex.match('^(?:(a)|c)((?(1)|d))$', 'a')[:], ('a', + 'a', '')) + + # Tests for bug #1177831: exercise groups other than the first group. + p = regex.compile('(?Pa)(?Pb)?((?(g2)c|d))') + self.assertEqual(p.match('abc')[:], ('abc', 'a', 'b', 'c')) + self.assertEqual(p.match('ad')[:], ('ad', 'a', None, 'd')) + self.assertEqual(p.match('abd'), None) + self.assertEqual(p.match('ac'), None) + + def test_re_groupref(self): + self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', '|a|')[:], ('|a|', + '|', 'a')) + self.assertEqual(regex.match(r'^(\|)?([^()]+)\1?$', 'a')[:], ('a', + None, 'a')) + self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', 'a|'), None) + self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', '|a'), None) + self.assertEqual(regex.match(r'^(?:(a)|c)(\1)$', 'aa')[:], ('aa', 'a', + 'a')) + self.assertEqual(regex.match(r'^(?:(a)|c)(\1)?$', 'c')[:], ('c', None, + None)) + + self.assertEqual(regex.findall(r"(?i)(.{1,40}?),(.{1,40}?)(?:;)+(.{1,80}).{1,40}?\3(\ |;)+(.{1,80}?)\1", + "TEST, BEST; LEST ; Lest 123 Test, Best"), [('TEST', ' BEST', + ' LEST', ' ', '123 ')]) + + def test_groupdict(self): + self.assertEqual(regex.match('(?Pfirst) (?Psecond)', + 'first second').groupdict(), {'first': 'first', 'second': 'second'}) + + def test_expand(self): + self.assertEqual(regex.match("(?Pfirst) (?Psecond)", + "first second").expand(r"\2 \1 \g \g"), + 'second first second first') + + def test_repeat_minmax(self): + self.assertEqual(regex.match(r"^(\w){1}$", "abc"), None) + self.assertEqual(regex.match(r"^(\w){1}?$", "abc"), None) + self.assertEqual(regex.match(r"^(\w){1,2}$", "abc"), None) + self.assertEqual(regex.match(r"^(\w){1,2}?$", "abc"), None) + + self.assertEqual(regex.match(r"^(\w){3}$", "abc")[1], 'c') + self.assertEqual(regex.match(r"^(\w){1,3}$", "abc")[1], 'c') + self.assertEqual(regex.match(r"^(\w){1,4}$", "abc")[1], 'c') + self.assertEqual(regex.match(r"^(\w){3,4}?$", "abc")[1], 'c') + self.assertEqual(regex.match(r"^(\w){3}?$", "abc")[1], 'c') + self.assertEqual(regex.match(r"^(\w){1,3}?$", "abc")[1], 'c') + self.assertEqual(regex.match(r"^(\w){1,4}?$", "abc")[1], 'c') + self.assertEqual(regex.match(r"^(\w){3,4}?$", "abc")[1], 'c') + + self.assertEqual(regex.match("^x{1}$", "xxx"), None) + self.assertEqual(regex.match("^x{1}?$", "xxx"), None) + self.assertEqual(regex.match("^x{1,2}$", "xxx"), None) + self.assertEqual(regex.match("^x{1,2}?$", "xxx"), None) + + self.assertEqual(regex.match("^x{1}", "xxx")[0], 
'x') + self.assertEqual(regex.match("^x{1}?", "xxx")[0], 'x') + self.assertEqual(regex.match("^x{0,1}", "xxx")[0], 'x') + self.assertEqual(regex.match("^x{0,1}?", "xxx")[0], '') + + self.assertEqual(bool(regex.match("^x{3}$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{1,3}$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{1,4}$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{3,4}?$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{3}?$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{1,3}?$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{1,4}?$", "xxx")), True) + self.assertEqual(bool(regex.match("^x{3,4}?$", "xxx")), True) + + self.assertEqual(regex.match("^x{}$", "xxx"), None) + self.assertEqual(bool(regex.match("^x{}$", "x{}")), True) + + def test_getattr(self): + self.assertEqual(regex.compile("(?i)(a)(b)").pattern, '(?i)(a)(b)') + self.assertEqual(regex.compile("(?i)(a)(b)").flags, regex.I | regex.U | + regex.DEFAULT_VERSION) + self.assertEqual(regex.compile(b"(?i)(a)(b)").flags, regex.A | regex.I + | regex.DEFAULT_VERSION) + self.assertEqual(regex.compile("(?i)(a)(b)").groups, 2) + self.assertEqual(regex.compile("(?i)(a)(b)").groupindex, {}) + + self.assertEqual(regex.compile("(?i)(?Pa)(?Pb)").groupindex, + {'first': 1, 'other': 2}) + + self.assertEqual(regex.match("(a)", "a").pos, 0) + self.assertEqual(regex.match("(a)", "a").endpos, 1) + + self.assertEqual(regex.search("b(c)", "abcdef").pos, 0) + self.assertEqual(regex.search("b(c)", "abcdef").endpos, 6) + self.assertEqual(regex.search("b(c)", "abcdef").span(), (1, 3)) + self.assertEqual(regex.search("b(c)", "abcdef").span(1), (2, 3)) + + self.assertEqual(regex.match("(a)", "a").string, 'a') + self.assertEqual(regex.match("(a)", "a").regs, ((0, 1), (0, 1))) + self.assertEqual(repr(type(regex.match("(a)", "a").re)), + self.PATTERN_CLASS) + + # Issue 14260. + p = regex.compile(r'abc(?Pdef)') + p.groupindex["n"] = 0 + self.assertEqual(p.groupindex["n"], 1) + + def test_special_escapes(self): + self.assertEqual(regex.search(r"\b(b.)\b", "abcd abc bcd bx")[1], 'bx') + self.assertEqual(regex.search(r"\B(b.)\B", "abc bcd bc abxd")[1], 'bx') + self.assertEqual(regex.search(br"\b(b.)\b", b"abcd abc bcd bx", + regex.LOCALE)[1], b'bx') + self.assertEqual(regex.search(br"\B(b.)\B", b"abc bcd bc abxd", + regex.LOCALE)[1], b'bx') + self.assertEqual(regex.search(r"\b(b.)\b", "abcd abc bcd bx", + regex.UNICODE)[1], 'bx') + self.assertEqual(regex.search(r"\B(b.)\B", "abc bcd bc abxd", + regex.UNICODE)[1], 'bx') + + self.assertEqual(regex.search(r"^abc$", "\nabc\n", regex.M)[0], 'abc') + self.assertEqual(regex.search(r"^\Aabc\Z$", "abc", regex.M)[0], 'abc') + self.assertEqual(regex.search(r"^\Aabc\Z$", "\nabc\n", regex.M), None) + + self.assertEqual(regex.search(br"\b(b.)\b", b"abcd abc bcd bx")[1], + b'bx') + self.assertEqual(regex.search(br"\B(b.)\B", b"abc bcd bc abxd")[1], + b'bx') + self.assertEqual(regex.search(br"^abc$", b"\nabc\n", regex.M)[0], + b'abc') + self.assertEqual(regex.search(br"^\Aabc\Z$", b"abc", regex.M)[0], + b'abc') + self.assertEqual(regex.search(br"^\Aabc\Z$", b"\nabc\n", regex.M), + None) + + self.assertEqual(regex.search(r"\d\D\w\W\s\S", "1aa! a")[0], '1aa! a') + self.assertEqual(regex.search(br"\d\D\w\W\s\S", b"1aa! a", + regex.LOCALE)[0], b'1aa! a') + self.assertEqual(regex.search(r"\d\D\w\W\s\S", "1aa! a", + regex.UNICODE)[0], '1aa! 
a') + + def test_bigcharset(self): + self.assertEqual(regex.match(r"([\u2222\u2223])", "\u2222")[1], + '\u2222') + self.assertEqual(regex.match(r"([\u2222\u2223])", "\u2222", + regex.UNICODE)[1], '\u2222') + self.assertEqual("".join(regex.findall(".", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + self.assertEqual("".join(regex.findall(r"[e\xe8\xe9\xea\xeb\u0113\u011b\u0117]", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + self.assertEqual("".join(regex.findall(r"e|\xe8|\xe9|\xea|\xeb|\u0113|\u011b|\u0117", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + + def test_anyall(self): + self.assertEqual(regex.match("a.b", "a\nb", regex.DOTALL)[0], "a\nb") + self.assertEqual(regex.match("a.*b", "a\n\nb", regex.DOTALL)[0], + "a\n\nb") + + def test_non_consuming(self): + self.assertEqual(regex.match(r"(a(?=\s[^a]))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[^a]*))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[abc]))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[abc]*))", "a bc")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s\1)", "a a")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s\1*)", "a aa")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s(abc|a))", "a a")[1], 'a') + + self.assertEqual(regex.match(r"(a(?!\s[^a]))", "a a")[1], 'a') + self.assertEqual(regex.match(r"(a(?!\s[abc]))", "a d")[1], 'a') + self.assertEqual(regex.match(r"(a)(?!\s\1)", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a)(?!\s(abc|a))", "a b")[1], 'a') + + def test_ignore_case(self): + self.assertEqual(regex.match("abc", "ABC", regex.I)[0], 'ABC') + self.assertEqual(regex.match(b"abc", b"ABC", regex.I)[0], b'ABC') + + self.assertEqual(regex.match(r"(a\s[^a]*)", "a bb", regex.I)[1], + 'a bb') + self.assertEqual(regex.match(r"(a\s[abc])", "a b", regex.I)[1], 'a b') + self.assertEqual(regex.match(r"(a\s[abc]*)", "a bb", regex.I)[1], + 'a bb') + self.assertEqual(regex.match(r"((a)\s\2)", "a a", regex.I)[1], 'a a') + self.assertEqual(regex.match(r"((a)\s\2*)", "a aa", regex.I)[1], + 'a aa') + self.assertEqual(regex.match(r"((a)\s(abc|a))", "a a", regex.I)[1], + 'a a') + self.assertEqual(regex.match(r"((a)\s(abc|a)*)", "a aa", regex.I)[1], + 'a aa') + + # Issue 3511. 
+ self.assertEqual(regex.match(r"[Z-a]", "_").span(), (0, 1)) + self.assertEqual(regex.match(r"(?i)[Z-a]", "_").span(), (0, 1)) + + self.assertEqual(bool(regex.match(r"(?i)nao", "nAo")), True) + self.assertEqual(bool(regex.match(r"(?i)n\xE3o", "n\xC3o")), True) + self.assertEqual(bool(regex.match(r"(?i)n\xE3o", "N\xC3O")), True) + self.assertEqual(bool(regex.match(r"(?i)s", "\u017F")), True) + + def test_case_folding(self): + self.assertEqual(regex.search(r"(?fi)ss", "SS").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)SS", "ss").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)SS", + "\N{LATIN SMALL LETTER SHARP S}").span(), (0, 1)) + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LETTER SHARP S}", + "SS").span(), (0, 2)) + + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE ST}", + "ST").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)ST", + "\N{LATIN SMALL LIGATURE ST}").span(), (0, 1)) + self.assertEqual(regex.search(r"(?fi)ST", + "\N{LATIN SMALL LIGATURE LONG S T}").span(), (0, 1)) + + self.assertEqual(regex.search(r"(?fi)SST", + "\N{LATIN SMALL LETTER SHARP S}t").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)SST", + "s\N{LATIN SMALL LIGATURE LONG S T}").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)SST", + "s\N{LATIN SMALL LIGATURE ST}").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE ST}", + "SST").span(), (1, 3)) + self.assertEqual(regex.search(r"(?fi)SST", + "s\N{LATIN SMALL LIGATURE ST}").span(), (0, 2)) + + self.assertEqual(regex.search(r"(?fi)FFI", + "\N{LATIN SMALL LIGATURE FFI}").span(), (0, 1)) + self.assertEqual(regex.search(r"(?fi)FFI", + "\N{LATIN SMALL LIGATURE FF}i").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)FFI", + "f\N{LATIN SMALL LIGATURE FI}").span(), (0, 2)) + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE FFI}", + "FFI").span(), (0, 3)) + self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE FF}i", + "FFI").span(), (0, 3)) + self.assertEqual(regex.search(r"(?fi)f\N{LATIN SMALL LIGATURE FI}", + "FFI").span(), (0, 3)) + + sigma = "\u03A3\u03C3\u03C2" + for ch1 in sigma: + for ch2 in sigma: + if not regex.match(r"(?fi)" + ch1, ch2): + self.fail() + + self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB01\uFB00")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB01\uFB00")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fffi", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)f\uFB03", + "\uFB00\uFB01")), True) + self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)fffi", "\uFB00\uFB01")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)f\uFB03", + "\uFB00\uFB01")), True) + self.assertEqual(bool(regex.search(r"(?iV1)f\uFB01", "\uFB00i")), + True) + self.assertEqual(bool(regex.search(r"(?iV1)f\uFB01", "\uFB00i")), + True) + + self.assertEqual(regex.findall(r"(?iV0)\m(?:word){e<=3}\M(?ne", "affine", + options=["\N{LATIN SMALL LIGATURE FFI}"]).span(), (0, 6)) + self.assertEqual(regex.search(r"(?fi)a\Lne", + "a\N{LATIN SMALL LIGATURE FFI}ne", options=["ffi"]).span(), (0, 4)) + + def test_category(self): + self.assertEqual(regex.match(r"(\s)", " ")[1], ' ') + + def test_not_literal(self): + 
self.assertEqual(regex.search(r"\s([^a])", " b")[1], 'b') + self.assertEqual(regex.search(r"\s([^a]*)", " bb")[1], 'bb') + + def test_search_coverage(self): + self.assertEqual(regex.search(r"\s(b)", " b")[1], 'b') + self.assertEqual(regex.search(r"a\s", "a ")[0], 'a ') + + def test_re_escape(self): + p = "" + self.assertEqual(regex.escape(p), p) + for i in range(0, 256): + p += chr(i) + self.assertEqual(bool(regex.match(regex.escape(chr(i)), chr(i))), + True) + self.assertEqual(regex.match(regex.escape(chr(i)), chr(i)).span(), + (0, 1)) + + pat = regex.compile(regex.escape(p)) + self.assertEqual(pat.match(p).span(), (0, 256)) + + def test_re_escape_byte(self): + p = b"" + self.assertEqual(regex.escape(p), p) + for i in range(0, 256): + b = bytes([i]) + p += b + self.assertEqual(bool(regex.match(regex.escape(b), b)), True) + self.assertEqual(regex.match(regex.escape(b), b).span(), (0, 1)) + + pat = regex.compile(regex.escape(p)) + self.assertEqual(pat.match(p).span(), (0, 256)) + + def test_constants(self): + if regex.I != regex.IGNORECASE: + self.fail() + if regex.L != regex.LOCALE: + self.fail() + if regex.M != regex.MULTILINE: + self.fail() + if regex.S != regex.DOTALL: + self.fail() + if regex.X != regex.VERBOSE: + self.fail() + + def test_flags(self): + for flag in [regex.I, regex.M, regex.X, regex.S, regex.L]: + self.assertEqual(repr(type(regex.compile('^pattern$', flag))), + self.PATTERN_CLASS) + + def test_sre_character_literals(self): + for i in [0, 8, 16, 32, 64, 127, 128, 255]: + self.assertEqual(bool(regex.match(r"\%03o" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"\%03o0" % i, chr(i) + "0")), + True) + self.assertEqual(bool(regex.match(r"\%03o8" % i, chr(i) + "8")), + True) + self.assertEqual(bool(regex.match(r"\x%02x" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"\x%02x0" % i, chr(i) + "0")), + True) + self.assertEqual(bool(regex.match(r"\x%02xz" % i, chr(i) + "z")), + True) + + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.match(r"\911", "")) + + def test_sre_character_class_literals(self): + for i in [0, 8, 16, 32, 64, 127, 128, 255]: + self.assertEqual(bool(regex.match(r"[\%03o]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\%03o0]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\%03o8]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\x%02x]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\x%02x0]" % i, chr(i))), True) + self.assertEqual(bool(regex.match(r"[\x%02xz]" % i, chr(i))), True) + + self.assertRaisesRegex(regex.error, self.BAD_OCTAL_ESCAPE, lambda: + regex.match(r"[\911]", "")) + + def test_bug_113254(self): + self.assertEqual(regex.match(r'(a)|(b)', 'b').start(1), -1) + self.assertEqual(regex.match(r'(a)|(b)', 'b').end(1), -1) + self.assertEqual(regex.match(r'(a)|(b)', 'b').span(1), (-1, -1)) + + def test_bug_527371(self): + # Bug described in patches 527371/672491. + self.assertEqual(regex.match(r'(a)?a','a').lastindex, None) + self.assertEqual(regex.match(r'(a)(b)?b','ab').lastindex, 1) + self.assertEqual(regex.match(r'(?Pa)(?Pb)?b','ab').lastgroup, + 'a') + self.assertEqual(regex.match("(?Pa(b))", "ab").lastgroup, 'a') + self.assertEqual(regex.match("((a))", "a").lastindex, 1) + + def test_bug_545855(self): + # Bug 545855 -- This pattern failed to cause a compile error as it + # should, instead provoking a TypeError. 
+ self.assertRaisesRegex(regex.error, self.BAD_SET, lambda: + regex.compile('foo[a-')) + + def test_bug_418626(self): + # Bugs 418626 at al. -- Testing Greg Chapman's addition of op code + # SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of + # pattern '*?' on a long string. + self.assertEqual(regex.match('.*?c', 10000 * 'ab' + 'cd').end(0), + 20001) + self.assertEqual(regex.match('.*?cd', 5000 * 'ab' + 'c' + 5000 * 'ab' + + 'cde').end(0), 20003) + self.assertEqual(regex.match('.*?cd', 20000 * 'abc' + 'de').end(0), + 60001) + # Non-simple '*?' still used to hit the recursion limit, before the + # non-recursive scheme was implemented. + self.assertEqual(regex.search('(a|b)*?c', 10000 * 'ab' + 'cd').end(0), + 20001) + + def test_bug_612074(self): + pat = "[" + regex.escape("\u2039") + "]" + self.assertEqual(regex.compile(pat) and 1, 1) + + def test_stack_overflow(self): + # Nasty cases that used to overflow the straightforward recursive + # implementation of repeated groups. + self.assertEqual(regex.match('(x)*', 50000 * 'x')[1], 'x') + self.assertEqual(regex.match('(x)*y', 50000 * 'x' + 'y')[1], 'x') + self.assertEqual(regex.match('(x)*?y', 50000 * 'x' + 'y')[1], 'x') + + def test_scanner(self): + def s_ident(scanner, token): return token + def s_operator(scanner, token): return "op%s" % token + def s_float(scanner, token): return float(token) + def s_int(scanner, token): return int(token) + + scanner = regex.Scanner([(r"[a-zA-Z_]\w*", s_ident), (r"\d+\.\d*", + s_float), (r"\d+", s_int), (r"=|\+|-|\*|/", s_operator), (r"\s+", + None), ]) + + self.assertEqual(repr(type(scanner.scanner.scanner("").pattern)), + self.PATTERN_CLASS) + + self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum', + 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar'], '')) + + def test_bug_448951(self): + # Bug 448951 (similar to 429357, but with single char match). + # (Also test greedy matches.) + for op in '', '?', '*': + self.assertEqual(regex.match(r'((.%s):)?z' % op, 'z')[:], ('z', + None, None)) + self.assertEqual(regex.match(r'((.%s):)?z' % op, 'a:z')[:], ('a:z', + 'a:', 'a')) + + def test_bug_725106(self): + # Capturing groups in alternatives in repeats. + self.assertEqual(regex.match('^((a)|b)*', 'abc')[:], ('ab', 'b', 'a')) + self.assertEqual(regex.match('^(([ab])|c)*', 'abc')[:], ('abc', 'c', + 'b')) + self.assertEqual(regex.match('^((d)|[ab])*', 'abc')[:], ('ab', 'b', + None)) + self.assertEqual(regex.match('^((a)c|[ab])*', 'abc')[:], ('ab', 'b', + None)) + self.assertEqual(regex.match('^((a)|b)*?c', 'abc')[:], ('abc', 'b', + 'a')) + self.assertEqual(regex.match('^(([ab])|c)*?d', 'abcd')[:], ('abcd', + 'c', 'b')) + self.assertEqual(regex.match('^((d)|[ab])*?c', 'abc')[:], ('abc', 'b', + None)) + self.assertEqual(regex.match('^((a)c|[ab])*?c', 'abc')[:], ('abc', 'b', + None)) + + def test_bug_725149(self): + # Mark_stack_base restoring before restoring marks. + self.assertEqual(regex.match('(a)(?:(?=(b)*)c)*', 'abb')[:], ('a', 'a', + None)) + self.assertEqual(regex.match('(a)((?!(b)*))*', 'abb')[:], ('a', 'a', + None, None)) + + def test_bug_764548(self): + # Bug 764548, regex.compile() barfs on str/unicode subclasses. 
+ class my_unicode(str): pass + pat = regex.compile(my_unicode("abc")) + self.assertEqual(pat.match("xyz"), None) + + def test_finditer(self): + it = regex.finditer(r":+", "a:b::c:::d") + self.assertEqual([item[0] for item in it], [':', '::', ':::']) + + def test_bug_926075(self): + if regex.compile('bug_926075') is regex.compile(b'bug_926075'): + self.fail() + + def test_bug_931848(self): + pattern = "[\u002E\u3002\uFF0E\uFF61]" + self.assertEqual(regex.compile(pattern).split("a.b.c"), ['a', 'b', + 'c']) + + def test_bug_581080(self): + it = regex.finditer(r"\s", "a b") + self.assertEqual(next(it).span(), (1, 2)) + self.assertRaises(StopIteration, lambda: next(it)) + + scanner = regex.compile(r"\s").scanner("a b") + self.assertEqual(scanner.search().span(), (1, 2)) + self.assertEqual(scanner.search(), None) + + def test_bug_817234(self): + it = regex.finditer(r".*", "asdf") + self.assertEqual(next(it).span(), (0, 4)) + self.assertEqual(next(it).span(), (4, 4)) + self.assertRaises(StopIteration, lambda: next(it)) + + def test_empty_array(self): + # SF buf 1647541. + import array + for typecode in 'bBuhHiIlLfd': + a = array.array(typecode) + self.assertEqual(regex.compile(b"bla").match(a), None) + self.assertEqual(regex.compile(b"").match(a)[1 : ], ()) + + def test_inline_flags(self): + # Bug #1700. + upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Below + lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Below + + p = regex.compile(upper_char, regex.I | regex.U) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile(lower_char, regex.I | regex.U) + self.assertEqual(bool(p.match(upper_char)), True) + + p = regex.compile('(?i)' + upper_char, regex.U) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile('(?i)' + lower_char, regex.U) + self.assertEqual(bool(p.match(upper_char)), True) + + p = regex.compile('(?iu)' + upper_char) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile('(?iu)' + lower_char) + self.assertEqual(bool(p.match(upper_char)), True) + + # Changed to positional flags in regex 2023.12.23. + self.assertEqual(bool(regex.match(r"(?i)a", "A")), True) + self.assertEqual(regex.match(r"a(?i)", "A"), None) + + def test_dollar_matches_twice(self): + # $ matches the end of string, and just before the terminating \n. + pattern = regex.compile('$') + self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#') + self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#') + self.assertEqual(pattern.sub('#', '\n'), '#\n#') + + pattern = regex.compile('$', regex.MULTILINE) + self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#') + self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#') + self.assertEqual(pattern.sub('#', '\n'), '#\n#') + + def test_bytes_str_mixing(self): + # Mixing str and bytes is disallowed. 
+ pat = regex.compile('.') + bpat = regex.compile(b'.') + self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda: + pat.match(b'b')) + self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda: + bpat.match('b')) + self.assertRaisesRegex(TypeError, self.STR_PAT_BYTES_TEMPL, lambda: + pat.sub(b'b', 'c')) + self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda: + pat.sub('b', b'c')) + self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda: + pat.sub(b'b', b'c')) + self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda: + bpat.sub(b'b', 'c')) + self.assertRaisesRegex(TypeError, self.BYTES_PAT_STR_TEMPL, lambda: + bpat.sub('b', b'c')) + self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda: + bpat.sub('b', 'c')) + + self.assertRaisesRegex(ValueError, self.BYTES_PAT_UNI_FLAG, lambda: + regex.compile(br'\w', regex.UNICODE)) + self.assertRaisesRegex(ValueError, self.BYTES_PAT_UNI_FLAG, lambda: + regex.compile(br'(?u)\w')) + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'\w', regex.UNICODE | regex.ASCII)) + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'(?u)\w', regex.ASCII)) + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'(?a)\w', regex.UNICODE)) + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'(?au)\w')) + + def test_ascii_and_unicode_flag(self): + # String patterns. + for flags in (0, regex.UNICODE): + pat = regex.compile('\xc0', flags | regex.IGNORECASE) + self.assertEqual(bool(pat.match('\xe0')), True) + pat = regex.compile(r'\w', flags) + self.assertEqual(bool(pat.match('\xe0')), True) + + pat = regex.compile('\xc0', regex.ASCII | regex.IGNORECASE) + self.assertEqual(pat.match('\xe0'), None) + pat = regex.compile('(?a)\xc0', regex.IGNORECASE) + self.assertEqual(pat.match('\xe0'), None) + pat = regex.compile(r'\w', regex.ASCII) + self.assertEqual(pat.match('\xe0'), None) + pat = regex.compile(r'(?a)\w') + self.assertEqual(pat.match('\xe0'), None) + + # Bytes patterns. + for flags in (0, regex.ASCII): + pat = regex.compile(b'\xc0', flags | regex.IGNORECASE) + self.assertEqual(pat.match(b'\xe0'), None) + pat = regex.compile(br'\w') + self.assertEqual(pat.match(b'\xe0'), None) + + self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda: + regex.compile(r'(?au)\w')) + + def test_subscripting_match(self): + m = regex.match(r'(?\w)', 'xy') + if not m: + self.fail("Failed: expected match but returned None") + elif not m or m[0] != m.group(0) or m[1] != m.group(1): + self.fail("Failed") + if not m: + self.fail("Failed: expected match but returned None") + elif m[:] != ('x', 'x'): + self.fail("Failed: expected \"('x', 'x')\" but got {} instead".format(ascii(m[:]))) + + def test_new_named_groups(self): + m0 = regex.match(r'(?P\w)', 'x') + m1 = regex.match(r'(?\w)', 'x') + if not (m0 and m1 and m0[:] == m1[:]): + self.fail("Failed") + + def test_properties(self): + self.assertEqual(regex.match(b'(?ai)\xC0', b'\xE0'), None) + self.assertEqual(regex.match(br'(?ai)\xC0', b'\xE0'), None) + self.assertEqual(regex.match(br'(?a)\w', b'\xE0'), None) + self.assertEqual(bool(regex.match(r'\w', '\xE0')), True) + + # Dropped the following test. It's not possible to determine what the + # correct result should be in the general case. 
+# self.assertEqual(bool(regex.match(br'(?L)\w', b'\xE0')), +# b'\xE0'.isalnum()) + + self.assertEqual(bool(regex.match(br'(?L)\d', b'0')), True) + self.assertEqual(bool(regex.match(br'(?L)\s', b' ')), True) + self.assertEqual(bool(regex.match(br'(?L)\w', b'a')), True) + self.assertEqual(regex.match(br'(?L)\d', b'?'), None) + self.assertEqual(regex.match(br'(?L)\s', b'?'), None) + self.assertEqual(regex.match(br'(?L)\w', b'?'), None) + + self.assertEqual(regex.match(br'(?L)\D', b'0'), None) + self.assertEqual(regex.match(br'(?L)\S', b' '), None) + self.assertEqual(regex.match(br'(?L)\W', b'a'), None) + self.assertEqual(bool(regex.match(br'(?L)\D', b'?')), True) + self.assertEqual(bool(regex.match(br'(?L)\S', b'?')), True) + self.assertEqual(bool(regex.match(br'(?L)\W', b'?')), True) + + self.assertEqual(bool(regex.match(r'\p{Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'(?i)\p{Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{IsCyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{Script=Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{InCyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{Block=Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:IsCyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Script=Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:InCyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Block=Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + + self.assertEqual(bool(regex.match(r'\P{Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{IsCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{Script=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{InCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{Block=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^IsCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Script=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^InCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Block=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^IsCyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Script=Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^InCyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Block=Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + + self.assertEqual(bool(regex.match(r'\d', '0')), True) + self.assertEqual(bool(regex.match(r'\s', ' ')), True) + self.assertEqual(bool(regex.match(r'\w', 'A')), True) + 
self.assertEqual(regex.match(r"\d", "?"), None) + self.assertEqual(regex.match(r"\s", "?"), None) + self.assertEqual(regex.match(r"\w", "?"), None) + self.assertEqual(regex.match(r"\D", "0"), None) + self.assertEqual(regex.match(r"\S", " "), None) + self.assertEqual(regex.match(r"\W", "A"), None) + self.assertEqual(bool(regex.match(r'\D', '?')), True) + self.assertEqual(bool(regex.match(r'\S', '?')), True) + self.assertEqual(bool(regex.match(r'\W', '?')), True) + + self.assertEqual(bool(regex.match(r'\p{L}', 'A')), True) + self.assertEqual(bool(regex.match(r'\p{L}', 'a')), True) + self.assertEqual(bool(regex.match(r'\p{Lu}', 'A')), True) + self.assertEqual(bool(regex.match(r'\p{Ll}', 'a')), True) + + self.assertEqual(bool(regex.match(r'(?i)a', 'a')), True) + self.assertEqual(bool(regex.match(r'(?i)a', 'A')), True) + + self.assertEqual(bool(regex.match(r'\w', '0')), True) + self.assertEqual(bool(regex.match(r'\w', 'a')), True) + self.assertEqual(bool(regex.match(r'\w', '_')), True) + + self.assertEqual(regex.match(r"\X", "\xE0").span(), (0, 1)) + self.assertEqual(regex.match(r"\X", "a\u0300").span(), (0, 2)) + self.assertEqual(regex.findall(r"\X", + "a\xE0a\u0300e\xE9e\u0301"), ['a', '\xe0', 'a\u0300', 'e', + '\xe9', 'e\u0301']) + self.assertEqual(regex.findall(r"\X{3}", + "a\xE0a\u0300e\xE9e\u0301"), ['a\xe0a\u0300', 'e\xe9e\u0301']) + self.assertEqual(regex.findall(r"\X", "\r\r\n\u0301A\u0301"), + ['\r', '\r\n', '\u0301', 'A\u0301']) + + self.assertEqual(bool(regex.match(r'\p{Ll}', 'a')), True) + + chars_u = "-09AZaz_\u0393\u03b3" + chars_b = b"-09AZaz_" + word_set = set("Ll Lm Lo Lt Lu Mc Me Mn Nd Nl No Pc".split()) + + tests = [ + (r"\w", chars_u, "09AZaz_\u0393\u03b3"), + (r"[[:word:]]", chars_u, "09AZaz_\u0393\u03b3"), + (r"\W", chars_u, "-"), + (r"[[:^word:]]", chars_u, "-"), + (r"\d", chars_u, "09"), + (r"[[:digit:]]", chars_u, "09"), + (r"\D", chars_u, "-AZaz_\u0393\u03b3"), + (r"[[:^digit:]]", chars_u, "-AZaz_\u0393\u03b3"), + (r"[[:alpha:]]", chars_u, "AZaz\u0393\u03b3"), + (r"[[:^alpha:]]", chars_u, "-09_"), + (r"[[:alnum:]]", chars_u, "09AZaz\u0393\u03b3"), + (r"[[:^alnum:]]", chars_u, "-_"), + (r"[[:xdigit:]]", chars_u, "09Aa"), + (r"[[:^xdigit:]]", chars_u, "-Zz_\u0393\u03b3"), + (r"\p{InBasicLatin}", "a\xE1", "a"), + (r"\P{InBasicLatin}", "a\xE1", "\xE1"), + (r"(?i)\p{InBasicLatin}", "a\xE1", "a"), + (r"(?i)\P{InBasicLatin}", "a\xE1", "\xE1"), + + (br"(?L)\w", chars_b, b"09AZaz_"), + (br"(?L)[[:word:]]", chars_b, b"09AZaz_"), + (br"(?L)\W", chars_b, b"-"), + (br"(?L)[[:^word:]]", chars_b, b"-"), + (br"(?L)\d", chars_b, b"09"), + (br"(?L)[[:digit:]]", chars_b, b"09"), + (br"(?L)\D", chars_b, b"-AZaz_"), + (br"(?L)[[:^digit:]]", chars_b, b"-AZaz_"), + (br"(?L)[[:alpha:]]", chars_b, b"AZaz"), + (br"(?L)[[:^alpha:]]", chars_b, b"-09_"), + (br"(?L)[[:alnum:]]", chars_b, b"09AZaz"), + (br"(?L)[[:^alnum:]]", chars_b, b"-_"), + (br"(?L)[[:xdigit:]]", chars_b, b"09Aa"), + (br"(?L)[[:^xdigit:]]", chars_b, b"-Zz_"), + + (br"(?a)\w", chars_b, b"09AZaz_"), + (br"(?a)[[:word:]]", chars_b, b"09AZaz_"), + (br"(?a)\W", chars_b, b"-"), + (br"(?a)[[:^word:]]", chars_b, b"-"), + (br"(?a)\d", chars_b, b"09"), + (br"(?a)[[:digit:]]", chars_b, b"09"), + (br"(?a)\D", chars_b, b"-AZaz_"), + (br"(?a)[[:^digit:]]", chars_b, b"-AZaz_"), + (br"(?a)[[:alpha:]]", chars_b, b"AZaz"), + (br"(?a)[[:^alpha:]]", chars_b, b"-09_"), + (br"(?a)[[:alnum:]]", chars_b, b"09AZaz"), + (br"(?a)[[:^alnum:]]", chars_b, b"-_"), + (br"(?a)[[:xdigit:]]", chars_b, b"09Aa"), + (br"(?a)[[:^xdigit:]]", chars_b, b"-Zz_"), + ] + 
for pattern, chars, expected in tests: + try: + if chars[ : 0].join(regex.findall(pattern, chars)) != expected: + self.fail("Failed: {}".format(pattern)) + except Exception as e: + self.fail("Failed: {} raised {}".format(pattern, ascii(e))) + + self.assertEqual(bool(regex.match(r"\p{NumericValue=0}", "0")), + True) + self.assertEqual(bool(regex.match(r"\p{NumericValue=1/2}", + "\N{VULGAR FRACTION ONE HALF}")), True) + self.assertEqual(bool(regex.match(r"\p{NumericValue=0.5}", + "\N{VULGAR FRACTION ONE HALF}")), True) + + def test_word_class(self): + self.assertEqual(regex.findall(r"\w+", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), + ['\u0939\u093f\u0928\u094d\u0926\u0940']) + self.assertEqual(regex.findall(r"\W+", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), [' ', ',']) + self.assertEqual(regex.split(r"(?V1)\b", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), [' ', + '\u0939\u093f\u0928\u094d\u0926\u0940', ',']) + self.assertEqual(regex.split(r"(?V1)\B", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), ['', ' \u0939', + '\u093f', '\u0928', '\u094d', '\u0926', '\u0940,', '']) + + def test_search_anchor(self): + self.assertEqual(regex.findall(r"\G\w{2}", "abcd ef"), ['ab', 'cd']) + + def test_search_reverse(self): + self.assertEqual(regex.findall(r"(?r).", "abc"), ['c', 'b', 'a']) + self.assertEqual(regex.findall(r"(?r).", "abc", overlapped=True), ['c', + 'b', 'a']) + self.assertEqual(regex.findall(r"(?r)..", "abcde"), ['de', 'bc']) + self.assertEqual(regex.findall(r"(?r)..", "abcde", overlapped=True), + ['de', 'cd', 'bc', 'ab']) + self.assertEqual(regex.findall(r"(?r)(.)(-)(.)", "a-b-c", + overlapped=True), [("b", "-", "c"), ("a", "-", "b")]) + + self.assertEqual([m[0] for m in regex.finditer(r"(?r).", "abc")], ['c', + 'b', 'a']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r).", "abc")], ['c', + 'b', 'a']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + + self.assertEqual(regex.findall(r"^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual(regex.findall(r"(?V1)^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual(regex.findall(r"(?r)^|\w+", "foo bar"), ['bar', 'foo', + '']) + self.assertEqual(regex.findall(r"(?rV1)^|\w+", "foo bar"), ['bar', + 'foo', '']) + + self.assertEqual([m[0] for m in regex.finditer(r"^|\w+", "foo bar")], + ['', 'foo', 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?V1)^|\w+", + "foo bar")], ['', 'foo', 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)^|\w+", + "foo bar")], ['bar', 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?rV1)^|\w+", + "foo bar")], ['bar', 'foo', '']) + + self.assertEqual(regex.findall(r"\G\w{2}", "abcd ef"), ['ab', 'cd']) + self.assertEqual(regex.findall(r".{2}(?<=\G.*)", "abcd"), ['ab', 'cd']) + self.assertEqual(regex.findall(r"(?r)\G\w{2}", "abcd ef"), []) + self.assertEqual(regex.findall(r"(?r)\w{2}\G", "abcd ef"), ['ef']) + + self.assertEqual(regex.findall(r"q*", "qqwe"), ['qq', '', '', '']) + self.assertEqual(regex.findall(r"(?V1)q*", "qqwe"), ['qq', '', '', '']) + self.assertEqual(regex.findall(r"(?r)q*", "qqwe"), ['', '', 'qq', '']) + self.assertEqual(regex.findall(r"(?rV1)q*", "qqwe"), ['', '', 'qq', + '']) + + self.assertEqual(regex.findall(".", "abcd", pos=1, endpos=3), ['b', + 'c']) + self.assertEqual(regex.findall(".", "abcd", pos=1, endpos=-1), ['b', + 'c']) + self.assertEqual([m[0] for 
m in regex.finditer(".", "abcd", pos=1, + endpos=3)], ['b', 'c']) + self.assertEqual([m[0] for m in regex.finditer(".", "abcd", pos=1, + endpos=-1)], ['b', 'c']) + + self.assertEqual([m[0] for m in regex.finditer("(?r).", "abcd", pos=1, + endpos=3)], ['c', 'b']) + self.assertEqual([m[0] for m in regex.finditer("(?r).", "abcd", pos=1, + endpos=-1)], ['c', 'b']) + self.assertEqual(regex.findall("(?r).", "abcd", pos=1, endpos=3), ['c', + 'b']) + self.assertEqual(regex.findall("(?r).", "abcd", pos=1, endpos=-1), + ['c', 'b']) + + self.assertEqual(regex.findall(r"[ab]", "aB", regex.I), ['a', 'B']) + self.assertEqual(regex.findall(r"(?r)[ab]", "aB", regex.I), ['B', 'a']) + + self.assertEqual(regex.findall(r"(?r).{2}", "abc"), ['bc']) + self.assertEqual(regex.findall(r"(?r).{2}", "abc", overlapped=True), + ['bc', 'ab']) + self.assertEqual(regex.findall(r"(\w+) (\w+)", + "first second third fourth fifth"), [('first', 'second'), ('third', + 'fourth')]) + self.assertEqual(regex.findall(r"(?r)(\w+) (\w+)", + "first second third fourth fifth"), [('fourth', 'fifth'), ('second', + 'third')]) + + self.assertEqual([m[0] for m in regex.finditer(r"(?r).{2}", "abc")], + ['bc']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r).{2}", "abc", + overlapped=True)], ['bc', 'ab']) + self.assertEqual([m[0] for m in regex.finditer(r"(\w+) (\w+)", + "first second third fourth fifth")], ['first second', + 'third fourth']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)(\w+) (\w+)", + "first second third fourth fifth")], ['fourth fifth', + 'second third']) + + self.assertEqual(regex.search("abcdef", "abcdef").span(), (0, 6)) + self.assertEqual(regex.search("(?r)abcdef", "abcdef").span(), (0, 6)) + self.assertEqual(regex.search("(?i)abcdef", "ABCDEF").span(), (0, 6)) + self.assertEqual(regex.search("(?ir)abcdef", "ABCDEF").span(), (0, 6)) + + self.assertEqual(regex.sub(r"(.)", r"\1", "abc"), 'abc') + self.assertEqual(regex.sub(r"(?r)(.)", r"\1", "abc"), 'abc') + + def test_atomic(self): + # Issue 433030. + self.assertEqual(regex.search(r"(?>a*)a", "aa"), None) + + def test_possessive(self): + # Single-character non-possessive. + self.assertEqual(regex.search(r"a?a", "a").span(), (0, 1)) + self.assertEqual(regex.search(r"a*a", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"a+a", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"a{1,3}a", "aaa").span(), (0, 3)) + + # Multiple-character non-possessive. + self.assertEqual(regex.search(r"(?:ab)?ab", "ab").span(), (0, 2)) + self.assertEqual(regex.search(r"(?:ab)*ab", "ababab").span(), (0, 6)) + self.assertEqual(regex.search(r"(?:ab)+ab", "ababab").span(), (0, 6)) + self.assertEqual(regex.search(r"(?:ab){1,3}ab", "ababab").span(), (0, + 6)) + + # Single-character possessive. + self.assertEqual(regex.search(r"a?+a", "a"), None) + self.assertEqual(regex.search(r"a*+a", "aaa"), None) + self.assertEqual(regex.search(r"a++a", "aaa"), None) + self.assertEqual(regex.search(r"a{1,3}+a", "aaa"), None) + + # Multiple-character possessive. + self.assertEqual(regex.search(r"(?:ab)?+ab", "ab"), None) + self.assertEqual(regex.search(r"(?:ab)*+ab", "ababab"), None) + self.assertEqual(regex.search(r"(?:ab)++ab", "ababab"), None) + self.assertEqual(regex.search(r"(?:ab){1,3}+ab", "ababab"), None) + + def test_zerowidth(self): + # Issue 3262. 
+ if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split(r"\b", "a b"), ['', 'a', ' ', 'b', + '']) + else: + self.assertEqual(regex.split(r"\b", "a b"), ['a b']) + self.assertEqual(regex.split(r"(?V1)\b", "a b"), ['', 'a', ' ', 'b', + '']) + + # Issue 1647489. + self.assertEqual(regex.findall(r"^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"^|\w+", "foo bar")], + ['', 'foo', 'bar']) + self.assertEqual(regex.findall(r"(?r)^|\w+", "foo bar"), ['bar', + 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)^|\w+", + "foo bar")], ['bar', 'foo', '']) + self.assertEqual(regex.findall(r"(?V1)^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?V1)^|\w+", + "foo bar")], ['', 'foo', 'bar']) + self.assertEqual(regex.findall(r"(?rV1)^|\w+", "foo bar"), ['bar', + 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?rV1)^|\w+", + "foo bar")], ['bar', 'foo', '']) + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split("", "xaxbxc"), ['', 'x', 'a', 'x', + 'b', 'x', 'c', '']) + self.assertEqual([m for m in regex.splititer("", "xaxbxc")], ['', + 'x', 'a', 'x', 'b', 'x', 'c', '']) + else: + self.assertEqual(regex.split("", "xaxbxc"), ['xaxbxc']) + self.assertEqual([m for m in regex.splititer("", "xaxbxc")], + ['xaxbxc']) + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split("(?r)", "xaxbxc"), ['', 'c', 'x', 'b', + 'x', 'a', 'x', '']) + self.assertEqual([m for m in regex.splititer("(?r)", "xaxbxc")], + ['', 'c', 'x', 'b', 'x', 'a', 'x', '']) + else: + self.assertEqual(regex.split("(?r)", "xaxbxc"), ['xaxbxc']) + self.assertEqual([m for m in regex.splititer("(?r)", "xaxbxc")], + ['xaxbxc']) + + self.assertEqual(regex.split("(?V1)", "xaxbxc"), ['', 'x', 'a', 'x', + 'b', 'x', 'c', '']) + self.assertEqual([m for m in regex.splititer("(?V1)", "xaxbxc")], ['', + 'x', 'a', 'x', 'b', 'x', 'c', '']) + + self.assertEqual(regex.split("(?rV1)", "xaxbxc"), ['', 'c', 'x', 'b', + 'x', 'a', 'x', '']) + self.assertEqual([m for m in regex.splititer("(?rV1)", "xaxbxc")], ['', + 'c', 'x', 'b', 'x', 'a', 'x', '']) + + def test_scoped_and_inline_flags(self): + # Issues 433028, 433024, 433027. + self.assertEqual(regex.search(r"(?i)Ab", "ab").span(), (0, 2)) + self.assertEqual(regex.search(r"(?i:A)b", "ab").span(), (0, 2)) + # Changed to positional flags in regex 2023.12.23. + self.assertEqual(regex.search(r"A(?i)b", "ab"), None) + + self.assertEqual(regex.search(r"(?V0)Ab", "ab"), None) + self.assertEqual(regex.search(r"(?V1)Ab", "ab"), None) + self.assertEqual(regex.search(r"(?-i)Ab", "ab", flags=regex.I), None) + self.assertEqual(regex.search(r"(?-i:A)b", "ab", flags=regex.I), None) + self.assertEqual(regex.search(r"A(?-i)b", "ab", flags=regex.I).span(), + (0, 2)) + + def test_repeated_repeats(self): + # Issue 2537. + self.assertEqual(regex.search(r"(?:a+)+", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"(?:(?:ab)+c)+", "abcabc").span(), (0, + 6)) + + # Hg issue 286. 
+ self.assertEqual(regex.search(r"(?:a+){2,}", "aaa").span(), (0, 3)) + + def test_lookbehind(self): + self.assertEqual(regex.search(r"123(?<=a\d+)", "a123").span(), (1, 4)) + self.assertEqual(regex.search(r"123(?<=a\d+)", "b123"), None) + self.assertEqual(regex.search(r"123(?= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "xy"), + 'y-x-') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "xy"), + 'y-x') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "xy"), 'y-x-') + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "x"), '-x-') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "x"), '-x') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "x"), '-x-') + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "y"), 'y--') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "y"), 'y-') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "y"), 'y--') + + def test_bug_10328 (self): + # Issue 10328. + pat = regex.compile(r'(?mV0)(?P[ \t]+\r*$)|(?P(?<=[^\n])\Z)') + if sys.version_info >= (3, 7, 0): + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 2)) + else: + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 1)) + self.assertEqual([m.group() for m in pat.finditer('foobar ')], [' ', + '']) + pat = regex.compile(r'(?mV1)(?P[ \t]+\r*$)|(?P(?<=[^\n])\Z)') + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 2)) + self.assertEqual([m.group() for m in pat.finditer('foobar ')], [' ', + '']) + + def test_overlapped(self): + self.assertEqual(regex.findall(r"..", "abcde"), ['ab', 'cd']) + self.assertEqual(regex.findall(r"..", "abcde", overlapped=True), ['ab', + 'bc', 'cd', 'de']) + self.assertEqual(regex.findall(r"(?r)..", "abcde"), ['de', 'bc']) + self.assertEqual(regex.findall(r"(?r)..", "abcde", overlapped=True), + ['de', 'cd', 'bc', 'ab']) + self.assertEqual(regex.findall(r"(.)(-)(.)", "a-b-c", overlapped=True), + [("a", "-", "b"), ("b", "-", "c")]) + + self.assertEqual([m[0] for m in regex.finditer(r"..", "abcde")], ['ab', + 'cd']) + self.assertEqual([m[0] for m in regex.finditer(r"..", "abcde", + overlapped=True)], ['ab', 'bc', 'cd', 'de']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde")], + ['de', 'bc']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + + self.assertEqual([m.groups() for m in regex.finditer(r"(.)(-)(.)", + "a-b-c", overlapped=True)], [("a", "-", "b"), ("b", "-", "c")]) + self.assertEqual([m.groups() for m in regex.finditer(r"(?r)(.)(-)(.)", + "a-b-c", overlapped=True)], [("b", "-", "c"), ("a", "-", "b")]) + + def test_splititer(self): + self.assertEqual(regex.split(r",", "a,b,,c,"), ['a', 'b', '', 'c', '']) + self.assertEqual([m for m in regex.splititer(r",", "a,b,,c,")], ['a', + 'b', '', 'c', '']) + + def test_grapheme(self): + self.assertEqual(regex.match(r"\X", "\xE0").span(), (0, 1)) + self.assertEqual(regex.match(r"\X", "a\u0300").span(), (0, 2)) + + self.assertEqual(regex.findall(r"\X", + "a\xE0a\u0300e\xE9e\u0301"), ['a', '\xe0', 'a\u0300', 'e', + '\xe9', 'e\u0301']) + self.assertEqual(regex.findall(r"\X{3}", + "a\xE0a\u0300e\xE9e\u0301"), ['a\xe0a\u0300', 'e\xe9e\u0301']) + self.assertEqual(regex.findall(r"\X", "\r\r\n\u0301A\u0301"), + ['\r', '\r\n', '\u0301', 'A\u0301']) + + def test_word_boundary(self): + text = 
'The quick ("brown") fox can\'t jump 32.3 feet, right?' + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'The', ' ', + 'quick', ' ("', 'brown', '") ', 'fox', ' ', 'can', "'", 't', + ' ', 'jump', ' ', '32', '.', '3', ' ', 'feet', ', ', + 'right', '?']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', 'The', ' ', + 'quick', ' ', '(', '"', 'brown', '"', ')', ' ', 'fox', ' ', + "can't", ' ', 'jump', ' ', '32.3', ' ', 'feet', ',', ' ', + 'right', '?', '']) + + text = "The fox" + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'The', ' ', + 'fox', '']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', 'The', ' ', + 'fox', '']) + + text = "can't aujourd'hui l'objectif" + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'can', "'", + 't', ' ', 'aujourd', "'", 'hui', ' ', 'l', "'", 'objectif', + '']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', "can't", ' ', + "aujourd'hui", ' ', "l'objectif", '']) + + def test_line_boundary(self): + self.assertEqual(regex.findall(r".+", "Line 1\nLine 2\n"), ["Line 1", + "Line 2"]) + self.assertEqual(regex.findall(r".+", "Line 1\rLine 2\r"), + ["Line 1\rLine 2\r"]) + self.assertEqual(regex.findall(r".+", "Line 1\r\nLine 2\r\n"), + ["Line 1\r", "Line 2\r"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\nLine 2\n"), + ["Line 1", "Line 2"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\rLine 2\r"), + ["Line 1", "Line 2"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\r\nLine 2\r\n"), + ["Line 1", "Line 2"]) + + self.assertEqual(regex.search(r"^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"^abc", "\nabc"), None) + self.assertEqual(regex.search(r"^abc", "\rabc"), None) + self.assertEqual(regex.search(r"(?w)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?w)^abc", "\nabc"), None) + self.assertEqual(regex.search(r"(?w)^abc", "\rabc"), None) + + self.assertEqual(regex.search(r"abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"abc$", "abc\r"), None) + self.assertEqual(regex.search(r"(?w)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?w)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?w)abc$", "abc\r").start(), 0) + + self.assertEqual(regex.search(r"(?m)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?m)^abc", "\nabc").start(), 1) + self.assertEqual(regex.search(r"(?m)^abc", "\rabc"), None) + self.assertEqual(regex.search(r"(?mw)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?mw)^abc", "\nabc").start(), 1) + self.assertEqual(regex.search(r"(?mw)^abc", "\rabc").start(), 1) + + self.assertEqual(regex.search(r"(?m)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?m)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?m)abc$", "abc\r"), None) + self.assertEqual(regex.search(r"(?mw)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?mw)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?mw)abc$", "abc\r").start(), 0) + + def test_branch_reset(self): + self.assertEqual(regex.match(r"(?:(a)|(b))(c)", "ac").groups(), ('a', + None, 'c')) + self.assertEqual(regex.match(r"(?:(a)|(b))(c)", "bc").groups(), (None, + 'b', 'c')) + self.assertEqual(regex.match(r"(?:(?a)|(?b))(?c)", + "ac").groups(), ('a', None, 'c')) + self.assertEqual(regex.match(r"(?:(?a)|(?b))(?c)", + "bc").groups(), (None, 'b', 'c')) + + self.assertEqual(regex.match(r"(?a)(?:(?b)|(?c))(?d)", + "abd").groups(), ('a', 'b', None, 'd')) + 
self.assertEqual(regex.match(r"(?a)(?:(?b)|(?c))(?d)", + "acd").groups(), ('a', None, 'c', 'd')) + self.assertEqual(regex.match(r"(a)(?:(b)|(c))(d)", "abd").groups(), + ('a', 'b', None, 'd')) + + self.assertEqual(regex.match(r"(a)(?:(b)|(c))(d)", "acd").groups(), + ('a', None, 'c', 'd')) + self.assertEqual(regex.match(r"(a)(?|(b)|(b))(d)", "abd").groups(), + ('a', 'b', 'd')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "ac").groups(), + ('a', None, 'c')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "bc").groups(), + (None, 'b', 'c')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "ac").groups(), + ('a', 'c')) + + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "bc").groups(), + ('b', 'c')) + + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(?d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(?d))(e)", + "cde").groups(), ('d', 'c', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(d))(e)", + "cde").groups(), ('d', 'c', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(d))(e)", + "cde").groups(), ('c', 'd', 'e')) + + # Hg issue 87: Allow duplicate names of groups + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "abe").groups(), ("a", "b", "e")) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "abe").capturesdict(), {"a": ["a"], "b": ["b"]}) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "cde").groups(), ("d", None, "e")) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "cde").capturesdict(), {"a": ["c", "d"], "b": []}) + + def test_set(self): + self.assertEqual(regex.match(r"[a]", "a").span(), (0, 1)) + self.assertEqual(regex.match(r"(?i)[a]", "A").span(), (0, 1)) + self.assertEqual(regex.match(r"[a-b]", r"a").span(), (0, 1)) + self.assertEqual(regex.match(r"(?i)[a-b]", r"A").span(), (0, 1)) + + self.assertEqual(regex.sub(r"(?V0)([][])", r"-", "a[b]c"), "a-b-c") + + self.assertEqual(regex.findall(r"[\p{Alpha}]", "a0"), ["a"]) + self.assertEqual(regex.findall(r"(?i)[\p{Alpha}]", "A0"), ["A"]) + + self.assertEqual(regex.findall(r"[a\p{Alpha}]", "ab0"), ["a", "b"]) + self.assertEqual(regex.findall(r"[a\P{Alpha}]", "ab0"), ["a", "0"]) + self.assertEqual(regex.findall(r"(?i)[a\p{Alpha}]", "ab0"), ["a", + "b"]) + self.assertEqual(regex.findall(r"(?i)[a\P{Alpha}]", "ab0"), ["a", + "0"]) + + self.assertEqual(regex.findall(r"[a-b\p{Alpha}]", "abC0"), ["a", + "b", "C"]) + self.assertEqual(regex.findall(r"(?i)[a-b\p{Alpha}]", "AbC0"), ["A", + "b", "C"]) + + self.assertEqual(regex.findall(r"[\p{Alpha}]", "a0"), ["a"]) + self.assertEqual(regex.findall(r"[\P{Alpha}]", "a0"), ["0"]) + self.assertEqual(regex.findall(r"[^\p{Alpha}]", "a0"), ["0"]) + self.assertEqual(regex.findall(r"[^\P{Alpha}]", "a0"), ["a"]) + + self.assertEqual("".join(regex.findall(r"[^\d-h]", "a^b12c-h")), + 'a^bc') + self.assertEqual("".join(regex.findall(r"[^\dh]", "a^b12c-h")), + 'a^bc-') + self.assertEqual("".join(regex.findall(r"[^h\s\db]", "a^b 12c-h")), + 'a^c-') + self.assertEqual("".join(regex.findall(r"[^b\w]", "a b")), ' ') + self.assertEqual("".join(regex.findall(r"[^b\S]", "a b")), ' ') + self.assertEqual("".join(regex.findall(r"[^8\d]", "a 1b2")), 'a b') + + all_chars = "".join(chr(c) for c in range(0x100)) + self.assertEqual(len(regex.findall(r"\p{ASCII}", all_chars)), 128) + 
self.assertEqual(len(regex.findall(r"\p{Letter}", all_chars)), + 117) + self.assertEqual(len(regex.findall(r"\p{Digit}", all_chars)), 10) + + # Set operators + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Letter}]", + all_chars)), 52) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Alnum}&&\p{Letter}]", + all_chars)), 52) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Alnum}&&\p{Digit}]", + all_chars)), 10) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Cc}]", + all_chars)), 33) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Graph}]", + all_chars)), 94) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}--\p{Cc}]", + all_chars)), 95) + self.assertEqual(len(regex.findall(r"[\p{Letter}\p{Digit}]", + all_chars)), 127) + self.assertEqual(len(regex.findall(r"(?V1)[\p{Letter}||\p{Digit}]", + all_chars)), 127) + self.assertEqual(len(regex.findall(r"\p{HexDigit}", all_chars)), + 22) + self.assertEqual(len(regex.findall(r"(?V1)[\p{HexDigit}~~\p{Digit}]", + all_chars)), 12) + self.assertEqual(len(regex.findall(r"(?V1)[\p{Digit}~~\p{HexDigit}]", + all_chars)), 12) + + self.assertEqual(repr(type(regex.compile(r"(?V0)([][-])"))), + self.PATTERN_CLASS) + self.assertEqual(regex.findall(r"(?V1)[[a-z]--[aei]]", "abc"), ["b", + "c"]) + self.assertEqual(regex.findall(r"(?iV1)[[a-z]--[aei]]", "abc"), ["b", + "c"]) + self.assertEqual(regex.findall(r"(?V1)[\w--a]","abc"), ["b", "c"]) + self.assertEqual(regex.findall(r"(?iV1)[\w--a]","abc"), ["b", "c"]) + + def test_various(self): + tests = [ + # Test ?P< and ?P= extensions. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with a digit. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with an illegal char. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with an illegal char. + + # Same tests, for the ?P= form. + ('(?Pa)(?P=foo_123', 'aa', '', regex.error, + self.MISSING_RPAREN), + ('(?Pa)(?P=1)', 'aa', '1', ascii('a')), + ('(?Pa)(?P=0)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=-1)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=!)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=foo_124)', 'aa', '', regex.error, + self.UNKNOWN_GROUP), # Backref to undefined group. + + ('(?Pa)', 'a', '1', ascii('a')), + ('(?Pa)(?P=foo_123)', 'aa', '1', ascii('a')), + + # Mal-formed \g in pattern treated as literal for compatibility. + (r'(?a)\ga)\g<1>', 'aa', '1', ascii('a')), + (r'(?a)\g', 'aa', '', ascii(None)), + (r'(?a)\g', 'aa', '', regex.error, + self.UNKNOWN_GROUP), # Backref to undefined group. + + ('(?a)', 'a', '1', ascii('a')), + (r'(?a)\g', 'aa', '1', ascii('a')), + + # Test octal escapes. + ('\\1', 'a', '', regex.error, self.INVALID_GROUP_REF), # Backreference. + ('[\\1]', '\1', '0', "'\\x01'"), # Character. + ('\\09', chr(0) + '9', '0', ascii(chr(0) + '9')), + ('\\141', 'a', '0', ascii('a')), + ('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', + '0,11', ascii(('abcdefghijklk9', 'k'))), + + # Test \0 is handled everywhere. + (r'\0', '\0', '0', ascii('\0')), + (r'[\0a]', '\0', '0', ascii('\0')), + (r'[a\0]', '\0', '0', ascii('\0')), + (r'[^a\0]', '\0', '', ascii(None)), + + # Test various letter escapes. + (r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', '0', + ascii('\a\b\f\n\r\t\v')), + (r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', '0', + ascii('\a\b\f\n\r\t\v')), + (r'\xff', '\377', '0', ascii(chr(255))), + + # New \x semantics. 
+ (r'\x00ffffffffffffff', '\377', '', ascii(None)), + (r'\x00f', '\017', '', ascii(None)), + (r'\x00fe', '\376', '', ascii(None)), + + (r'\x00ff', '\377', '', ascii(None)), + (r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', '0', ascii('\t\n\v\r\f\ag')), + ('\t\n\v\r\f\a\\g', '\t\n\v\r\f\ag', '0', ascii('\t\n\v\r\f\ag')), + (r'\t\n\v\r\f\a', '\t\n\v\r\f\a', '0', ascii(chr(9) + chr(10) + + chr(11) + chr(13) + chr(12) + chr(7))), + (r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', '0', + ascii('\t\n\v\r\f\b')), + + (r"^\w+=(\\[\000-\277]|[^\n\\])*", + "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c", '0', + ascii("SRC=eval.c g.c blah blah blah \\\\")), + + # Test that . only matches \n in DOTALL mode. + ('a.b', 'acb', '0', ascii('acb')), + ('a.b', 'a\nb', '', ascii(None)), + ('a.*b', 'acc\nccb', '', ascii(None)), + ('a.{4,5}b', 'acc\nccb', '', ascii(None)), + ('a.b', 'a\rb', '0', ascii('a\rb')), + # Changed to positional flags in regex 2023.12.23. + ('a.b(?s)', 'a\nb', '', ascii(None)), + ('(?s)a.b', 'a\nb', '0', ascii('a\nb')), + ('a.*(?s)b', 'acc\nccb', '', ascii(None)), + ('(?s)a.*b', 'acc\nccb', '0', ascii('acc\nccb')), + ('(?s)a.{4,5}b', 'acc\nccb', '0', ascii('acc\nccb')), + + (')', '', '', regex.error, self.TRAILING_CHARS), # Unmatched right bracket. + ('', '', '0', "''"), # Empty pattern. + ('abc', 'abc', '0', ascii('abc')), + ('abc', 'xbc', '', ascii(None)), + ('abc', 'axc', '', ascii(None)), + ('abc', 'abx', '', ascii(None)), + ('abc', 'xabcy', '0', ascii('abc')), + ('abc', 'ababc', '0', ascii('abc')), + ('ab*c', 'abc', '0', ascii('abc')), + ('ab*bc', 'abc', '0', ascii('abc')), + + ('ab*bc', 'abbc', '0', ascii('abbc')), + ('ab*bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab+bc', 'abbc', '0', ascii('abbc')), + ('ab+bc', 'abc', '', ascii(None)), + ('ab+bc', 'abq', '', ascii(None)), + ('ab+bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab?bc', 'abbc', '0', ascii('abbc')), + ('ab?bc', 'abc', '0', ascii('abc')), + ('ab?bc', 'abbbbc', '', ascii(None)), + ('ab?c', 'abc', '0', ascii('abc')), + + ('^abc$', 'abc', '0', ascii('abc')), + ('^abc$', 'abcc', '', ascii(None)), + ('^abc', 'abcc', '0', ascii('abc')), + ('^abc$', 'aabc', '', ascii(None)), + ('abc$', 'aabc', '0', ascii('abc')), + ('^', 'abc', '0', ascii('')), + ('$', 'abc', '0', ascii('')), + ('a.c', 'abc', '0', ascii('abc')), + ('a.c', 'axc', '0', ascii('axc')), + ('a.*c', 'axyzc', '0', ascii('axyzc')), + + ('a.*c', 'axyzd', '', ascii(None)), + ('a[bc]d', 'abc', '', ascii(None)), + ('a[bc]d', 'abd', '0', ascii('abd')), + ('a[b-d]e', 'abd', '', ascii(None)), + ('a[b-d]e', 'ace', '0', ascii('ace')), + ('a[b-d]', 'aac', '0', ascii('ac')), + ('a[-b]', 'a-', '0', ascii('a-')), + ('a[\\-b]', 'a-', '0', ascii('a-')), + ('a[b-]', 'a-', '0', ascii('a-')), + ('a[]b', '-', '', regex.error, self.BAD_SET), + + ('a[', '-', '', regex.error, self.BAD_SET), + ('a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('a]', 'a]', '0', ascii('a]')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[^bc]d', 'aed', '0', ascii('aed')), + ('a[^bc]d', 'abd', '', ascii(None)), + ('a[^-b]c', 'adc', '0', ascii('adc')), + + ('a[^-b]c', 'a-c', '', ascii(None)), + ('a[^]b]c', 'a]c', '', ascii(None)), + ('a[^]b]c', 'adc', '0', ascii('adc')), + ('\\ba\\b', 'a-', '0', ascii('a')), + ('\\ba\\b', '-a', '0', ascii('a')), + ('\\ba\\b', '-a-', '0', ascii('a')), + ('\\by\\b', 'xy', '', ascii(None)), + ('\\by\\b', 'yz', '', ascii(None)), + ('\\by\\b', 'xyz', '', 
ascii(None)), + ('x\\b', 'xyz', '', ascii(None)), + + ('x\\B', 'xyz', '0', ascii('x')), + ('\\Bz', 'xyz', '0', ascii('z')), + ('z\\B', 'xyz', '', ascii(None)), + ('\\Bx', 'xyz', '', ascii(None)), + ('\\Ba\\B', 'a-', '', ascii(None)), + ('\\Ba\\B', '-a', '', ascii(None)), + ('\\Ba\\B', '-a-', '', ascii(None)), + ('\\By\\B', 'xy', '', ascii(None)), + ('\\By\\B', 'yz', '', ascii(None)), + ('\\By\\b', 'xy', '0', ascii('y')), + + ('\\by\\B', 'yz', '0', ascii('y')), + ('\\By\\B', 'xyz', '0', ascii('y')), + ('ab|cd', 'abc', '0', ascii('ab')), + ('ab|cd', 'abcd', '0', ascii('ab')), + ('()ef', 'def', '0,1', ascii(('ef', ''))), + ('$b', 'b', '', ascii(None)), + ('a\\(b', 'a(b', '', ascii(('a(b',))), + ('a\\(*b', 'ab', '0', ascii('ab')), + ('a\\(*b', 'a((b', '0', ascii('a((b')), + ('a\\\\b', 'a\\b', '0', ascii('a\\b')), + + ('((a))', 'abc', '0,1,2', ascii(('a', 'a', 'a'))), + ('(a)b(c)', 'abc', '0,1,2', ascii(('abc', 'a', 'c'))), + ('a+b+c', 'aabbabc', '0', ascii('abc')), + ('(a+|b)*', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)+', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)?', 'ab', '0,1', ascii(('a', 'a'))), + (')(', '-', '', regex.error, self.TRAILING_CHARS), + ('[^ab]*', 'cde', '0', ascii('cde')), + ('abc', '', '', ascii(None)), + ('a*', '', '0', ascii('')), + + ('a|b|c|d|e', 'e', '0', ascii('e')), + ('(a|b|c|d|e)f', 'ef', '0,1', ascii(('ef', 'e'))), + ('abcd*efg', 'abcdefg', '0', ascii('abcdefg')), + ('ab*', 'xabyabbbz', '0', ascii('ab')), + ('ab*', 'xayabbbz', '0', ascii('a')), + ('(ab|cd)e', 'abcde', '0,1', ascii(('cde', 'cd'))), + ('[abhgefdc]ij', 'hij', '0', ascii('hij')), + ('^(ab|cd)e', 'abcde', '', ascii(None)), + ('(abc|)ef', 'abcdef', '0,1', ascii(('ef', ''))), + ('(a|b)c*d', 'abcd', '0,1', ascii(('bcd', 'b'))), + + ('(ab|ab*)bc', 'abc', '0,1', ascii(('abc', 'a'))), + ('a([bc]*)c*', 'abc', '0,1', ascii(('abc', 'bc'))), + ('a([bc]*)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]+)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]*)(c+d)', 'abcd', '0,1,2', ascii(('abcd', 'b', 'cd'))), + ('a[bcd]*dcdcde', 'adcdcde', '0', ascii('adcdcde')), + ('a[bcd]+dcdcde', 'adcdcde', '', ascii(None)), + ('(ab|a)b*c', 'abc', '0,1', ascii(('abc', 'ab'))), + ('((a)(b)c)(d)', 'abcd', '1,2,3,4', ascii(('abc', 'a', 'b', 'd'))), + ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', '0', ascii('alpha')), + + ('^a(bc+|b[eh])g|.h$', 'abh', '0,1', ascii(('bh', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'ij', '0,1,2', ascii(('ij', 'ij', + 'j'))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effg', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(((((((((a)))))))))', 'a', '0', ascii('a')), + ('multiple words of text', 'uh-uh', '', ascii(None)), + ('multiple words', 'multiple words, yeah', '0', + ascii('multiple words')), + ('(.*)c(.*)', 'abcde', '0,1,2', ascii(('abcde', 'ab', 'de'))), + + ('\\((.*), (.*)\\)', '(a, b)', '2,1', ascii(('b', 'a'))), + ('[k]', 'ab', '', ascii(None)), + ('a[-]?c', 'ac', '0', ascii('ac')), + ('(abc)\\1', 'abcabc', '1', ascii('abc')), + ('([a-c]*)\\1', 'abcabc', '1', ascii('abc')), + ('^(.+)?B', 'AB', '1', ascii('A')), + ('(a+).\\1$', 'aaaaa', '0,1', ascii(('aaaaa', 'aa'))), + ('^(a+).\\1$', 'aaaa', '', ascii(None)), + ('(abc)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))), + ('([a-c]+)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))), + + ('(a)\\1', 'aa', '0,1', ascii(('aa', 'a'))), + ('(a+)\\1', 
'aa', '0,1', ascii(('aa', 'a'))), + ('(a+)+\\1', 'aa', '0,1', ascii(('aa', 'a'))), + ('(a).+\\1', 'aba', '0,1', ascii(('aba', 'a'))), + ('(a)ba*\\1', 'aba', '0,1', ascii(('aba', 'a'))), + ('(aa|a)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))), + ('(a|aa)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))), + ('(a+)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))), + ('([abc]*)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))), + ('(a)(b)c|ab', 'ab', '0,1,2', ascii(('ab', None, None))), + + ('(a)+x', 'aaax', '0,1', ascii(('aaax', 'a'))), + ('([ac])+x', 'aacx', '0,1', ascii(('aacx', 'c'))), + ('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', '0,1', + ascii(('d:msgs/tdir/sub1/', 'tdir/'))), + ('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', + '0,1,2,3', ascii(('track1.title:TBlah blah blah', 'track1', + 'title', 'Blah blah blah'))), + ('([^N]*N)+', 'abNNxyzN', '0,1', ascii(('abNNxyzN', 'xyzN'))), + ('([^N]*N)+', 'abNNxyz', '0,1', ascii(('abNN', 'N'))), + ('([abc]*)x', 'abcx', '0,1', ascii(('abcx', 'abc'))), + ('([abc]*)x', 'abc', '', ascii(None)), + ('([xyz]*)x', 'abcx', '0,1', ascii(('x', ''))), + ('(a)+b|aac', 'aac', '0,1', ascii(('aac', None))), + + # Test symbolic groups. + ('(?Paaa)a', 'aaaa', '', regex.error, self.BAD_GROUP_NAME), + ('(?Paaa)a', 'aaaa', '0,id', ascii(('aaaa', 'aaa'))), + ('(?Paa)(?P=id)', 'aaaa', '0,id', ascii(('aaaa', 'aa'))), + ('(?Paa)(?P=xd)', 'aaaa', '', regex.error, self.UNKNOWN_GROUP), + + # Character properties. + (r"\g", "g", '0', ascii('g')), + (r"\g<1>", "g", '', regex.error, self.INVALID_GROUP_REF), + (r"(.)\g<1>", "gg", '0', ascii('gg')), + (r"(.)\g<1>", "gg", '', ascii(('gg', 'g'))), + (r"\N", "N", '0', ascii('N')), + (r"\N{LATIN SMALL LETTER A}", "a", '0', ascii('a')), + (r"\p", "p", '0', ascii('p')), + (r"\p{Ll}", "a", '0', ascii('a')), + (r"\P", "P", '0', ascii('P')), + (r"\P{Lu}", "p", '0', ascii('p')), + + # All tests from Perl. 
+ ('abc', 'abc', '0', ascii('abc')), + ('abc', 'xbc', '', ascii(None)), + ('abc', 'axc', '', ascii(None)), + ('abc', 'abx', '', ascii(None)), + ('abc', 'xabcy', '0', ascii('abc')), + ('abc', 'ababc', '0', ascii('abc')), + + ('ab*c', 'abc', '0', ascii('abc')), + ('ab*bc', 'abc', '0', ascii('abc')), + ('ab*bc', 'abbc', '0', ascii('abbc')), + ('ab*bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{0,}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab+bc', 'abbc', '0', ascii('abbc')), + ('ab+bc', 'abc', '', ascii(None)), + ('ab+bc', 'abq', '', ascii(None)), + ('ab{1,}bc', 'abq', '', ascii(None)), + ('ab+bc', 'abbbbc', '0', ascii('abbbbc')), + + ('ab{1,}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{1,3}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{3,4}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{4,5}bc', 'abbbbc', '', ascii(None)), + ('ab?bc', 'abbc', '0', ascii('abbc')), + ('ab?bc', 'abc', '0', ascii('abc')), + ('ab{0,1}bc', 'abc', '0', ascii('abc')), + ('ab?bc', 'abbbbc', '', ascii(None)), + ('ab?c', 'abc', '0', ascii('abc')), + ('ab{0,1}c', 'abc', '0', ascii('abc')), + + ('^abc$', 'abc', '0', ascii('abc')), + ('^abc$', 'abcc', '', ascii(None)), + ('^abc', 'abcc', '0', ascii('abc')), + ('^abc$', 'aabc', '', ascii(None)), + ('abc$', 'aabc', '0', ascii('abc')), + ('^', 'abc', '0', ascii('')), + ('$', 'abc', '0', ascii('')), + ('a.c', 'abc', '0', ascii('abc')), + ('a.c', 'axc', '0', ascii('axc')), + ('a.*c', 'axyzc', '0', ascii('axyzc')), + + ('a.*c', 'axyzd', '', ascii(None)), + ('a[bc]d', 'abc', '', ascii(None)), + ('a[bc]d', 'abd', '0', ascii('abd')), + ('a[b-d]e', 'abd', '', ascii(None)), + ('a[b-d]e', 'ace', '0', ascii('ace')), + ('a[b-d]', 'aac', '0', ascii('ac')), + ('a[-b]', 'a-', '0', ascii('a-')), + ('a[b-]', 'a-', '0', ascii('a-')), + ('a[b-a]', '-', '', regex.error, self.BAD_CHAR_RANGE), + ('a[]b', '-', '', regex.error, self.BAD_SET), + + ('a[', '-', '', regex.error, self.BAD_SET), + ('a]', 'a]', '0', ascii('a]')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[^bc]d', 'aed', '0', ascii('aed')), + ('a[^bc]d', 'abd', '', ascii(None)), + ('a[^-b]c', 'adc', '0', ascii('adc')), + ('a[^-b]c', 'a-c', '', ascii(None)), + ('a[^]b]c', 'a]c', '', ascii(None)), + ('a[^]b]c', 'adc', '0', ascii('adc')), + ('ab|cd', 'abc', '0', ascii('ab')), + + ('ab|cd', 'abcd', '0', ascii('ab')), + ('()ef', 'def', '0,1', ascii(('ef', ''))), + ('*a', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(*)b', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('$b', 'b', '', ascii(None)), + ('a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('a\\(b', 'a(b', '', ascii(('a(b',))), + ('a\\(*b', 'ab', '0', ascii('ab')), + ('a\\(*b', 'a((b', '0', ascii('a((b')), + ('a\\\\b', 'a\\b', '0', ascii('a\\b')), + + ('abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('((a))', 'abc', '0,1,2', ascii(('a', 'a', 'a'))), + ('(a)b(c)', 'abc', '0,1,2', ascii(('abc', 'a', 'c'))), + ('a+b+c', 'aabbabc', '0', ascii('abc')), + ('a{1,}b{1,}c', 'aabbabc', '0', ascii('abc')), + ('a**', '-', '', regex.error, self.MULTIPLE_REPEAT), + ('a.+?c', 'abcabc', '0', ascii('abc')), + ('(a+|b)*', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b){0,}', 'ab', '0,1', ascii(('ab', 'b'))), + + ('(a+|b)+', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b){1,}', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)?', 'ab', '0,1', ascii(('a', 'a'))), + ('(a+|b){0,1}', 'ab', '0,1', ascii(('a', 'a'))), + (')(', '-', '', regex.error, self.TRAILING_CHARS), + ('[^ab]*', 'cde', '0', ascii('cde')), + ('abc', '', '', ascii(None)), + ('a*', '', '0', 
ascii('')), + ('([abc])*d', 'abbbcd', '0,1', ascii(('abbbcd', 'c'))), + ('([abc])*bcd', 'abcd', '0,1', ascii(('abcd', 'a'))), + + ('a|b|c|d|e', 'e', '0', ascii('e')), + ('(a|b|c|d|e)f', 'ef', '0,1', ascii(('ef', 'e'))), + ('abcd*efg', 'abcdefg', '0', ascii('abcdefg')), + ('ab*', 'xabyabbbz', '0', ascii('ab')), + ('ab*', 'xayabbbz', '0', ascii('a')), + ('(ab|cd)e', 'abcde', '0,1', ascii(('cde', 'cd'))), + ('[abhgefdc]ij', 'hij', '0', ascii('hij')), + ('^(ab|cd)e', 'abcde', '', ascii(None)), + ('(abc|)ef', 'abcdef', '0,1', ascii(('ef', ''))), + ('(a|b)c*d', 'abcd', '0,1', ascii(('bcd', 'b'))), + + ('(ab|ab*)bc', 'abc', '0,1', ascii(('abc', 'a'))), + ('a([bc]*)c*', 'abc', '0,1', ascii(('abc', 'bc'))), + ('a([bc]*)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]+)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]*)(c+d)', 'abcd', '0,1,2', ascii(('abcd', 'b', 'cd'))), + ('a[bcd]*dcdcde', 'adcdcde', '0', ascii('adcdcde')), + ('a[bcd]+dcdcde', 'adcdcde', '', ascii(None)), + ('(ab|a)b*c', 'abc', '0,1', ascii(('abc', 'ab'))), + ('((a)(b)c)(d)', 'abcd', '1,2,3,4', ascii(('abc', 'a', 'b', 'd'))), + ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', '0', ascii('alpha')), + + ('^a(bc+|b[eh])g|.h$', 'abh', '0,1', ascii(('bh', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'ij', '0,1,2', ascii(('ij', 'ij', + 'j'))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effg', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('((((((((((a))))))))))', 'a', '10', ascii('a')), + ('((((((((((a))))))))))\\10', 'aa', '0', ascii('aa')), + + # Python does not have the same rules for \\41 so this is a syntax error + # ('((((((((((a))))))))))\\41', 'aa', '', ascii(None)), + # ('((((((((((a))))))))))\\41', 'a!', '0', ascii('a!')), + ('((((((((((a))))))))))\\41', '', '', regex.error, + self.INVALID_GROUP_REF), + ('(?i)((((((((((a))))))))))\\41', '', '', regex.error, + self.INVALID_GROUP_REF), + + ('(((((((((a)))))))))', 'a', '0', ascii('a')), + ('multiple words of text', 'uh-uh', '', ascii(None)), + ('multiple words', 'multiple words, yeah', '0', + ascii('multiple words')), + ('(.*)c(.*)', 'abcde', '0,1,2', ascii(('abcde', 'ab', 'de'))), + ('\\((.*), (.*)\\)', '(a, b)', '2,1', ascii(('b', 'a'))), + ('[k]', 'ab', '', ascii(None)), + ('a[-]?c', 'ac', '0', ascii('ac')), + ('(abc)\\1', 'abcabc', '1', ascii('abc')), + ('([a-c]*)\\1', 'abcabc', '1', ascii('abc')), + ('(?i)abc', 'ABC', '0', ascii('ABC')), + + ('(?i)abc', 'XBC', '', ascii(None)), + ('(?i)abc', 'AXC', '', ascii(None)), + ('(?i)abc', 'ABX', '', ascii(None)), + ('(?i)abc', 'XABCY', '0', ascii('ABC')), + ('(?i)abc', 'ABABC', '0', ascii('ABC')), + ('(?i)ab*c', 'ABC', '0', ascii('ABC')), + ('(?i)ab*bc', 'ABC', '0', ascii('ABC')), + ('(?i)ab*bc', 'ABBC', '0', ascii('ABBC')), + ('(?i)ab*?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{0,}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + + ('(?i)ab+?bc', 'ABBC', '0', ascii('ABBC')), + ('(?i)ab+bc', 'ABC', '', ascii(None)), + ('(?i)ab+bc', 'ABQ', '', ascii(None)), + ('(?i)ab{1,}bc', 'ABQ', '', ascii(None)), + ('(?i)ab+bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{1,}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{1,3}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{3,4}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{4,5}?bc', 'ABBBBC', '', ascii(None)), + ('(?i)ab??bc', 'ABBC', '0', ascii('ABBC')), + + ('(?i)ab??bc', 'ABC', '0', ascii('ABC')), + 
('(?i)ab{0,1}?bc', 'ABC', '0', ascii('ABC')), + ('(?i)ab??bc', 'ABBBBC', '', ascii(None)), + ('(?i)ab??c', 'ABC', '0', ascii('ABC')), + ('(?i)ab{0,1}?c', 'ABC', '0', ascii('ABC')), + ('(?i)^abc$', 'ABC', '0', ascii('ABC')), + ('(?i)^abc$', 'ABCC', '', ascii(None)), + ('(?i)^abc', 'ABCC', '0', ascii('ABC')), + ('(?i)^abc$', 'AABC', '', ascii(None)), + ('(?i)abc$', 'AABC', '0', ascii('ABC')), + + ('(?i)^', 'ABC', '0', ascii('')), + ('(?i)$', 'ABC', '0', ascii('')), + ('(?i)a.c', 'ABC', '0', ascii('ABC')), + ('(?i)a.c', 'AXC', '0', ascii('AXC')), + ('(?i)a.*?c', 'AXYZC', '0', ascii('AXYZC')), + ('(?i)a.*c', 'AXYZD', '', ascii(None)), + ('(?i)a[bc]d', 'ABC', '', ascii(None)), + ('(?i)a[bc]d', 'ABD', '0', ascii('ABD')), + ('(?i)a[b-d]e', 'ABD', '', ascii(None)), + ('(?i)a[b-d]e', 'ACE', '0', ascii('ACE')), + + ('(?i)a[b-d]', 'AAC', '0', ascii('AC')), + ('(?i)a[-b]', 'A-', '0', ascii('A-')), + ('(?i)a[b-]', 'A-', '0', ascii('A-')), + ('(?i)a[b-a]', '-', '', regex.error, self.BAD_CHAR_RANGE), + ('(?i)a[]b', '-', '', regex.error, self.BAD_SET), + ('(?i)a[', '-', '', regex.error, self.BAD_SET), + ('(?i)a]', 'A]', '0', ascii('A]')), + ('(?i)a[]]b', 'A]B', '0', ascii('A]B')), + ('(?i)a[^bc]d', 'AED', '0', ascii('AED')), + ('(?i)a[^bc]d', 'ABD', '', ascii(None)), + + ('(?i)a[^-b]c', 'ADC', '0', ascii('ADC')), + ('(?i)a[^-b]c', 'A-C', '', ascii(None)), + ('(?i)a[^]b]c', 'A]C', '', ascii(None)), + ('(?i)a[^]b]c', 'ADC', '0', ascii('ADC')), + ('(?i)ab|cd', 'ABC', '0', ascii('AB')), + ('(?i)ab|cd', 'ABCD', '0', ascii('AB')), + ('(?i)()ef', 'DEF', '0,1', ascii(('EF', ''))), + ('(?i)*a', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(?i)(*)b', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(?i)$b', 'B', '', ascii(None)), + + ('(?i)a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('(?i)a\\(b', 'A(B', '', ascii(('A(B',))), + ('(?i)a\\(*b', 'AB', '0', ascii('AB')), + ('(?i)a\\(*b', 'A((B', '0', ascii('A((B')), + ('(?i)a\\\\b', 'A\\B', '0', ascii('A\\B')), + ('(?i)abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(?i)(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('(?i)((a))', 'ABC', '0,1,2', ascii(('A', 'A', 'A'))), + ('(?i)(a)b(c)', 'ABC', '0,1,2', ascii(('ABC', 'A', 'C'))), + ('(?i)a+b+c', 'AABBABC', '0', ascii('ABC')), + + ('(?i)a{1,}b{1,}c', 'AABBABC', '0', ascii('ABC')), + ('(?i)a**', '-', '', regex.error, self.MULTIPLE_REPEAT), + ('(?i)a.+?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)a.*?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)a.{0,5}?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)(a+|b)*', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b){0,}', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b)+', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b){1,}', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b)?', 'AB', '0,1', ascii(('A', 'A'))), + + ('(?i)(a+|b){0,1}', 'AB', '0,1', ascii(('A', 'A'))), + ('(?i)(a+|b){0,1}?', 'AB', '0,1', ascii(('', None))), + ('(?i))(', '-', '', regex.error, self.TRAILING_CHARS), + ('(?i)[^ab]*', 'CDE', '0', ascii('CDE')), + ('(?i)abc', '', '', ascii(None)), + ('(?i)a*', '', '0', ascii('')), + ('(?i)([abc])*d', 'ABBBCD', '0,1', ascii(('ABBBCD', 'C'))), + ('(?i)([abc])*bcd', 'ABCD', '0,1', ascii(('ABCD', 'A'))), + ('(?i)a|b|c|d|e', 'E', '0', ascii('E')), + ('(?i)(a|b|c|d|e)f', 'EF', '0,1', ascii(('EF', 'E'))), + + ('(?i)abcd*efg', 'ABCDEFG', '0', ascii('ABCDEFG')), + ('(?i)ab*', 'XABYABBBZ', '0', ascii('AB')), + ('(?i)ab*', 'XAYABBBZ', '0', ascii('A')), + ('(?i)(ab|cd)e', 'ABCDE', '0,1', ascii(('CDE', 'CD'))), + ('(?i)[abhgefdc]ij', 'HIJ', '0', ascii('HIJ')), + 
('(?i)^(ab|cd)e', 'ABCDE', '', ascii(None)), + ('(?i)(abc|)ef', 'ABCDEF', '0,1', ascii(('EF', ''))), + ('(?i)(a|b)c*d', 'ABCD', '0,1', ascii(('BCD', 'B'))), + ('(?i)(ab|ab*)bc', 'ABC', '0,1', ascii(('ABC', 'A'))), + ('(?i)a([bc]*)c*', 'ABC', '0,1', ascii(('ABC', 'BC'))), + + ('(?i)a([bc]*)(c*d)', 'ABCD', '0,1,2', ascii(('ABCD', 'BC', 'D'))), + ('(?i)a([bc]+)(c*d)', 'ABCD', '0,1,2', ascii(('ABCD', 'BC', 'D'))), + ('(?i)a([bc]*)(c+d)', 'ABCD', '0,1,2', ascii(('ABCD', 'B', 'CD'))), + ('(?i)a[bcd]*dcdcde', 'ADCDCDE', '0', ascii('ADCDCDE')), + ('(?i)a[bcd]+dcdcde', 'ADCDCDE', '', ascii(None)), + ('(?i)(ab|a)b*c', 'ABC', '0,1', ascii(('ABC', 'AB'))), + ('(?i)((a)(b)c)(d)', 'ABCD', '1,2,3,4', ascii(('ABC', 'A', 'B', + 'D'))), + ('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', '0', ascii('ALPHA')), + ('(?i)^a(bc+|b[eh])g|.h$', 'ABH', '0,1', ascii(('BH', None))), + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', '0,1,2', ascii(('EFFGZ', + 'EFFGZ', None))), + + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', '0,1,2', ascii(('IJ', 'IJ', + 'J'))), + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', '', ascii(None)), + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', '', ascii(None)), + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', '0,1,2', ascii(('EFFGZ', + 'EFFGZ', None))), + ('(?i)((((((((((a))))))))))', 'A', '10', ascii('A')), + ('(?i)((((((((((a))))))))))\\10', 'AA', '0', ascii('AA')), + #('(?i)((((((((((a))))))))))\\41', 'AA', '', ascii(None)), + #('(?i)((((((((((a))))))))))\\41', 'A!', '0', ascii('A!')), + ('(?i)(((((((((a)))))))))', 'A', '0', ascii('A')), + ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', '1', + ascii('A')), + ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', '1', + ascii('C')), + ('(?i)multiple words of text', 'UH-UH', '', ascii(None)), + + ('(?i)multiple words', 'MULTIPLE WORDS, YEAH', '0', + ascii('MULTIPLE WORDS')), + ('(?i)(.*)c(.*)', 'ABCDE', '0,1,2', ascii(('ABCDE', 'AB', 'DE'))), + ('(?i)\\((.*), (.*)\\)', '(A, B)', '2,1', ascii(('B', 'A'))), + ('(?i)[k]', 'AB', '', ascii(None)), + # ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', ascii(ABCD-$&-\\ABCD)), + # ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', ascii(BC-$1-\\BC)), + ('(?i)a[-]?c', 'AC', '0', ascii('AC')), + ('(?i)(abc)\\1', 'ABCABC', '1', ascii('ABC')), + ('(?i)([a-c]*)\\1', 'ABCABC', '1', ascii('ABC')), + ('a(?!b).', 'abad', '0', ascii('ad')), + ('a(?=d).', 'abad', '0', ascii('ad')), + ('a(?=c|d).', 'abad', '0', ascii('ad')), + + ('a(?:b|c|d)(.)', 'ace', '1', ascii('e')), + ('a(?:b|c|d)*(.)', 'ace', '1', ascii('e')), + ('a(?:b|c|d)+?(.)', 'ace', '1', ascii('e')), + ('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', '1,2', ascii(('c', 'e'))), + + # Lookbehind: split by : but not if it is escaped by -. + ('(?]*?b', 'a>b', '', ascii(None)), + # Bug 490573: minimizing repeat problem. + (r'^a*?$', 'foo', '', ascii(None)), + # Bug 470582: nested groups problem. + (r'^((a)c)?(ab)$', 'ab', '1,2,3', ascii((None, None, 'ab'))), + # Another minimizing repeat problem (capturing groups in assertions). 
+ ('^([ab]*?)(?=(b)?)c', 'abc', '1,2', ascii(('ab', None))), + ('^([ab]*?)(?!(b))c', 'abc', '1,2', ascii(('ab', None))), + ('^([ab]*?)(?(.){0,2})d", "abcd").captures(1), + ['b', 'c']) + self.assertEqual(regex.search(r"(.)+", "a").captures(1), ['a']) + + def test_guards(self): + m = regex.search(r"(X.*?Y\s*){3}(X\s*)+AB:", + "XY\nX Y\nX Y\nXY\nXX AB:") + self.assertEqual(m.span(0, 1, 2), ((3, 21), (12, 15), (16, 18))) + + m = regex.search(r"(X.*?Y\s*){3,}(X\s*)+AB:", + "XY\nX Y\nX Y\nXY\nXX AB:") + self.assertEqual(m.span(0, 1, 2), ((0, 21), (12, 15), (16, 18))) + + m = regex.search(r'\d{4}(\s*\w)?\W*((?!\d)\w){2}', "9999XX") + self.assertEqual(m.span(0, 1, 2), ((0, 6), (-1, -1), (5, 6))) + + m = regex.search(r'A\s*?.*?(\n+.*?\s*?){0,2}\(X', 'A\n1\nS\n1 (X') + self.assertEqual(m.span(0, 1), ((0, 10), (5, 8))) + + m = regex.search(r'Derde\s*:', 'aaaaaa:\nDerde:') + self.assertEqual(m.span(), (8, 14)) + m = regex.search(r'Derde\s*:', 'aaaaa:\nDerde:') + self.assertEqual(m.span(), (7, 13)) + + def test_turkic(self): + # Turkish has dotted and dotless I/i. + pairs = "I=i;I=\u0131;i=\u0130" + + all_chars = set() + matching = set() + for pair in pairs.split(";"): + ch1, ch2 = pair.split("=") + all_chars.update((ch1, ch2)) + matching.add((ch1, ch1)) + matching.add((ch1, ch2)) + matching.add((ch2, ch1)) + matching.add((ch2, ch2)) + + for ch1 in all_chars: + for ch2 in all_chars: + m = regex.match(r"(?i)\A" + ch1 + r"\Z", ch2) + if m: + if (ch1, ch2) not in matching: + self.fail("{} matching {}".format(ascii(ch1), + ascii(ch2))) + else: + if (ch1, ch2) in matching: + self.fail("{} not matching {}".format(ascii(ch1), + ascii(ch2))) + + def test_named_lists(self): + options = ["one", "two", "three"] + self.assertEqual(regex.match(r"333\L444", "333one444", + bar=options).group(), "333one444") + self.assertEqual(regex.match(r"(?i)333\L444", "333TWO444", + bar=options).group(), "333TWO444") + self.assertEqual(regex.match(r"333\L444", "333four444", + bar=options), None) + + options = [b"one", b"two", b"three"] + self.assertEqual(regex.match(br"333\L444", b"333one444", + bar=options).group(), b"333one444") + self.assertEqual(regex.match(br"(?i)333\L444", b"333TWO444", + bar=options).group(), b"333TWO444") + self.assertEqual(regex.match(br"333\L444", b"333four444", + bar=options), None) + + self.assertEqual(repr(type(regex.compile(r"3\L4\L+5", + bar=["one", "two", "three"]))), self.PATTERN_CLASS) + + self.assertEqual(regex.findall(r"^\L", "solid QWERT", + options=set(['good', 'brilliant', '+s\\ol[i}d'])), []) + self.assertEqual(regex.findall(r"^\L", "+solid QWERT", + options=set(['good', 'brilliant', '+solid'])), ['+solid']) + + options = ["STRASSE"] + self.assertEqual(regex.match(r"(?fi)\L", + "stra\N{LATIN SMALL LETTER SHARP S}e", words=options).span(), (0, + 6)) + + options = ["STRASSE", "stress"] + self.assertEqual(regex.match(r"(?fi)\L", + "stra\N{LATIN SMALL LETTER SHARP S}e", words=options).span(), (0, + 6)) + + options = ["stra\N{LATIN SMALL LETTER SHARP S}e"] + self.assertEqual(regex.match(r"(?fi)\L", "STRASSE", + words=options).span(), (0, 7)) + + options = ["kit"] + self.assertEqual(regex.search(r"(?i)\L", "SKITS", + words=options).span(), (1, 4)) + self.assertEqual(regex.search(r"(?i)\L", + "SK\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}TS", + words=options).span(), (1, 4)) + + self.assertEqual(regex.search(r"(?fi)\b(\w+) +\1\b", + " stra\N{LATIN SMALL LETTER SHARP S}e STRASSE ").span(), (1, 15)) + self.assertEqual(regex.search(r"(?fi)\b(\w+) +\1\b", + " STRASSE stra\N{LATIN SMALL LETTER SHARP S}e 
").span(), (1, 15)) + + self.assertEqual(regex.search(r"^\L$", "", options=[]).span(), + (0, 0)) + + def test_fuzzy(self): + # Some tests borrowed from TRE library tests. + self.assertEqual(repr(type(regex.compile('(fou){s,e<=1}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(fuu){s}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(fuu){s,e}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(anaconda){1i+1d<1,s<=1}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(anaconda){1i+1d<1,s<=1,e<=10}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(anaconda){s<=1,e<=1,1i+1d<1}'))), + self.PATTERN_CLASS) + + text = 'molasses anaconda foo bar baz smith anderson ' + self.assertEqual(regex.search('(znacnda){s<=1,e<=3,1i+1d<1}', text), + None) + self.assertEqual(regex.search('(znacnda){s<=1,e<=3,1i+1d<2}', + text).span(0, 1), ((9, 17), (9, 17))) + self.assertEqual(regex.search('(ananda){1i+1d<2}', text), None) + self.assertEqual(regex.search(r"(?:\bznacnda){e<=2}", text)[0], + "anaconda") + self.assertEqual(regex.search(r"(?:\bnacnda){e<=2}", text)[0], + "anaconda") + + text = 'anaconda foo bar baz smith anderson' + self.assertEqual(regex.search('(fuu){i<=3,d<=3,e<=5}', text).span(0, + 1), ((0, 0), (0, 0))) + self.assertEqual(regex.search('(?b)(fuu){i<=3,d<=3,e<=5}', + text).span(0, 1), ((9, 10), (9, 10))) + self.assertEqual(regex.search('(fuu){i<=2,d<=2,e<=5}', text).span(0, + 1), ((7, 10), (7, 10))) + self.assertEqual(regex.search('(?e)(fuu){i<=2,d<=2,e<=5}', + text).span(0, 1), ((9, 10), (9, 10))) + self.assertEqual(regex.search('(fuu){i<=3,d<=3,e}', text).span(0, 1), + ((0, 0), (0, 0))) + self.assertEqual(regex.search('(?b)(fuu){i<=3,d<=3,e}', text).span(0, + 1), ((9, 10), (9, 10))) + + self.assertEqual(repr(type(regex.compile('(approximate){s<=3,1i+1d<3}'))), + self.PATTERN_CLASS) + + # No cost limit. + self.assertEqual(regex.search('(foobar){e}', + 'xirefoabralfobarxie').span(0, 1), ((0, 6), (0, 6))) + self.assertEqual(regex.search('(?e)(foobar){e}', + 'xirefoabralfobarxie').span(0, 1), ((0, 3), (0, 3))) + self.assertEqual(regex.search('(?b)(foobar){e}', + 'xirefoabralfobarxie').span(0, 1), ((11, 16), (11, 16))) + + # At most two errors. + self.assertEqual(regex.search('(foobar){e<=2}', + 'xirefoabrzlfd').span(0, 1), ((4, 9), (4, 9))) + self.assertEqual(regex.search('(foobar){e<=2}', 'xirefoabzlfd'), None) + + # At most two inserts or substitutions and max two errors total. + self.assertEqual(regex.search('(foobar){i<=2,s<=2,e<=2}', + 'oobargoobaploowap').span(0, 1), ((5, 11), (5, 11))) + + # Find best whole word match for "foobar". + self.assertEqual(regex.search('\\b(foobar){e}\\b', 'zfoobarz').span(0, + 1), ((0, 8), (0, 8))) + self.assertEqual(regex.search('\\b(foobar){e}\\b', + 'boing zfoobarz goobar woop').span(0, 1), ((0, 6), (0, 6))) + self.assertEqual(regex.search('(?b)\\b(foobar){e}\\b', + 'boing zfoobarz goobar woop').span(0, 1), ((15, 21), (15, 21))) + + # Match whole string, allow only 1 error. 
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoobar').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobarx').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fooxbar').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foxbar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xoobar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobax').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'oobar').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fobar').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fooba').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoobarx'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobarxx'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xxfoobar'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoxbar'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foxbarx'), None) + + # At most one insert, two deletes, and three substitutions. + # Additionally, deletes cost two and substitutes one, and total + # cost must be less than 4. + self.assertEqual(regex.search('(foobar){i<=1,d<=2,s<=3,2d+1s<4}', + '3oifaowefbaoraofuiebofasebfaobfaorfeoaro').span(0, 1), ((6, 13), (6, + 13))) + self.assertEqual(regex.search('(?b)(foobar){i<=1,d<=2,s<=3,2d+1s<4}', + '3oifaowefbaoraofuiebofasebfaobfaorfeoaro').span(0, 1), ((34, 39), + (34, 39))) + + # Partially fuzzy matches. + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'foobarzap').span(0, + 1), ((0, 9), (3, 6))) + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'fobarzap'), None) + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'foobrzap').span(0, + 1), ((0, 8), (3, 5))) + + text = ('www.cnn.com 64.236.16.20\nwww.slashdot.org 66.35.250.150\n' + 'For useful information, use www.slashdot.org\nthis is demo data!\n') + self.assertEqual(regex.search(r'(?s)^.*(dot.org){e}.*$', text).span(0, + 1), ((0, 120), (120, 120))) + self.assertEqual(regex.search(r'(?es)^.*(dot.org){e}.*$', text).span(0, + 1), ((0, 120), (93, 100))) + self.assertEqual(regex.search(r'^.*(dot.org){e}.*$', text).span(0, 1), + ((0, 119), (24, 101))) + + # Behaviour is unexpected, but arguably not wrong. It first finds the + # best match, then the best in what follows, etc. 
+ self.assertEqual(regex.findall(r"\b\L{e<=1}\b", + " book cot dog desk ", words="cat dog".split()), ["cot", "dog"]) + self.assertEqual(regex.findall(r"\b\L{e<=1}\b", + " book dog cot desk ", words="cat dog".split()), [" dog", "cot"]) + self.assertEqual(regex.findall(r"(?e)\b\L{e<=1}\b", + " book dog cot desk ", words="cat dog".split()), ["dog", "cot"]) + self.assertEqual(regex.findall(r"(?r)\b\L{e<=1}\b", + " book cot dog desk ", words="cat dog".split()), ["dog ", "cot"]) + self.assertEqual(regex.findall(r"(?er)\b\L{e<=1}\b", + " book cot dog desk ", words="cat dog".split()), ["dog", "cot"]) + self.assertEqual(regex.findall(r"(?r)\b\L{e<=1}\b", + " book dog cot desk ", words="cat dog".split()), ["cot", "dog"]) + self.assertEqual(regex.findall(br"\b\L{e<=1}\b", + b" book cot dog desk ", words=b"cat dog".split()), [b"cot", b"dog"]) + self.assertEqual(regex.findall(br"\b\L{e<=1}\b", + b" book dog cot desk ", words=b"cat dog".split()), [b" dog", b"cot"]) + self.assertEqual(regex.findall(br"(?e)\b\L{e<=1}\b", + b" book dog cot desk ", words=b"cat dog".split()), [b"dog", b"cot"]) + self.assertEqual(regex.findall(br"(?r)\b\L{e<=1}\b", + b" book cot dog desk ", words=b"cat dog".split()), [b"dog ", b"cot"]) + self.assertEqual(regex.findall(br"(?er)\b\L{e<=1}\b", + b" book cot dog desk ", words=b"cat dog".split()), [b"dog", b"cot"]) + self.assertEqual(regex.findall(br"(?r)\b\L{e<=1}\b", + b" book dog cot desk ", words=b"cat dog".split()), [b"cot", b"dog"]) + + self.assertEqual(regex.search(r"(\w+) (\1{e<=1})", "foo fou").groups(), + ("foo", "fou")) + self.assertEqual(regex.search(r"(?r)(\2{e<=1}) (\w+)", + "foo fou").groups(), ("foo", "fou")) + self.assertEqual(regex.search(br"(\w+) (\1{e<=1})", + b"foo fou").groups(), (b"foo", b"fou")) + + self.assertEqual(regex.findall(r"(?:(?:QR)+){e}", "abcde"), ["abcde", + ""]) + self.assertEqual(regex.findall(r"(?:Q+){e}", "abc"), ["abc", ""]) + + # Hg issue 41: = for fuzzy matches + self.assertEqual(regex.match(r"(?:service detection){0[^()]+)|(?R))*\)", "(ab(cd)ef)")[ + : ], ("(ab(cd)ef)", "ef")) + self.assertEqual(regex.search(r"\(((?>[^()]+)|(?R))*\)", + "(ab(cd)ef)").captures(1), ["ab", "cd", "(cd)", "ef"]) + + self.assertEqual(regex.search(r"(?r)\(((?R)|(?>[^()]+))*\)", + "(ab(cd)ef)")[ : ], ("(ab(cd)ef)", "ab")) + self.assertEqual(regex.search(r"(?r)\(((?R)|(?>[^()]+))*\)", + "(ab(cd)ef)").captures(1), ["ef", "cd", "(cd)", "ab"]) + + self.assertEqual(regex.search(r"\(([^()]+|(?R))*\)", + "some text (a(b(c)d)e) more text")[ : ], ("(a(b(c)d)e)", "e")) + + self.assertEqual(regex.search(r"(?r)\(((?R)|[^()]+)*\)", + "some text (a(b(c)d)e) more text")[ : ], ("(a(b(c)d)e)", "a")) + + self.assertEqual(regex.search(r"(foo(\(((?:(?>[^()]+)|(?2))*)\)))", + "foo(bar(baz)+baz(bop))")[ : ], ("foo(bar(baz)+baz(bop))", + "foo(bar(baz)+baz(bop))", "(bar(baz)+baz(bop))", + "bar(baz)+baz(bop)")) + + self.assertEqual(regex.search(r"(?r)(foo(\(((?:(?2)|(?>[^()]+))*)\)))", + "foo(bar(baz)+baz(bop))")[ : ], ("foo(bar(baz)+baz(bop))", + "foo(bar(baz)+baz(bop))", "(bar(baz)+baz(bop))", + "bar(baz)+baz(bop)")) + + rgx = regex.compile(r"""^\s*(<\s*([a-zA-Z:]+)(?:\s*[a-zA-Z:]*\s*=\s*(?:'[^']*'|"[^"]*"))*\s*(/\s*)?>(?:[^<>]*|(?1))*(?(3)|<\s*/\s*\2\s*>))\s*$""") + self.assertEqual(bool(rgx.search('')), True) + self.assertEqual(bool(rgx.search('')), False) + self.assertEqual(bool(rgx.search('')), True) + self.assertEqual(bool(rgx.search('')), False) + self.assertEqual(bool(rgx.search('')), False) + + self.assertEqual(bool(rgx.search('')), False) + 
self.assertEqual(bool(rgx.search('')), True) + self.assertEqual(bool(rgx.search('< fooo / >')), True) + # The next regex should and does match. Perl 5.14 agrees. + #self.assertEqual(bool(rgx.search('foo')), False) + self.assertEqual(bool(rgx.search('foo')), False) + + self.assertEqual(bool(rgx.search('foo')), True) + self.assertEqual(bool(rgx.search('foo')), True) + self.assertEqual(bool(rgx.search('')), True) + + def test_copy(self): + # PatternObjects are immutable, therefore there's no need to clone them. + r = regex.compile("a") + self.assertTrue(copy.copy(r) is r) + self.assertTrue(copy.deepcopy(r) is r) + + # MatchObjects are normally mutable because the target string can be + # detached. However, after the target string has been detached, a + # MatchObject becomes immutable, so there's no need to clone it. + m = r.match("a") + self.assertTrue(copy.copy(m) is not m) + self.assertTrue(copy.deepcopy(m) is not m) + + self.assertTrue(m.string is not None) + m2 = copy.copy(m) + m2.detach_string() + self.assertTrue(m.string is not None) + self.assertTrue(m2.string is None) + + # The following behaviour matches that of the re module. + it = regex.finditer(".", "ab") + it2 = copy.copy(it) + self.assertEqual(next(it).group(), "a") + self.assertEqual(next(it2).group(), "b") + + # The following behaviour matches that of the re module. + it = regex.finditer(".", "ab") + it2 = copy.deepcopy(it) + self.assertEqual(next(it).group(), "a") + self.assertEqual(next(it2).group(), "b") + + # The following behaviour is designed to match that of copying 'finditer'. + it = regex.splititer(" ", "a b") + it2 = copy.copy(it) + self.assertEqual(next(it), "a") + self.assertEqual(next(it2), "b") + + # The following behaviour is designed to match that of copying 'finditer'. 
+ it = regex.splititer(" ", "a b") + it2 = copy.deepcopy(it) + self.assertEqual(next(it), "a") + self.assertEqual(next(it2), "b") + + def test_format(self): + self.assertEqual(regex.subf(r"(\w+) (\w+)", "{0} => {2} {1}", + "foo bar"), "foo bar => bar foo") + self.assertEqual(regex.subf(r"(?\w+) (?\w+)", + "{word2} {word1}", "foo bar"), "bar foo") + + self.assertEqual(regex.subfn(r"(\w+) (\w+)", "{0} => {2} {1}", + "foo bar"), ("foo bar => bar foo", 1)) + self.assertEqual(regex.subfn(r"(?\w+) (?\w+)", + "{word2} {word1}", "foo bar"), ("bar foo", 1)) + + self.assertEqual(regex.match(r"(\w+) (\w+)", + "foo bar").expandf("{0} => {2} {1}"), "foo bar => bar foo") + + def test_fullmatch(self): + self.assertEqual(bool(regex.fullmatch(r"abc", "abc")), True) + self.assertEqual(bool(regex.fullmatch(r"abc", "abcx")), False) + self.assertEqual(bool(regex.fullmatch(r"abc", "abcx", endpos=3)), True) + + self.assertEqual(bool(regex.fullmatch(r"abc", "xabc", pos=1)), True) + self.assertEqual(bool(regex.fullmatch(r"abc", "xabcy", pos=1)), False) + self.assertEqual(bool(regex.fullmatch(r"abc", "xabcy", pos=1, + endpos=4)), True) + + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abc")), True) + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abcx")), False) + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abcx", endpos=3)), + True) + + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabc", pos=1)), + True) + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabcy", pos=1)), + False) + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabcy", pos=1, + endpos=4)), True) + + def test_issue_18468(self): + self.assertTypedEqual(regex.sub('y', 'a', 'xyz'), 'xaz') + self.assertTypedEqual(regex.sub('y', StrSubclass('a'), + StrSubclass('xyz')), 'xaz') + self.assertTypedEqual(regex.sub(b'y', b'a', b'xyz'), b'xaz') + self.assertTypedEqual(regex.sub(b'y', BytesSubclass(b'a'), + BytesSubclass(b'xyz')), b'xaz') + self.assertTypedEqual(regex.sub(b'y', bytearray(b'a'), + bytearray(b'xyz')), b'xaz') + self.assertTypedEqual(regex.sub(b'y', memoryview(b'a'), + memoryview(b'xyz')), b'xaz') + + for string in ":a:b::c", StrSubclass(":a:b::c"): + self.assertTypedEqual(regex.split(":", string), ['', 'a', 'b', '', + 'c']) + if sys.version_info >= (3, 7, 0): + self.assertTypedEqual(regex.split(":*", string), ['', '', 'a', + '', 'b', '', 'c', '']) + self.assertTypedEqual(regex.split("(:*)", string), ['', ':', + '', '', 'a', ':', '', '', 'b', '::', '', '', 'c', '', '']) + else: + self.assertTypedEqual(regex.split(":*", string), ['', 'a', 'b', + 'c']) + self.assertTypedEqual(regex.split("(:*)", string), ['', ':', + 'a', ':', 'b', '::', 'c']) + + for string in (b":a:b::c", BytesSubclass(b":a:b::c"), + bytearray(b":a:b::c"), memoryview(b":a:b::c")): + self.assertTypedEqual(regex.split(b":", string), [b'', b'a', b'b', + b'', b'c']) + if sys.version_info >= (3, 7, 0): + self.assertTypedEqual(regex.split(b":*", string), [b'', b'', + b'a', b'', b'b', b'', b'c', b'']) + self.assertTypedEqual(regex.split(b"(:*)", string), [b'', b':', + b'', b'', b'a', b':', b'', b'', b'b', b'::', b'', b'', b'c', + b'', b'']) + else: + self.assertTypedEqual(regex.split(b":*", string), [b'', b'a', + b'b', b'c']) + self.assertTypedEqual(regex.split(b"(:*)", string), [b'', b':', + b'a', b':', b'b', b'::', b'c']) + + for string in "a:b::c:::d", StrSubclass("a:b::c:::d"): + self.assertTypedEqual(regex.findall(":+", string), [":", "::", + ":::"]) + self.assertTypedEqual(regex.findall("(:+)", string), [":", "::", + ":::"]) + 
self.assertTypedEqual(regex.findall("(:)(:*)", string), [(":", ""), + (":", ":"), (":", "::")]) + + for string in (b"a:b::c:::d", BytesSubclass(b"a:b::c:::d"), + bytearray(b"a:b::c:::d"), memoryview(b"a:b::c:::d")): + self.assertTypedEqual(regex.findall(b":+", string), [b":", b"::", + b":::"]) + self.assertTypedEqual(regex.findall(b"(:+)", string), [b":", b"::", + b":::"]) + self.assertTypedEqual(regex.findall(b"(:)(:*)", string), [(b":", + b""), (b":", b":"), (b":", b"::")]) + + for string in 'a', StrSubclass('a'): + self.assertEqual(regex.match('a', string).groups(), ()) + self.assertEqual(regex.match('(a)', string).groups(), ('a',)) + self.assertEqual(regex.match('(a)', string).group(0), 'a') + self.assertEqual(regex.match('(a)', string).group(1), 'a') + self.assertEqual(regex.match('(a)', string).group(1, 1), ('a', + 'a')) + + for string in (b'a', BytesSubclass(b'a'), bytearray(b'a'), + memoryview(b'a')): + self.assertEqual(regex.match(b'a', string).groups(), ()) + self.assertEqual(regex.match(b'(a)', string).groups(), (b'a',)) + self.assertEqual(regex.match(b'(a)', string).group(0), b'a') + self.assertEqual(regex.match(b'(a)', string).group(1), b'a') + self.assertEqual(regex.match(b'(a)', string).group(1, 1), (b'a', + b'a')) + + def test_partial(self): + self.assertEqual(regex.match('ab', 'a', partial=True).partial, True) + self.assertEqual(regex.match('ab', 'a', partial=True).span(), (0, 1)) + self.assertEqual(regex.match(r'cats', 'cat', partial=True).partial, + True) + self.assertEqual(regex.match(r'cats', 'cat', partial=True).span(), (0, + 3)) + self.assertEqual(regex.match(r'cats', 'catch', partial=True), None) + self.assertEqual(regex.match(r'abc\w{3}', 'abcdef', + partial=True).partial, False) + self.assertEqual(regex.match(r'abc\w{3}', 'abcdef', + partial=True).span(), (0, 6)) + self.assertEqual(regex.match(r'abc\w{3}', 'abcde', + partial=True).partial, True) + self.assertEqual(regex.match(r'abc\w{3}', 'abcde', + partial=True).span(), (0, 5)) + + self.assertEqual(regex.match(r'\d{4}$', '1234', partial=True).partial, + False) + + self.assertEqual(regex.match(r'\L', 'post', partial=True, + words=['post']).partial, False) + self.assertEqual(regex.match(r'\L', 'post', partial=True, + words=['post']).span(), (0, 4)) + self.assertEqual(regex.match(r'\L', 'pos', partial=True, + words=['post']).partial, True) + self.assertEqual(regex.match(r'\L', 'pos', partial=True, + words=['post']).span(), (0, 3)) + + self.assertEqual(regex.match(r'(?fi)\L', 'POST', partial=True, + words=['po\uFB06']).partial, False) + self.assertEqual(regex.match(r'(?fi)\L', 'POST', partial=True, + words=['po\uFB06']).span(), (0, 4)) + self.assertEqual(regex.match(r'(?fi)\L', 'POS', partial=True, + words=['po\uFB06']).partial, True) + self.assertEqual(regex.match(r'(?fi)\L', 'POS', partial=True, + words=['po\uFB06']).span(), (0, 3)) + self.assertEqual(regex.match(r'(?fi)\L', 'po\uFB06', + partial=True, words=['POS']), None) + + self.assertEqual(regex.match(r'[a-z]*4R$', 'a', partial=True).span(), + (0, 1)) + self.assertEqual(regex.match(r'[a-z]*4R$', 'ab', partial=True).span(), + (0, 2)) + self.assertEqual(regex.match(r'[a-z]*4R$', 'ab4', partial=True).span(), + (0, 3)) + self.assertEqual(regex.match(r'[a-z]*4R$', 'a4', partial=True).span(), + (0, 2)) + self.assertEqual(regex.match(r'[a-z]*4R$', 'a4R', partial=True).span(), + (0, 3)) + self.assertEqual(regex.match(r'[a-z]*4R$', '4a', partial=True), None) + self.assertEqual(regex.match(r'[a-z]*4R$', 'a44', partial=True), None) + + def test_hg_bugs(self): + # Hg 
issue 28: regex.compile("(?>b)") causes "TypeError: 'Character' + # object is not subscriptable" + self.assertEqual(bool(regex.compile("(?>b)", flags=regex.V1)), True) + + # Hg issue 29: regex.compile("^((?>\w+)|(?>\s+))*$") causes + # "TypeError: 'GreedyRepeat' object is not iterable" + self.assertEqual(bool(regex.compile(r"^((?>\w+)|(?>\s+))*$", + flags=regex.V1)), True) + + # Hg issue 31: atomic and normal groups in recursive patterns + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(bcd(e)f)g(h)"), ['(bcd(e)f)', '(h)']) + self.assertEqual(regex.findall(r"\((?:(?:[^()]+)|(?R))*\)", + "a(bcd(e)f)g(h)"), ['(bcd(e)f)', '(h)']) + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(b(cd)e)f)g)h"), ['(b(cd)e)']) + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(bc(d(e)f)gh"), ['(d(e)f)']) + self.assertEqual(regex.findall(r"(?r)\((?:(?>[^()]+)|(?R))*\)", + "a(bc(d(e)f)gh"), ['(d(e)f)']) + self.assertEqual([m.group() for m in + regex.finditer(r"\((?:[^()]*+|(?0))*\)", "a(b(c(de)fg)h")], + ['(c(de)fg)']) + + # Hg issue 32: regex.search("a(bc)d", "abcd", regex.I|regex.V1) returns + # None + self.assertEqual(regex.search("a(bc)d", "abcd", regex.I | + regex.V1).group(0), "abcd") + + # Hg issue 33: regex.search("([\da-f:]+)$", "E", regex.I|regex.V1) + # returns None + self.assertEqual(regex.search(r"([\da-f:]+)$", "E", regex.I | + regex.V1).group(0), "E") + self.assertEqual(regex.search(r"([\da-f:]+)$", "e", regex.I | + regex.V1).group(0), "e") + + # Hg issue 34: regex.search("^(?=ab(de))(abd)(e)", "abde").groups() + # returns (None, 'abd', 'e') instead of ('de', 'abd', 'e') + self.assertEqual(regex.search("^(?=ab(de))(abd)(e)", "abde").groups(), + ('de', 'abd', 'e')) + + # Hg issue 35: regex.compile("\ ", regex.X) causes "_regex_core.error: + # bad escape" + self.assertEqual(bool(regex.match(r"\ ", " ", flags=regex.X)), True) + + # Hg issue 36: regex.search("^(a|)\1{2}b", "b") returns None + self.assertEqual(regex.search(r"^(a|)\1{2}b", "b").group(0, 1), ('b', + '')) + + # Hg issue 37: regex.search("^(a){0,0}", "abc").group(0,1) returns + # ('a', 'a') instead of ('', None) + self.assertEqual(regex.search("^(a){0,0}", "abc").group(0, 1), ('', + None)) + + # Hg issue 38: regex.search("(?>.*/)b", "a/b") returns None + self.assertEqual(regex.search("(?>.*/)b", "a/b").group(0), "a/b") + + # Hg issue 39: regex.search("((?i)blah)\\s+\\1", "blah BLAH") doesn't + # return None + # Changed to positional flags in regex 2023.12.23. 
+ self.assertEqual(regex.search(r"((?i)blah)\s+\1", "blah BLAH"), None) + + # Hg issue 40: regex.search("(\()?[^()]+(?(1)\)|)", "(abcd").group(0) + # returns "bcd" instead of "abcd" + self.assertEqual(regex.search(r"(\()?[^()]+(?(1)\)|)", + "(abcd").group(0), "abcd") + + # Hg issue 42: regex.search("(a*)*", "a", flags=regex.V1).span(1) + # returns (0, 1) instead of (1, 1) + self.assertEqual(regex.search("(a*)*", "a").span(1), (1, 1)) + self.assertEqual(regex.search("(a*)*", "aa").span(1), (2, 2)) + self.assertEqual(regex.search("(a*)*", "aaa").span(1), (3, 3)) + + # Hg issue 43: regex.compile("a(?#xxx)*") causes "_regex_core.error: + # nothing to repeat" + self.assertEqual(regex.search("a(?#xxx)*", "aaa").group(), "aaa") + + # Hg issue 44: regex.compile("(?=abc){3}abc") causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("(?=abc){3}abc", "abcabcabc").span(), (0, + 3)) + + # Hg issue 45: regex.compile("^(?:a(?:(?:))+)+") causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("^(?:a(?:(?:))+)+", "a").span(), (0, 1)) + self.assertEqual(regex.search("^(?:a(?:(?:))+)+", "aa").span(), (0, 2)) + + # Hg issue 46: regex.compile("a(?x: b c )d") causes + # "_regex_core.error: missing )" + self.assertEqual(regex.search("a(?x: b c )d", "abcd").group(0), "abcd") + + # Hg issue 47: regex.compile("a#comment\n*", flags=regex.X) causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("a#comment\n*", "aaa", + flags=regex.X).group(0), "aaa") + + # Hg issue 48: regex.search("(a(?(1)\\1)){4}", "a"*10, + # flags=regex.V1).group(0,1) returns ('aaaaa', 'a') instead of ('aaaaaaaaaa', 'aaaa') + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){1}", + "aaaaaaaaaa").span(0, 1), ((0, 1), (0, 1))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){2}", + "aaaaaaaaaa").span(0, 1), ((0, 3), (1, 3))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){3}", + "aaaaaaaaaa").span(0, 1), ((0, 6), (3, 6))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){4}", + "aaaaaaaaaa").span(0, 1), ((0, 10), (6, 10))) + + # Hg issue 49: regex.search("(a)(?<=b(?1))", "baz", regex.V1) returns + # None incorrectly + self.assertEqual(regex.search("(?V1)(a)(?<=b(?1))", "baz").group(0), + "a") + + # Hg issue 50: not all keywords are found by named list with + # overlapping keywords when full Unicode casefolding is required + self.assertEqual(regex.findall(r'(?fi)\L', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05', + keywords=['post','pos']), ['POST', 'Post', 'post', 'po\u017Ft', + 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)pos|post', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POS', + 'Pos', 'pos', 'po\u017F', 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)post|pos', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POST', + 'Post', 'post', 'po\u017Ft', 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)post|another', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POST', + 'Post', 'post', 'po\u017Ft', 'po\uFB06', 'po\uFB05']) + + # Hg issue 51: regex.search("((a)(?1)|(?2))", "a", flags=regex.V1) + # returns None incorrectly + self.assertEqual(regex.search("(?V1)((a)(?1)|(?2))", "a").group(0, 1, + 2), ('a', 'a', None)) + + # Hg issue 52: regex.search("(\\1xx|){6}", "xx", + # flags=regex.V1).span(0,1) returns incorrect value + self.assertEqual(regex.search(r"(?V1)(\1xx|){6}", "xx").span(0, 1), + ((0, 2), (2, 2))) + + # Hg issue 53: regex.search("(a|)+", "a") causes 
MemoryError + self.assertEqual(regex.search("(a|)+", "a").group(0, 1), ("a", "")) + + # Hg issue 54: regex.search("(a|)*\\d", "a"*80) causes MemoryError + self.assertEqual(regex.search(r"(a|)*\d", "a" * 80), None) + + # Hg issue 55: regex.search("^(?:a?b?)*$", "ac") take a very long time. + self.assertEqual(regex.search("^(?:a?b?)*$", "ac"), None) + + # Hg issue 58: bad named character escape sequences like "\\N{1}" + # treats as "N" + self.assertRaisesRegex(regex.error, self.UNDEF_CHAR_NAME, lambda: + regex.compile("\\N{1}")) + + # Hg issue 59: regex.search("\\Z", "a\na\n") returns None incorrectly + self.assertEqual(regex.search("\\Z", "a\na\n").span(0), (4, 4)) + + # Hg issue 60: regex.search("(q1|.)*(q2|.)*(x(a|bc)*y){2,}", "xayxay") + # returns None incorrectly + self.assertEqual(regex.search("(q1|.)*(q2|.)*(x(a|bc)*y){2,}", + "xayxay").group(0), "xayxay") + + # Hg issue 61: regex.search("[^a]", "A", regex.I).group(0) returns '' + # incorrectly + self.assertEqual(regex.search("(?i)[^a]", "A"), None) + + # Hg issue 63: regex.search("[[:ascii:]]", "\N{KELVIN SIGN}", + # flags=regex.I|regex.V1) doesn't return None + self.assertEqual(regex.search("(?i)[[:ascii:]]", "\N{KELVIN SIGN}"), + None) + + # Hg issue 66: regex.search("((a|b(?1)c){3,5})", "baaaaca", + # flags=regex.V1).groups() returns ('baaaac', 'baaaac') instead of ('aaaa', 'a') + self.assertEqual(regex.search("((a|b(?1)c){3,5})", "baaaaca").group(0, + 1, 2), ('aaaa', 'aaaa', 'a')) + + # Hg issue 71: non-greedy quantifier in lookbehind + self.assertEqual(regex.findall(r"(?<=:\S+ )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S* )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S+? )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S*? )\w+", ":9 abc :10 def"), + ['abc', 'def']) + + # Hg issue 73: conditional patterns + self.assertEqual(regex.search(r"(?:fe)?male", "female").group(), + "female") + self.assertEqual([m.group() for m in + regex.finditer(r"(fe)?male: h(?(1)(er)|(is)) (\w+)", + "female: her dog; male: his cat. asdsasda")], ['female: her dog', + 'male: his cat']) + + # Hg issue 78: "Captures" doesn't work for recursive calls + self.assertEqual(regex.search(r'(?\((?:[^()]++|(?&rec))*\))', + 'aaa(((1+0)+1)+1)bbb').captures('rec'), ['(1+0)', '((1+0)+1)', + '(((1+0)+1)+1)']) + + # Hg issue 80: Escape characters throws an exception + self.assertRaisesRegex(regex.error, self.BAD_ESCAPE, lambda: + regex.sub('x', '\\', 'x'), ) + + # Hg issue 82: error range does not work + fz = "(CAGCCTCCCATTTCAGAATATACATCC){1a(?b))', "ab").spans("x"), [(1, + 2), (0, 2)]) + + # Hg issue 91: match.expand is extremely slow + # Check that the replacement cache works. 
+ self.assertEqual(regex.sub(r'(-)', lambda m: m.expand(r'x'), 'a-b-c'), + 'axbxc') + + # Hg issue 94: Python crashes when executing regex updates + # pattern.findall + rx = regex.compile(r'\bt(est){i<2}', flags=regex.V1) + self.assertEqual(rx.search("Some text"), None) + self.assertEqual(rx.findall("Some text"), []) + + # Hg issue 95: 'pos' for regex.error + self.assertRaisesRegex(regex.error, self.MULTIPLE_REPEAT, lambda: + regex.compile(r'.???')) + + # Hg issue 97: behaviour of regex.escape's special_only is wrong + # + # Hg issue 244: Make `special_only=True` the default in + # `regex.escape()` + self.assertEqual(regex.escape('foo!?', special_only=False), 'foo\\!\\?') + self.assertEqual(regex.escape('foo!?', special_only=True), 'foo!\\?') + self.assertEqual(regex.escape('foo!?'), 'foo!\\?') + + self.assertEqual(regex.escape(b'foo!?', special_only=False), b'foo\\!\\?') + self.assertEqual(regex.escape(b'foo!?', special_only=True), + b'foo!\\?') + self.assertEqual(regex.escape(b'foo!?'), b'foo!\\?') + + # Hg issue 100: strange results from regex.search + self.assertEqual(regex.search('^([^z]*(?:WWWi|W))?$', + 'WWWi').groups(), ('WWWi', )) + self.assertEqual(regex.search('^([^z]*(?:WWWi|w))?$', + 'WWWi').groups(), ('WWWi', )) + self.assertEqual(regex.search('^([^z]*?(?:WWWi|W))?$', + 'WWWi').groups(), ('WWWi', )) + + # Hg issue 101: findall() broken (seems like memory corruption) + pat = regex.compile(r'xxx', flags=regex.FULLCASE | regex.UNICODE) + self.assertEqual([x.group() for x in pat.finditer('yxxx')], ['xxx']) + self.assertEqual(pat.findall('yxxx'), ['xxx']) + + raw = 'yxxx' + self.assertEqual([x.group() for x in pat.finditer(raw)], ['xxx']) + self.assertEqual(pat.findall(raw), ['xxx']) + + pat = regex.compile(r'xxx', flags=regex.FULLCASE | regex.IGNORECASE | + regex.UNICODE) + self.assertEqual([x.group() for x in pat.finditer('yxxx')], ['xxx']) + self.assertEqual(pat.findall('yxxx'), ['xxx']) + + raw = 'yxxx' + self.assertEqual([x.group() for x in pat.finditer(raw)], ['xxx']) + self.assertEqual(pat.findall(raw), ['xxx']) + + # Hg issue 106: * operator not working correctly with sub() + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0).*', 'x', 'test'), 'xx') + else: + self.assertEqual(regex.sub('(?V0).*', 'x', 'test'), 'x') + self.assertEqual(regex.sub('(?V1).*', 'x', 'test'), 'xx') + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0).*?', '|', 'test'), '|||||||||') + else: + self.assertEqual(regex.sub('(?V0).*?', '|', 'test'), '|t|e|s|t|') + self.assertEqual(regex.sub('(?V1).*?', '|', 'test'), '|||||||||') + + # Hg issue 112: re: OK, but regex: SystemError + self.assertEqual(regex.sub(r'^(@)\n(?!.*?@)(.*)', + r'\1\n==========\n\2', '@\n', flags=regex.DOTALL), '@\n==========\n') + + # Hg issue 109: Edit distance of fuzzy match + self.assertEqual(regex.match(r'(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + + self.assertEqual(regex.match(r'(?:cat){e<=1}', 'caz').fuzzy_counts, + (1, 0, 0)) + self.assertEqual(regex.match(r'(?e)(?:cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?b)(?:cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=2}', 'c ats').fuzzy_counts, + (1, 1, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=2}', + 'c ats').fuzzy_counts, (0, 1, 0)) + 
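+        # fuzzy_counts is the (substitutions, insertions, deletions) triple for the match.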
self.assertEqual(regex.match(r'(?b)(?:cats){e<=2}', + 'c ats').fuzzy_counts, (0, 1, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=1}', 'c ats').fuzzy_counts, + (0, 1, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=1}', + 'c ats').fuzzy_counts, (0, 1, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats){e<=1}', + 'c ats').fuzzy_counts, (0, 1, 0)) + + # Hg issue 115: Infinite loop when processing backreferences + self.assertEqual(regex.findall(r'\bof ([a-z]+) of \1\b', + 'To make use of one of these modules'), []) + + # Hg issue 125: Reference to entire match (\g<0>) in + # Pattern.sub() doesn't work as of 2014.09.22 release. + self.assertEqual(regex.sub(r'x', r'\g<0>', 'x'), 'x') + + # Unreported issue: no such builtin as 'ascii' in Python 2. + self.assertEqual(bool(regex.match(r'a', 'a', regex.DEBUG)), True) + + # Hg issue 131: nested sets behaviour + self.assertEqual(regex.findall(r'(?V1)[[b-e]--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[b-e--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[[bcde]--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[bcde--cd]', 'abcdef'), ['b', + 'e']) + + # Hg issue 132: index out of range on null property \p{} + self.assertRaisesRegex(regex.error, '^unknown property at position 4$', + lambda: regex.compile(r'\p{}')) + + # Issue 23692. + self.assertEqual(regex.match('(?:()|(?(1)()|z)){2}(?(2)a|z)', + 'a').group(0, 1, 2), ('a', '', '')) + self.assertEqual(regex.match('(?:()|(?(1)()|z)){0,2}(?(2)a|z)', + 'a').group(0, 1, 2), ('a', '', '')) + + # Hg issue 137: Posix character class :punct: does not seem to be + # supported. + + # Posix compatibility as recommended here: + # http://www.unicode.org/reports/tr18/#Compatibility_Properties + + # Posix in Unicode. 
+ chars = ''.join(chr(c) for c in range(0x10000)) + + self.assertEqual(ascii(''.join(regex.findall(r'''[[:alnum:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{Alpha}\p{PosixDigit}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:alpha:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Alpha}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:ascii:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{InBasicLatin}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:blank:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{gc=Space_Separator}\t]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:cntrl:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{gc=Control}+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:digit:]]+''', + chars))), ascii(''.join(regex.findall(r'''[0-9]+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:graph:]]+''', + chars))), ascii(''.join(regex.findall(r'''[^\p{Space}\p{gc=Control}\p{gc=Surrogate}\p{gc=Unassigned}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:lower:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Lower}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:print:]]+''', + chars))), ascii(''.join(regex.findall(r'''(?V1)[\p{Graph}\p{Blank}--\p{Cntrl}]+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:punct:]]+''', + chars))), + ascii(''.join(regex.findall(r'''(?V1)[\p{gc=Punctuation}\p{gc=Symbol}--\p{Alpha}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:space:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Whitespace}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:upper:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Upper}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:word:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{Alpha}\p{gc=Mark}\p{Digit}\p{gc=Connector_Punctuation}\p{Join_Control}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:xdigit:]]+''', + chars))), ascii(''.join(regex.findall(r'''[0-9A-Fa-f]+''', + chars)))) + + # Posix in ASCII. 
+ chars = bytes(range(0x100)) + + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:alnum:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{Alpha}\p{PosixDigit}]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:alpha:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Alpha}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:ascii:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\x00-\x7F]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:blank:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{gc=Space_Separator}\t]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:cntrl:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{gc=Control}+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:digit:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[0-9]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:graph:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[^\p{Space}\p{gc=Control}\p{gc=Surrogate}\p{gc=Unassigned}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:lower:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Lower}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:print:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?aV1)[\p{Graph}\p{Blank}--\p{Cntrl}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:punct:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?aV1)[\p{gc=Punctuation}\p{gc=Symbol}--\p{Alpha}]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:space:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Whitespace}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:upper:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Upper}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:word:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{Alpha}\p{gc=Mark}\p{Digit}\p{gc=Connector_Punctuation}\p{Join_Control}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:xdigit:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[0-9A-Fa-f]+''', chars)))) + + # Hg issue 138: grapheme anchored search not working properly. + self.assertEqual(ascii(regex.search(r'\X$', 'ab\u2103').group()), + ascii('\u2103')) + + # Hg issue 139: Regular expression with multiple wildcards where first + # should match empty string does not always work. + self.assertEqual(regex.search("([^L]*)([^R]*R)", "LtR").groups(), ('', + 'LtR')) + + # Hg issue 140: Replace with REVERSE and groups has unexpected + # behavior. + self.assertEqual(regex.sub(r'(.)', r'x\1y', 'ab'), 'xayxby') + self.assertEqual(regex.sub(r'(?r)(.)', r'x\1y', 'ab'), 'xayxby') + self.assertEqual(regex.subf(r'(.)', 'x{1}y', 'ab'), 'xayxby') + self.assertEqual(regex.subf(r'(?r)(.)', 'x{1}y', 'ab'), 'xayxby') + + # Hg issue 141: Crash on a certain partial match. + self.assertEqual(regex.fullmatch('(a)*abc', 'ab', + partial=True).span(), (0, 2)) + self.assertEqual(regex.fullmatch('(a)*abc', 'ab', + partial=True).partial, True) + + # Hg issue 143: Partial matches have incorrect span if prefix is '.' + # wildcard. 
+ self.assertEqual(regex.search('OXRG', 'OOGOX', partial=True).span(), + (3, 5)) + self.assertEqual(regex.search('.XRG', 'OOGOX', partial=True).span(), + (3, 5)) + self.assertEqual(regex.search('.{1,3}XRG', 'OOGOX', + partial=True).span(), (1, 5)) + + # Hg issue 144: Latest version problem with matching 'R|R'. + self.assertEqual(regex.match('R|R', 'R').span(), (0, 1)) + + # Hg issue 146: Forced-fail (?!) works improperly in conditional. + self.assertEqual(regex.match(r'(.)(?(1)(?!))', 'xy'), None) + + # Groups cleared after failure. + self.assertEqual(regex.findall(r'(y)?(\d)(?(1)\b\B)', 'ax1y2z3b'), + [('', '1'), ('', '2'), ('', '3')]) + self.assertEqual(regex.findall(r'(y)?+(\d)(?(1)\b\B)', 'ax1y2z3b'), + [('', '1'), ('', '2'), ('', '3')]) + + # Hg issue 147: Fuzzy match can return match points beyond buffer end. + self.assertEqual([m.span() for m in regex.finditer(r'(?i)(?:error){e}', + 'regex failure')], [(0, 5), (5, 10), (10, 13), (13, 13)]) + self.assertEqual([m.span() for m in + regex.finditer(r'(?fi)(?:error){e}', 'regex failure')], [(0, 5), (5, + 10), (10, 13), (13, 13)]) + + # Hg issue 150: Have an option for POSIX-compatible longest match of + # alternates. + self.assertEqual(regex.search(r'(?p)\d+(\w(\d*)?|[eE]([+-]\d+))', + '10b12')[0], '10b12') + self.assertEqual(regex.search(r'(?p)\d+(\w(\d*)?|[eE]([+-]\d+))', + '10E+12')[0], '10E+12') + + self.assertEqual(regex.search(r'(?p)(\w|ae|oe|ue|ss)', 'ae')[0], 'ae') + self.assertEqual(regex.search(r'(?p)one(self)?(selfsufficient)?', + 'oneselfsufficient')[0], 'oneselfsufficient') + + # Hg issue 151: Request: \K. + self.assertEqual(regex.search(r'(ab\Kcd)', 'abcd').group(0, 1), ('cd', + 'abcd')) + self.assertEqual(regex.findall(r'\w\w\K\w\w', 'abcdefgh'), ['cd', + 'gh']) + self.assertEqual(regex.findall(r'(\w\w\K\w\w)', 'abcdefgh'), ['abcd', + 'efgh']) + + self.assertEqual(regex.search(r'(?r)(ab\Kcd)', 'abcd').group(0, 1), + ('ab', 'abcd')) + self.assertEqual(regex.findall(r'(?r)\w\w\K\w\w', 'abcdefgh'), ['ef', + 'ab']) + self.assertEqual(regex.findall(r'(?r)(\w\w\K\w\w)', 'abcdefgh'), + ['efgh', 'abcd']) + + # Hg issue 152: Request: Request: (?(DEFINE)...). + self.assertEqual(regex.search(r'(?(DEFINE)(?\d+)(?\w+))(?&quant) (?&item)', + '5 elephants')[0], '5 elephants') + + self.assertEqual(regex.search(r'(?&routine)(?(DEFINE)(?.))', 'a').group('routine'), None) + self.assertEqual(regex.search(r'(?&routine)(?(DEFINE)(?.))', 'a').captures('routine'), ['a']) + + # Hg issue 153: Request: (*SKIP). 
+ self.assertEqual(regex.search(r'12(*FAIL)|3', '123')[0], '3') + self.assertEqual(regex.search(r'(?r)12(*FAIL)|3', '123')[0], '3') + + self.assertEqual(regex.search(r'\d+(*PRUNE)\d', '123'), None) + self.assertEqual(regex.search(r'\d+(?=(*PRUNE))\d', '123')[0], '123') + self.assertEqual(regex.search(r'\d+(*PRUNE)bcd|[3d]', '123bcd')[0], + '123bcd') + self.assertEqual(regex.search(r'\d+(*PRUNE)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'\d+?(*PRUNE)bcd|[3d]', '123bcd')[0], + '3bcd') + self.assertEqual(regex.search(r'\d+?(*PRUNE)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'\d++(?<=3(*PRUNE))zzd|[4d]$', + '123zzd')[0], '123zzd') + self.assertEqual(regex.search(r'\d++(?<=3(*PRUNE))zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'\d++(?<=(*PRUNE)3)zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'\d++(?<=2(*PRUNE)3)zzd|[3d]$', + '124zzd')[0], 'd') + + self.assertEqual(regex.search(r'(?r)\d(*PRUNE)\d+', '123'), None) + self.assertEqual(regex.search(r'(?r)\d(?<=(*PRUNE))\d+', '123')[0], + '123') + self.assertEqual(regex.search(r'(?r)\d+(*PRUNE)bcd|[3d]', + '123bcd')[0], '123bcd') + self.assertEqual(regex.search(r'(?r)\d+(*PRUNE)bcd|[3d]', + '123zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=3(*PRUNE))zzd|[4d]$', + '123zzd')[0], '123zzd') + self.assertEqual(regex.search(r'(?r)\d++(?<=3(*PRUNE))zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=(*PRUNE)3)zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=2(*PRUNE)3)zzd|[3d]$', + '124zzd')[0], 'd') + + self.assertEqual(regex.search(r'\d+(*SKIP)bcd|[3d]', '123bcd')[0], + '123bcd') + self.assertEqual(regex.search(r'\d+(*SKIP)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'\d+?(*SKIP)bcd|[3d]', '123bcd')[0], + '3bcd') + self.assertEqual(regex.search(r'\d+?(*SKIP)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'\d++(?<=3(*SKIP))zzd|[4d]$', + '123zzd')[0], '123zzd') + self.assertEqual(regex.search(r'\d++(?<=3(*SKIP))zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'\d++(?<=(*SKIP)3)zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'\d++(?<=2(*SKIP)3)zzd|[3d]$', + '124zzd')[0], 'd') + + self.assertEqual(regex.search(r'(?r)\d+(*SKIP)bcd|[3d]', '123bcd')[0], + '123bcd') + self.assertEqual(regex.search(r'(?r)\d+(*SKIP)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=3(*SKIP))zzd|[4d]$', + '123zzd')[0], '123zzd') + self.assertEqual(regex.search(r'(?r)\d++(?<=3(*SKIP))zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=(*SKIP)3)zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=2(*SKIP)3)zzd|[3d]$', + '124zzd')[0], 'd') + + # Hg issue 154: Segmentation fault 11 when working with an atomic group + text = """June 30, December 31, 2013 2012 +some words follow: +more words and numbers 1,234,567 9,876,542 +more words and numbers 1,234,567 9,876,542""" + self.assertEqual(len(regex.findall(r'(?2014|2013 ?2012)', text)), 1) + + # Hg issue 156: regression on atomic grouping + self.assertEqual(regex.match('1(?>2)', '12').span(), (0, 2)) + + # Hg issue 157: regression: segfault on complex lookaround + self.assertEqual(regex.match(r'(?V1w)(?=(?=[^A-Z]*+[A-Z])(?=[^a-z]*+[a-z]))(?=\D*+\d)(?=\p{Alphanumeric}*+\P{Alphanumeric})\A(?s:.){8,255}+\Z', + 'AAaa11!!')[0], 'AAaa11!!') + + # Hg issue 158: Group issue with (?(DEFINE)...) 
+        TEST_REGEX = regex.compile(r'''(?smx)
+(?(DEFINE)
+  (?<subcat>
+   ^,[^,]+,
+  )
+)
+
+# Group 2 is defined on this line
+^,([^,]+),
+
+(?:(?!(?&subcat)[\r\n]+(?&subcat)).)+
+''')
+
+        TEST_DATA = '''
+,Cat 1,
+,Brand 1,
+some
+thing
+,Brand 2,
+other
+things
+,Cat 2,
+,Brand,
+Some
+thing
+'''
+
+        self.assertEqual([m.span(1, 2) for m in
+          TEST_REGEX.finditer(TEST_DATA)], [((-1, -1), (2, 7)), ((-1, -1), (54,
+          59))])
+
+        # Hg issue 161: Unexpected fuzzy match results
+        self.assertEqual(regex.search('(abcdefgh){e}',
+          '******abcdefghijklmnopqrtuvwxyz', regex.BESTMATCH).span(), (6, 14))
+        self.assertEqual(regex.search('(abcdefghi){e}',
+          '******abcdefghijklmnopqrtuvwxyz', regex.BESTMATCH).span(), (6, 15))
+
+        # Hg issue 163: allow lookarounds in conditionals.
+        self.assertEqual(regex.match(r'(?:(?=\d)\d+\b|\w+)', '123abc').span(),
+          (0, 6))
+        self.assertEqual(regex.match(r'(?(?=\d)\d+\b|\w+)', '123abc'), None)
+        self.assertEqual(regex.search(r'(?(?<=love\s)you|(?<=hate\s)her)',
+          "I love you").span(), (7, 10))
+        self.assertEqual(regex.findall(r'(?(?<=love\s)you|(?<=hate\s)her)',
+          "I love you but I don't hate her either"), ['you', 'her'])
+
+        # Hg issue 180: bug of POSIX matching.
+        self.assertEqual(regex.search(r'(?p)a*(.*?)', 'aaabbb').group(0, 1),
+          ('aaabbb', 'bbb'))
+        self.assertEqual(regex.search(r'(?p)a*(.*)', 'aaabbb').group(0, 1),
+          ('aaabbb', 'bbb'))
+        self.assertEqual(regex.sub(r'(?p)a*(.*?)', r'\1', 'aaabbb'), 'bbb')
+        self.assertEqual(regex.sub(r'(?p)a*(.*)', r'\1', 'aaabbb'), 'bbb')
+
+        # Hg issue 192: Named lists reverse matching doesn't work with
+        # IGNORECASE and V1
+        self.assertEqual(regex.match(r'(?irV0)\L<kw>', '21', kw=['1']).span(),
+          (1, 2))
+        self.assertEqual(regex.match(r'(?irV1)\L<kw>', '21', kw=['1']).span(),
+          (1, 2))
+
+        # Hg issue 193: Alternation and .REVERSE flag.
+ self.assertEqual(regex.search('a|b', '111a222').span(), (3, 4)) + self.assertEqual(regex.search('(?r)a|b', '111a222').span(), (3, 4)) + + # Hg issue 194: .FULLCASE and Backreference + self.assertEqual(regex.search(r'(?if)<(CLI)><\1>', + '').span(), (0, 10)) + self.assertEqual(regex.search(r'(?if)<(CLI)><\1>', + '').span(), (0, 10)) + self.assertEqual(regex.search(r'(?ifr)<\1><(CLI)>', + '').span(), (0, 10)) + + # Hg issue 195: Pickle (or otherwise serial) the compiled regex + r = regex.compile(r'\L', options=['foo', 'bar']) + p = pickle.dumps(r) + r = pickle.loads(p) + self.assertEqual(r.match('foo').span(), (0, 3)) + + # Hg issue 196: Fuzzy matching on repeated regex not working as + # expected + self.assertEqual(regex.match('(x{6}){e<=1}', 'xxxxxx', + flags=regex.BESTMATCH).span(), (0, 6)) + self.assertEqual(regex.match('(x{6}){e<=1}', 'xxxxx', + flags=regex.BESTMATCH).span(), (0, 5)) + self.assertEqual(regex.match('(x{6}){e<=1}', 'x', + flags=regex.BESTMATCH), None) + self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'xxxxxx', + flags=regex.BESTMATCH).span(), (0, 6)) + self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'xxxxx', + flags=regex.BESTMATCH).span(), (0, 5)) + self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'x', + flags=regex.BESTMATCH), None) + + # Hg issue 197: ValueError in regex.compile + self.assertRaises(regex.error, lambda: + regex.compile(b'00000\\0\\00\\^\50\\00\\U05000000')) + + # Hg issue 198: ValueError in regex.compile + self.assertRaises(regex.error, lambda: regex.compile(b"{e', '22', aa=['121', + '22'])), True) + self.assertEqual(bool(regex.search(r'(?ri)\L', '22', aa=['121', + '22'])), True) + self.assertEqual(bool(regex.search(r'(?fi)\L', '22', aa=['121', + '22'])), True) + self.assertEqual(bool(regex.search(r'(?fri)\L', '22', aa=['121', + '22'])), True) + + # Hg issue 208: Named list, (?ri) flags, Backreference + self.assertEqual(regex.search(r'(?r)\1dog..(?<=(\L))$', 'ccdogcc', + aa=['bcb', 'cc']). span(), (0, 7)) + self.assertEqual(regex.search(r'(?ir)\1dog..(?<=(\L))$', + 'ccdogcc', aa=['bcb', 'cc']). span(), (0, 7)) + + # Hg issue 210: Fuzzy matching and Backreference + self.assertEqual(regex.search(r'(2)(?:\1{5}){e<=1}', + '3222212').span(), (1, 7)) + self.assertEqual(regex.search(r'(\d)(?:\1{5}){e<=1}', + '3222212').span(), (1, 7)) + + # Hg issue 211: Segmentation fault with recursive matches and atomic + # groups + self.assertEqual(regex.match(r'''\A(?P(?>\((?&whole)\)|[+\-]))\Z''', + '((-))').span(), (0, 5)) + self.assertEqual(regex.match(r'''\A(?P(?>\((?&whole)\)|[+\-]))\Z''', + '((-)+)'), None) + + # Hg issue 212: Unexpected matching difference with .*? between re and + # regex + self.assertEqual(regex.match(r"x.*? (.).*\1(.*)\1", + 'x |y| z|').span(), (0, 9)) + self.assertEqual(regex.match(r"\.sr (.*?) 
(.)(.*)\2(.*)\2(.*)", + r'.sr h |||').span(), (0, 35)) + + # Hg issue 213: Segmentation Fault + a = '"\\xF9\\x80\\xAEqdz\\x95L\\xA7\\x89[\\xFE \\x91)\\xF9]\\xDB\'\\x99\\x09=\\x00\\xFD\\x98\\x22\\xDD\\xF1\\xB6\\xC3 Z\\xB6gv\\xA5x\\x93P\\xE1r\\x14\\x8Cv\\x0C\\xC0w\\x15r\\xFFc%" ' + py_regex_pattern = r'''(?P((?>(?"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``)))) (?P((?>(?"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``))))''' + self.assertEqual(bool(regex.search(py_regex_pattern, a)), False) + + # Hg Issue 216: Invalid match when using negative lookbehind and pipe + self.assertEqual(bool(regex.match('foo(?<=foo)', 'foo')), True) + self.assertEqual(bool(regex.match('foo(?.*\!\w*\:.*)|(?P.*))', + '!')), False) + + # Hg issue 220: Misbehavior of group capture with OR operand + self.assertEqual(regex.match(r'\w*(ea)\w*|\w*e(?!a)\w*', + 'easier').groups(), ('ea', )) + + # Hg issue 225: BESTMATCH in fuzzy match not working + self.assertEqual(regex.search('(^1234$){i,d}', '12234', + regex.BESTMATCH).span(), (0, 5)) + self.assertEqual(regex.search('(^1234$){i,d}', '12234', + regex.BESTMATCH).fuzzy_counts, (0, 1, 0)) + + self.assertEqual(regex.search('(^1234$){s,i,d}', '12234', + regex.BESTMATCH).span(), (0, 5)) + self.assertEqual(regex.search('(^1234$){s,i,d}', '12234', + regex.BESTMATCH).fuzzy_counts, (0, 1, 0)) + + # Hg issue 226: Error matching at start of string + self.assertEqual(regex.search('(^123$){s,i,d}', 'xxxxxxxx123', + regex.BESTMATCH).span(), (0, 11)) + self.assertEqual(regex.search('(^123$){s,i,d}', 'xxxxxxxx123', + regex.BESTMATCH).fuzzy_counts, (0, 8, 0)) + + # Hg issue 227: Incorrect behavior for ? operator with UNICODE + + # IGNORECASE + self.assertEqual(regex.search(r'a?yz', 'xxxxyz', flags=regex.FULLCASE | + regex.IGNORECASE).span(), (4, 6)) + + # Hg issue 230: Is it a bug of (?(DEFINE)...) + self.assertEqual(regex.findall(r'(?:(?![a-d]).)+', 'abcdefgh'), + ['efgh']) + self.assertEqual(regex.findall(r'''(?(DEFINE)(?P(?:(?![a-d]).)))(?&mydef)+''', + 'abcdefgh'), ['efgh']) + + # Hg issue 238: Not fully re backward compatible + self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){1,3}', + '"Erm....yes. T..T...Thank you for that."'), [('Erm....', 'Erm', + '....'), ('T...', 'T', '...')]) + self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){3}', + '"Erm....yes. T..T...Thank you for that."'), []) + self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){2}', + '"Erm....yes. T..T...Thank you for that."'), [('T...', 'T', '...')]) + self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){1}', + '"Erm....yes. 
T..T...Thank you for that."'), [('Erm....', 'Erm', + '....'), ('T..', 'T', '..'), ('T...', 'T', '...')]) + + # Hg issue 247: Unexpected result with fuzzy matching and lookahead + # expression + self.assertEqual(regex.search(r'(?:ESTONIA(?!\w)){e<=1}', + 'ESTONIAN WORKERS').group(), 'ESTONIAN') + self.assertEqual(regex.search(r'(?:ESTONIA(?=\W)){e<=1}', + 'ESTONIAN WORKERS').group(), 'ESTONIAN') + + self.assertEqual(regex.search(r'(?:(?.))(?&func)', + 'abc').groups(), (None, )) + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?&func)', + 'abc').groupdict(), {'func': None}) + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?&func)', + 'abc').capturesdict(), {'func': ['a']}) + + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?=(?&func))', + 'abc').groups(), (None, )) + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?=(?&func))', + 'abc').groupdict(), {'func': None}) + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?=(?&func))', + 'abc').capturesdict(), {'func': ['a']}) + + self.assertEqual(regex.search(r'(?(DEFINE)(?.)).(?<=(?&func))', + 'abc').groups(), (None, )) + self.assertEqual(regex.search(r'(?(DEFINE)(?.)).(?<=(?&func))', + 'abc').groupdict(), {'func': None}) + self.assertEqual(regex.search(r'(?(DEFINE)(?.)).(?<=(?&func))', + 'abc').capturesdict(), {'func': ['a']}) + + # Hg issue 271: Comment logic different between Re and Regex + self.assertEqual(bool(regex.match(r'ab(?#comment\))cd', 'abcd')), True) + + # Hg issue 276: Partial Matches yield incorrect matches and bounds + self.assertEqual(regex.search(r'[a-z]+ [a-z]*?:', 'foo bar', + partial=True).span(), (0, 7)) + self.assertEqual(regex.search(r'(?r):[a-z]*? [a-z]+', 'foo bar', + partial=True).span(), (0, 7)) + + # Hg issue 291: Include Script Extensions as a supported Unicode property + self.assertEqual(bool(regex.match(r'(?u)\p{Script:Beng}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script:Bengali}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Bengali}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Beng}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Cakm}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Sylo}', + '\u09EF')), True) + + # Hg issue #293: scx (Script Extensions) property currently matches + # incorrectly + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Latin}', 'P')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Ahom}', 'P')), False) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Common}', '4')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Caucasian_Albanian}', '4')), + False) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Arabic}', '\u062A')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Balinese}', '\u062A')), + False) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Devanagari}', '\u091C')), + True) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Batak}', '\u091C')), False) + + # Hg issue 296: Group references are not taken into account when group is reporting the last match + self.assertEqual(regex.fullmatch('(?P.)*(?&x)', 'abc').captures('x'), + ['a', 'b', 'c']) + self.assertEqual(regex.fullmatch('(?P.)*(?&x)', 'abc').group('x'), + 'b') + + self.assertEqual(regex.fullmatch('(?P.)(?P.)(?P.)', + 'abc').captures('x'), ['a', 'b', 'c']) + self.assertEqual(regex.fullmatch('(?P.)(?P.)(?P.)', + 'abc').group('x'), 'c') + + # Hg issue 299: Partial gives misleading results with "open ended" regexp + 
self.assertEqual(regex.match('(?:ab)*', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*?', '', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+?', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)++', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)++', 'abab', partial=True).partial, + False) + + self.assertEqual(regex.match('(?r)(?:ab)*', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*?', '', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+?', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)++', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)++', 'abab', partial=True).partial, + False) + + self.assertEqual(regex.match('a*', '', partial=True).partial, False) + self.assertEqual(regex.match('a*?', '', partial=True).partial, False) + self.assertEqual(regex.match('a*+', '', partial=True).partial, False) + self.assertEqual(regex.match('a+', '', partial=True).partial, True) + self.assertEqual(regex.match('a+?', '', partial=True).partial, True) + self.assertEqual(regex.match('a++', '', partial=True).partial, True) + self.assertEqual(regex.match('a+', 'a', partial=True).partial, False) + self.assertEqual(regex.match('a+?', 'a', partial=True).partial, False) + self.assertEqual(regex.match('a++', 'a', partial=True).partial, False) + + self.assertEqual(regex.match('(?r)a*', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a*?', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a*+', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a+', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a+?', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a++', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a+', 'a', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a+?', 'a', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a++', 'a', partial=True).partial, False) + + self.assertEqual(regex.match(r"(?:\s*\w+'*)+", 'whatever', partial=True).partial, + False) + + # Hg issue 300: segmentation fault + pattern = ('(?PGGCGTCACACTTTGCTATGCCATAGCAT[AG]TTTATCCATAAGA' + 'TTAGCGGATCCTACCTGACGCTTTTTATCGCAACTCTCTACTGTTTCTCCATAACAGAACATATTGA' + 'CTATCCGGTATTACCCGGCATGACAGGAGTAAAA){e<=1}' + '(?P[ACGT]{1059}){e<=2}' + '(?PTAATCGTCTTGTTTGATACACAAGGGTCGCATCTGCGGCCCTTTTGCTTTTTTAAG' + 'TTGTAAGGATATGCCATTCTAGA){e<=0}' + '(?P[ACGT]{18}){e<=0}' + '(?PAGATCGG[CT]AGAGCGTCGTGTAGGGAAAGAGTGTGG){e<=1}') + + text = 
('GCACGGCGTCACACTTTGCTATGCCATAGCATATTTATCCATAAGATTAGCGGATCCTACC' + 'TGACGCTTTTTATCGCAACTCTCTACTGTTTCTCCATAACAGAACATATTGACTATCCGGTATTACC' + 'CGGCATGACAGGAGTAAAAATGGCTATCGACGAAAACAAACAGAAAGCGTTGGCGGCAGCACTGGGC' + 'CAGATTGAGAAACAATTTGGTAAAGGCTCCATCATGCGCCTGGGTGAAGACCGTTCCATGGATGTGG' + 'AAACCATCTCTACCGGTTCGCTTTCACTGGATATCGCGCTTGGGGCAGGTGGTCTGCCGATGGGCCG' + 'TATCGTCGAAATCTACGGACCGGAATCTTCCGGTAAAACCACGCTGACGCTGCAGGTGATCGCCGCA' + 'GCGCAGCGTGAAGGTAAAACCTGTGCGTTTATCGATGCTGAACACGCGCTGGACCCAATCTACGCAC' + 'GTAAACTGGGCGTCGATATCGACAACCTGCTGTGCTCCCAGCCGGACACCGGCGAGCAGGCACTGGA' + 'AATCTGTGACGCCCTGGCGCGTTCTGGCGCAGTAGACGTTATCGTCGTTGACTCCGTGGCGGCACTG' + 'ACGCCGAAAGCGGAAATCGAAGGCGAAATCGGCGACTCTCATATGGGCCTTGCGGCACGTATGATGA' + 'GCCAGGCGATGCGTAAGCTGGCGGGTAACCTGAAGCAGTCCAACACGCTGCTGATCTTCATCAACCC' + 'CATCCGTATGAAAATTGGTGTGATGTTCGGCAACCCGGAAACCACTTACCGGTGGTAACGCGCTGAA' + 'ATTCTACGCCTCTGTTCGTCTCGACATCCGTTAAATCGGCGCGGTGAAAGAGGGCGAAAACGTGGTG' + 'GGTAGCGAAACCCGCGTGAAAGTGGTGAAGAACAAAATCGCTGCGCCGTTTAAACAGGCTGAATTCC' + 'AGATCCTCTACGGCGAAGGTATCAACTTCTACCCCGAACTGGTTGACCTGGGCGTAAAAGAGAAGCT' + 'GATCGAGAAAGCAGGCGCGTGGTACAGCTACAAAGGTGAGAAGATCGGTCAGGGTAAAGCGAATGCG' + 'ACTGCCTGGCTGAAATTTAACCCGGAAACCGCGAAAGAGATCGAGTGAAAAGTACGTGAGTTGCTGC' + 'TGAGCAACCCGAACTCAACGCCGGATTTCTCTGTAGATGATAGCGAAGGCGTAGCAGAAACTAACGA' + 'AGATTTTTAATCGTCTTGTTTGATACACAAGGGTCGCATCTGCGGCCCTTTTGCTTTTTTAAGTTGT' + 'AAGGATATGCCATTCTAGACAGTTAACACACCAACAAAGATCGGTAGAGCGTCGTGTAGGGAAAGAG' + 'TGTGGTACC') + + m = regex.search(pattern, text, flags=regex.BESTMATCH) + self.assertEqual(m.fuzzy_counts, (0, 1, 0)) + self.assertEqual(m.fuzzy_changes, ([], [1206], [])) + + # Hg issue 306: Fuzzy match parameters not respecting quantifier scope + self.assertEqual(regex.search(r'(?e)(dogf(((oo){e<1})|((00){e<1}))d){e<2}', + 'dogfood').fuzzy_counts, (0, 0, 0)) + self.assertEqual(regex.search(r'(?e)(dogf(((oo){e<1})|((00){e<1}))d){e<2}', + 'dogfoot').fuzzy_counts, (1, 0, 0)) + + # Hg issue 312: \X not matching graphemes with zero-width-joins + self.assertEqual(regex.findall(r'\X', + '\U0001F468\u200D\U0001F469\u200D\U0001F467\u200D\U0001F466'), + ['\U0001F468\u200D\U0001F469\u200D\U0001F467\u200D\U0001F466']) + + # Hg issue 320: Abnormal performance + self.assertEqual(bool(regex.search(r'(?=a)a', 'a')), True) + self.assertEqual(bool(regex.search(r'(?!b)a', 'a')), True) + + # Hg issue 327: .fullmatch() causes MemoryError + self.assertEqual(regex.fullmatch(r'((\d)*?)*?', '123').span(), (0, 3)) + + # Hg issue 329: Wrong group matches when question mark quantifier is used within a look behind + self.assertEqual(regex.search(r'''(?(DEFINE)(?(?THIS_SHOULD_NOT_MATCHx?)|(?right))).*(?<=(?&mydef).*)''', + 'x right').capturesdict(), {'mydef': ['right'], 'wrong': [], 'right': + ['right']}) + + # Hg issue 338: specifying allowed characters when fuzzy-matching + self.assertEqual(bool(regex.match(r'(?:cat){e<=1:[u]}', 'cut')), True) + self.assertEqual(bool(regex.match(r'(?:cat){e<=1:u}', 'cut')), True) + + # Hg issue 353: fuzzy changes negative indexes + self.assertEqual(regex.search(r'(?be)(AGTGTTCCCCGCGCCAGCGGGGATAAACCG){s<=5,i<=5,d<=5,s+i+d<=10}', + 'TTCCCCGCGCCAGCGGGGATAAACCG').fuzzy_changes, ([], [], [0, 1, 3, 5])) + + # Git issue 364: Contradictory values in fuzzy_counts and fuzzy_changes + self.assertEqual(regex.match(r'(?:bc){e}', 'c').fuzzy_counts, (1, 0, + 1)) + self.assertEqual(regex.match(r'(?:bc){e}', 'c').fuzzy_changes, ([0], + [], [1])) + self.assertEqual(regex.match(r'(?e)(?:bc){e}', 'c').fuzzy_counts, (0, + 0, 1)) + self.assertEqual(regex.match(r'(?e)(?:bc){e}', 
'c').fuzzy_changes, + ([], [], [0])) + self.assertEqual(regex.match(r'(?b)(?:bc){e}', 'c').fuzzy_counts, (0, + 0, 1)) + self.assertEqual(regex.match(r'(?b)(?:bc){e}', 'c').fuzzy_changes, + ([], [], [0])) + + # Git issue 370: Confusions about Fuzzy matching behavior + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){e}', + '$ 10,112.111.12').fuzzy_counts, (6, 0, 5)) + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=1}', + '$ 10,112.111.12').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=1,i<=1,d<=1}', + '$ 10,112.111.12').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=3}', + '$ 10,1a2.111.12').fuzzy_counts, (2, 0, 0)) + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=2}', + '$ 10,1a2.111.12').fuzzy_counts, (2, 0, 0)) + + self.assertEqual(regex.fullmatch(r'(?e)(?:0?,0(?:,0)?){s<=1,d<=1}', + ',0;0').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.fullmatch(r'(?e)(?:0??,0(?:,0)?){s<=1,d<=1}', + ',0;0').fuzzy_counts, (1, 0, 0)) + + # Git issue 371: Specifying character set when fuzzy-matching allows characters not in the set + self.assertEqual(regex.search(r"\b(?e)(?:\d{6,20}){i<=5:[\-\\\/]}\b", + "cat dog starting at 00:01132.000. hello world"), None) + + # Git issue 385: Comments in expressions + self.assertEqual(bool(regex.compile('(?#)')), True) + self.assertEqual(bool(regex.compile('(?x)(?#)')), True) + + # Git issue 394: Unexpected behaviour in fuzzy matching with limited character set with IGNORECASE flag + self.assertEqual(regex.findall(r'(\d+){i<=2:[ab]}', '123X4Y5'), + ['123', '4', '5']) + self.assertEqual(regex.findall(r'(?i)(\d+){i<=2:[ab]}', '123X4Y5'), + ['123', '4', '5']) + + # Git issue 403: Fuzzy matching with wrong distance (unnecessary substitutions) + self.assertEqual(regex.match(r'^(test){e<=5}$', 'terstin', + flags=regex.B).fuzzy_counts, (0, 3, 0)) + + # Git issue 408: regex fails with a quantified backreference but succeeds with repeated backref + self.assertEqual(bool(regex.match(r"(?:(x*)\1\1\1)*x$", "x" * 5)), True) + self.assertEqual(bool(regex.match(r"(?:(x*)\1{3})*x$", "x" * 5)), True) + + # Git issue 415: Fuzzy character restrictions don't apply to insertions at "right edge" + self.assertEqual(regex.match(r't(?:es){s<=1:\d}t', 'te5t').group(), + 'te5t') + self.assertEqual(regex.match(r't(?:es){s<=1:\d}t', 'tezt'), None) + self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', 'tes5t').group(), + 'tes5t') + self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', 'teszt'), None) + self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', + 'tes5t').fuzzy_changes, ([], [3], [])) + self.assertEqual(regex.match(r't(es){i<=1,0.*)(?PCTTCC){e<=1}(?P([ACGT]){4,6})(?PCAATACCGACTCCTCACTGTGT){e<=2}(?P([ACGT]){0,6}$)' + + m = regex.match(pattern, sequence, flags=regex.BESTMATCH) + self.assertEqual(m.span(), (0, 50)) + self.assertEqual(m.groupdict(), {'insert': 'TTCAGACGTGTGCT', 'anchor': 'CTTCC', 'umi': 'GATCT', 'sid': 'CAATACCGACTCCTCACTGTGT', 'end': 'GTCT'}) + + m = regex.match(pattern, sequence, flags=regex.ENHANCEMATCH) + self.assertEqual(m.span(), (0, 50)) + self.assertEqual(m.groupdict(), {'insert': 'TTCAGACGTGTGCT', 'anchor': 'CTTCC', 'umi': 'GATCT', 'sid': 'CAATACCGACTCCTCACTGTGT', 'end': 'GTCT'}) + + # Git issue 433: Disagreement between fuzzy_counts and fuzzy_changes + pattern = r'(?P.*)(?PAACACTGG){e<=1}(?P([AT][CG]){5}){e<=2}(?PGTAACCGAAG){e<=2}(?P([ACGT]){0,6}$)' + + 
sequence = 'GGAAAACACTGGTCTCAGTCTCGTAACCGAAGTGGTCG' + m = regex.match(pattern, sequence, flags=regex.BESTMATCH) + self.assertEqual(m.fuzzy_counts, (0, 0, 0)) + self.assertEqual(m.fuzzy_changes, ([], [], [])) + + sequence = 'GGAAAACACTGGTCTCAGTCTCGTCCCCGAAGTGGTCG' + m = regex.match(pattern, sequence, flags=regex.BESTMATCH) + self.assertEqual(m.fuzzy_counts, (2, 0, 0)) + self.assertEqual(m.fuzzy_changes, ([24, 25], [], [])) + + # Git issue 439: Unmatched groups: sub vs subf + self.assertEqual(regex.sub(r'(test1)|(test2)', r'matched: \1\2', 'test1'), 'matched: test1') + self.assertEqual(regex.subf(r'(test1)|(test2)', r'matched: {1}{2}', 'test1'), 'matched: test1') + self.assertEqual(regex.search(r'(test1)|(test2)', 'matched: test1').expand(r'matched: \1\2'), 'matched: test1'), + self.assertEqual(regex.search(r'(test1)|(test2)', 'matched: test1').expandf(r'matched: {1}{2}'), 'matched: test1') + + # Git issue 442: Fuzzy regex matching doesn't seem to test insertions correctly + self.assertEqual(regex.search(r"(?:\bha\b){i:[ ]}", "having"), None) + self.assertEqual(regex.search(r"(?:\bha\b){i:[ ]}", "having", flags=regex.I), None) + + # Git issue 467: Scoped inline flags 'a', 'u' and 'L' affect global flags + self.assertEqual(regex.match(r'(?a:\w)\w', 'd\N{CYRILLIC SMALL LETTER ZHE}').span(), (0, 2)) + self.assertEqual(regex.match(r'(?a:\w)(?u:\w)', 'd\N{CYRILLIC SMALL LETTER ZHE}').span(), (0, 2)) + + # Git issue 473: Emoji classified as letter + self.assertEqual(regex.match(r'^\p{LC}+$', '\N{SMILING CAT FACE WITH OPEN MOUTH}'), None) + self.assertEqual(regex.match(r'^\p{So}+$', '\N{SMILING CAT FACE WITH OPEN MOUTH}').span(), (0, 1)) + + # Git issue 474: regex has no equivalent to `re.Match.groups()` for captures + self.assertEqual(regex.match(r'(.)+', 'abc').allcaptures(), (['abc'], ['a', 'b', 'c'])) + self.assertEqual(regex.match(r'(.)+', 'abc').allspans(), ([(0, 3)], [(0, 1), (1, 2), (2, 3)])) + + # Git issue 477: \v for vertical spacing + self.assertEqual(bool(regex.fullmatch(r'\p{HorizSpace}+', '\t \xA0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000')), True) + self.assertEqual(bool(regex.fullmatch(r'\p{VertSpace}+', '\n\v\f\r\x85\u2028\u2029')), True) + + # Git issue 479: Segmentation fault when using conditional pattern + self.assertEqual(regex.match(r'(?(?<=A)|(?(?![^B])C|D))', 'A'), None) + self.assertEqual(regex.search(r'(?(?<=A)|(?(?![^B])C|D))', 'A').span(), (1, 1)) + + # Git issue 494: Backtracking failure matching regex ^a?(a?)b?c\1$ against string abca + self.assertEqual(regex.search(r"^a?(a?)b?c\1$", "abca").span(), (0, 4)) + + # Git issue 498: Conditional negative lookahead inside positive lookahead fails to match + self.assertEqual(regex.match(r'(?(?=a).|..)', 'ab').span(), (0, 1)) + self.assertEqual(regex.match(r'(?(?=b).|..)', 'ab').span(), (0, 2)) + self.assertEqual(regex.match(r'(?(?!a).|..)', 'ab').span(), (0, 2)) + self.assertEqual(regex.match(r'(?(?!b).|..)', 'ab').span(), (0, 1)) + + # Git issue 525: segfault when fuzzy matching empty list + self.assertEqual(regex.match(r"(\L){e<=5}", "blah", foo=[]).span(), (0, 0)) + + # Git issue 527: `VERBOSE`/`X` flag breaks `\N` escapes + self.assertEqual(regex.compile(r'\N{LATIN SMALL LETTER A}').match('a').span(), (0, 1)) + self.assertEqual(regex.compile(r'\N{LATIN SMALL LETTER A}', flags=regex.X).match('a').span(), (0, 1)) + + def test_fuzzy_ext(self): + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', 'e')), + True) + 
self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'e')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', '-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', '-')), + False) + + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'ae')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', + 'ae')), True) + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'a-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', + 'a-')), False) + + self.assertEqual(bool(regex.fullmatch(r'(?:ab){e<=1:[a-z]}', 'ae')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:ab){e<=1:[a-z]}', + 'ae')), True) + self.assertEqual(bool(regex.fullmatch(r'(?:ab){e<=1:[a-z]}', 'a-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:ab){e<=1:[a-z]}', + 'a-')), False) + + self.assertEqual(bool(regex.fullmatch(r'(a)\1{e<=1:[a-z]}', 'ae')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?r)\1{e<=1:[a-z]}(a)', + 'ea')), True) + self.assertEqual(bool(regex.fullmatch(r'(a)\1{e<=1:[a-z]}', 'a-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)\1{e<=1:[a-z]}(a)', + '-a')), False) + + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 'ts')), True) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 'st')), True) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 'st')), True) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 'ts')), True) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + '-s')), False) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 's-')), False) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 's-')), False) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + '-s')), False) + + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', + 'ssst')), True) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', + 'ssts')), True) + self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(\N{LATIN SMALL LETTER SHARP S})', + 'stss')), True) + self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(\N{LATIN SMALL LETTER SHARP S})', + 'tsss')), True) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', + 'ss-s')), False) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', + 'sss-')), False) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', + '-s')), False) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', + 's-')), False) + + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}', + '\N{LATIN SMALL LETTER SHARP S}ts')), True) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}', + '\N{LATIN SMALL LETTER SHARP S}st')), True) + self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(ss)', + 'st\N{LATIN SMALL LETTER SHARP S}')), True) + self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(ss)', + 'ts\N{LATIN SMALL LETTER SHARP S}')), True) + 
self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}', + '\N{LATIN SMALL LETTER SHARP S}-s')), False) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}', + '\N{LATIN SMALL LETTER SHARP S}s-')), False) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(ss)\1{e<=1:[a-z]}', + 's-\N{LATIN SMALL LETTER SHARP S}')), False) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(ss)\1{e<=1:[a-z]}', + '-s\N{LATIN SMALL LETTER SHARP S}')), False) + + def test_subscripted_captures(self): + self.assertEqual(regex.match(r'(?P.)+', + 'abc').expandf('{0} {0[0]} {0[-1]}'), 'abc abc abc') + self.assertEqual(regex.match(r'(?P.)+', + 'abc').expandf('{1} {1[0]} {1[1]} {1[2]} {1[-1]} {1[-2]} {1[-3]}'), + 'c a b c c b a') + self.assertEqual(regex.match(r'(?P.)+', + 'abc').expandf('{x} {x[0]} {x[1]} {x[2]} {x[-1]} {x[-2]} {x[-3]}'), + 'c a b c c b a') + + self.assertEqual(regex.subf(r'(?P.)+', r'{0} {0[0]} {0[-1]}', + 'abc'), 'abc abc abc') + self.assertEqual(regex.subf(r'(?P.)+', + '{1} {1[0]} {1[1]} {1[2]} {1[-1]} {1[-2]} {1[-3]}', 'abc'), + 'c a b c c b a') + self.assertEqual(regex.subf(r'(?P.)+', + '{x} {x[0]} {x[1]} {x[2]} {x[-1]} {x[-2]} {x[-3]}', 'abc'), + 'c a b c c b a') + + def test_more_zerowidth(self): + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split(r'\b|:+', 'a::bc'), ['', 'a', '', '', + 'bc', '']) + self.assertEqual(regex.sub(r'\b|:+', '-', 'a::bc'), '-a---bc-') + self.assertEqual(regex.findall(r'\b|:+', 'a::bc'), ['', '', '::', + '', '']) + self.assertEqual([m.span() for m in regex.finditer(r'\b|:+', + 'a::bc')], [(0, 0), (1, 1), (1, 3), (3, 3), (5, 5)]) + self.assertEqual([m.span() for m in regex.finditer(r'(?m)^\s*?$', + 'foo\n\n\nbar')], [(4, 4), (4, 5), (5, 5)]) + + def test_line_ending(self): + self.assertEqual(regex.findall(r'\R', '\r\n\n\x0B\f\r\x85\u2028\u2029'), + ['\r\n', '\n', '\x0B', '\f', '\r', '\x85', '\u2028', '\u2029']) + self.assertEqual(regex.findall(br'\R', b'\r\n\n\x0B\f\r\x85'), [b'\r\n', + b'\n', b'\x0B', b'\f', b'\r']) + +def test_main(): + unittest.main(verbosity=2) + +if __name__ == "__main__": + test_main() diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..069f3ad50b951da979447dc9bcff41dc9ca2153b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/__init__.py @@ -0,0 +1,157 @@ +""" +The :mod:`sklearn` module includes functions to configure global settings and +get information about the working environment. +""" + +# Machine learning module for Python +# ================================== +# +# sklearn is a Python module integrating classical machine +# learning algorithms in the tightly-knit world of scientific Python +# packages (numpy, scipy, matplotlib). +# +# It aims to provide simple and efficient solutions to learning problems +# that are accessible to everybody and reusable in various contexts: +# machine-learning as a versatile tool for science and engineering. +# +# See https://scikit-learn.org for complete documentation. 
+ +import logging +import os +import random +import sys + +from ._config import config_context, get_config, set_config + +logger = logging.getLogger(__name__) + + +# PEP0440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Generic release markers: +# X.Y.0 # For first release after an increment in Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release markers: +# X.Y.ZaN # Alpha release +# X.Y.ZbN # Beta release +# X.Y.ZrcN # Release Candidate +# X.Y.Z # Final release +# +# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. +# 'X.Y.dev0' is the canonical version of 'X.Y.dev' +# +__version__ = "1.4.2" + + +# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded +# simultaneously. This can happen for instance when calling BLAS inside a +# prange. Setting the following environment variable allows multiple OpenMP +# libraries to be loaded. It should not degrade performance since we manually +# take care of potential over-subscription performance issues, in sections of +# the code where nested OpenMP loops can happen, by dynamically reconfiguring +# the inner OpenMP runtime to temporarily disable it while under the scope of +# the outer OpenMP parallel section. +os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True") + +# Workaround issue discovered in intel-openmp 2019.5: +# https://github.com/ContinuumIO/anaconda-issues/issues/11294 +os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") + +try: + # This variable is injected in the __builtins__ by the build + # process. It is used to enable importing subpackages of sklearn when + # the binaries are not built + # mypy error: Cannot determine type of '__SKLEARN_SETUP__' + __SKLEARN_SETUP__ # type: ignore +except NameError: + __SKLEARN_SETUP__ = False + +if __SKLEARN_SETUP__: + sys.stderr.write("Partial import of sklearn during the build process.\n") + # We are not importing the rest of scikit-learn during the build + # process, as it may not be compiled yet +else: + # `_distributor_init` allows distributors to run custom init code. + # For instance, for the Windows wheel, this is used to pre-load the + # vcomp shared library runtime for OpenMP embedded in the sklearn/.libs + # sub-folder. + # It is necessary to do this prior to importing show_versions as the + # latter is linked to the OpenMP runtime to make it possible to introspect + # it and importing it first would fail if the OpenMP dll cannot be found. + from .
import ( + __check_build, # noqa: F401 + _distributor_init, # noqa: F401 + ) + from .base import clone + from .utils._show_versions import show_versions + + __all__ = [ + "calibration", + "cluster", + "covariance", + "cross_decomposition", + "datasets", + "decomposition", + "dummy", + "ensemble", + "exceptions", + "experimental", + "externals", + "feature_extraction", + "feature_selection", + "gaussian_process", + "inspection", + "isotonic", + "kernel_approximation", + "kernel_ridge", + "linear_model", + "manifold", + "metrics", + "mixture", + "model_selection", + "multiclass", + "multioutput", + "naive_bayes", + "neighbors", + "neural_network", + "pipeline", + "preprocessing", + "random_projection", + "semi_supervised", + "svm", + "tree", + "discriminant_analysis", + "impute", + "compose", + # Non-modules: + "clone", + "get_config", + "set_config", + "config_context", + "show_versions", + ] + + _BUILT_WITH_MESON = False + try: + import sklearn._built_with_meson # noqa: F401 + + _BUILT_WITH_MESON = True + except ModuleNotFoundError: + pass + + +def setup_module(module): + """Fixture for the tests to assure globally controllable seeding of RNGs""" + + import numpy as np + + # Check if a random seed exists in the environment, if not create one. + _random_seed = os.environ.get("SKLEARN_SEED", None) + if _random_seed is None: + _random_seed = np.random.uniform() * np.iinfo(np.int32).max + _random_seed = int(_random_seed) + print("I: Seeding RNGs with %r" % _random_seed) + np.random.seed(_random_seed) + random.seed(_random_seed) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_config.py b/llmeval-env/lib/python3.10/site-packages/sklearn/_config.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ccaca0a98f79317ae2eba75f0d8dfbcd0021ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/_config.py @@ -0,0 +1,373 @@ +"""Global configuration state and functions for management +""" +import os +import threading +from contextlib import contextmanager as contextmanager + +_global_config = { + "assume_finite": bool(os.environ.get("SKLEARN_ASSUME_FINITE", False)), + "working_memory": int(os.environ.get("SKLEARN_WORKING_MEMORY", 1024)), + "print_changed_only": True, + "display": "diagram", + "pairwise_dist_chunk_size": int( + os.environ.get("SKLEARN_PAIRWISE_DIST_CHUNK_SIZE", 256) + ), + "enable_cython_pairwise_dist": True, + "array_api_dispatch": False, + "transform_output": "default", + "enable_metadata_routing": False, + "skip_parameter_validation": False, +} +_threadlocal = threading.local() + + +def _get_threadlocal_config(): + """Get a threadlocal **mutable** configuration. If the configuration + does not exist, copy the default global configuration.""" + if not hasattr(_threadlocal, "global_config"): + _threadlocal.global_config = _global_config.copy() + return _threadlocal.global_config + + +def get_config(): + """Retrieve current values for configuration set by :func:`set_config`. + + Returns + ------- + config : dict + Keys are parameter names that can be passed to :func:`set_config`. + + See Also + -------- + config_context : Context manager for global scikit-learn configuration. + set_config : Set global scikit-learn configuration. + + Examples + -------- + >>> import sklearn + >>> config = sklearn.get_config() + >>> config.keys() + dict_keys([...]) + """ + # Return a copy of the threadlocal configuration so that users will + # not be able to modify the configuration with the returned dict. 
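+ # For example (illustrative only): + # cfg = get_config() + # cfg["assume_finite"] = True # mutates only the returned copy + # get_config()["assume_finite"] # still reports the active setting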
+ return _get_threadlocal_config().copy() + + +def set_config( + assume_finite=None, + working_memory=None, + print_changed_only=None, + display=None, + pairwise_dist_chunk_size=None, + enable_cython_pairwise_dist=None, + array_api_dispatch=None, + transform_output=None, + enable_metadata_routing=None, + skip_parameter_validation=None, +): + """Set global scikit-learn configuration. + + .. versionadded:: 0.19 + + Parameters + ---------- + assume_finite : bool, default=None + If True, validation for finiteness will be skipped, + saving time, but leading to potential crashes. If + False, validation for finiteness will be performed, + avoiding error. Global default: False. + + .. versionadded:: 0.19 + + working_memory : int, default=None + If set, scikit-learn will attempt to limit the size of temporary arrays + to this number of MiB (per job when parallelised), often saving both + computation time and memory on expensive operations that can be + performed in chunks. Global default: 1024. + + .. versionadded:: 0.20 + + print_changed_only : bool, default=None + If True, only the parameters that were set to non-default + values will be printed when printing an estimator. For example, + ``print(SVC())`` while True will only print 'SVC()' while the default + behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with + all the non-changed parameters. + + .. versionadded:: 0.21 + + display : {'text', 'diagram'}, default=None + If 'diagram', estimators will be displayed as a diagram in a Jupyter + lab or notebook context. If 'text', estimators will be displayed as + text. Default is 'diagram'. + + .. versionadded:: 0.23 + + pairwise_dist_chunk_size : int, default=None + The number of row vectors per chunk for the accelerated pairwise- + distances reduction backend. Default is 256 (suitable for most of + modern laptops' caches and architectures). + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + enable_cython_pairwise_dist : bool, default=None + Use the accelerated pairwise-distances reduction backend when + possible. Global default: True. + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + array_api_dispatch : bool, default=None + Use Array API dispatching when inputs follow the Array API standard. + Default is False. + + See the :ref:`User Guide ` for more details. + + .. versionadded:: 1.2 + + transform_output : str, default=None + Configure output of `transform` and `fit_transform`. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.2 + .. versionadded:: 1.4 + `"polars"` option was added. + + enable_metadata_routing : bool, default=None + Enable metadata routing. By default this feature is disabled. + + Refer to :ref:`metadata routing user guide ` for more + details. + + - `True`: Metadata routing is enabled + - `False`: Metadata routing is disabled, use the old syntax. + - `None`: Configuration is unchanged + + .. 
versionadded:: 1.3 + + skip_parameter_validation : bool, default=None + If `True`, disable the validation of the hyper-parameters' types and values in + the fit method of estimators and for arguments passed to public helper + functions. It can save time in some situations but can lead to low level + crashes and exceptions with confusing error messages. + + Note that for data parameters, such as `X` and `y`, only type validation is + skipped but validation with `check_array` will continue to run. + + .. versionadded:: 1.3 + + See Also + -------- + config_context : Context manager for global scikit-learn configuration. + get_config : Retrieve current values of the global configuration. + + Examples + -------- + >>> from sklearn import set_config + >>> set_config(display='diagram') # doctest: +SKIP + """ + local_config = _get_threadlocal_config() + + if assume_finite is not None: + local_config["assume_finite"] = assume_finite + if working_memory is not None: + local_config["working_memory"] = working_memory + if print_changed_only is not None: + local_config["print_changed_only"] = print_changed_only + if display is not None: + local_config["display"] = display + if pairwise_dist_chunk_size is not None: + local_config["pairwise_dist_chunk_size"] = pairwise_dist_chunk_size + if enable_cython_pairwise_dist is not None: + local_config["enable_cython_pairwise_dist"] = enable_cython_pairwise_dist + if array_api_dispatch is not None: + from .utils._array_api import _check_array_api_dispatch + + _check_array_api_dispatch(array_api_dispatch) + local_config["array_api_dispatch"] = array_api_dispatch + if transform_output is not None: + local_config["transform_output"] = transform_output + if enable_metadata_routing is not None: + local_config["enable_metadata_routing"] = enable_metadata_routing + if skip_parameter_validation is not None: + local_config["skip_parameter_validation"] = skip_parameter_validation + + +@contextmanager +def config_context( + *, + assume_finite=None, + working_memory=None, + print_changed_only=None, + display=None, + pairwise_dist_chunk_size=None, + enable_cython_pairwise_dist=None, + array_api_dispatch=None, + transform_output=None, + enable_metadata_routing=None, + skip_parameter_validation=None, +): + """Context manager for global scikit-learn configuration. + + Parameters + ---------- + assume_finite : bool, default=None + If True, validation for finiteness will be skipped, + saving time, but leading to potential crashes. If + False, validation for finiteness will be performed, + avoiding error. If None, the existing value won't change. + The default value is False. + + working_memory : int, default=None + If set, scikit-learn will attempt to limit the size of temporary arrays + to this number of MiB (per job when parallelised), often saving both + computation time and memory on expensive operations that can be + performed in chunks. If None, the existing value won't change. + The default value is 1024. + + print_changed_only : bool, default=None + If True, only the parameters that were set to non-default + values will be printed when printing an estimator. For example, + ``print(SVC())`` while True will only print 'SVC()', but would print + 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters + when False. If None, the existing value won't change. + The default value is True. + + .. versionchanged:: 0.23 + Default changed from False to True. 
+ + display : {'text', 'diagram'}, default=None + If 'diagram', estimators will be displayed as a diagram in a Jupyter + lab or notebook context. If 'text', estimators will be displayed as + text. If None, the existing value won't change. + The default value is 'diagram'. + + .. versionadded:: 0.23 + + pairwise_dist_chunk_size : int, default=None + The number of row vectors per chunk for the accelerated pairwise- + distances reduction backend. Default is 256 (suitable for most of + modern laptops' caches and architectures). + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + enable_cython_pairwise_dist : bool, default=None + Use the accelerated pairwise-distances reduction backend when + possible. Global default: True. + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + array_api_dispatch : bool, default=None + Use Array API dispatching when inputs follow the Array API standard. + Default is False. + + See the :ref:`User Guide ` for more details. + + .. versionadded:: 1.2 + + transform_output : str, default=None + Configure output of `transform` and `fit_transform`. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.2 + .. versionadded:: 1.4 + `"polars"` option was added. + + enable_metadata_routing : bool, default=None + Enable metadata routing. By default this feature is disabled. + + Refer to :ref:`metadata routing user guide ` for more + details. + + - `True`: Metadata routing is enabled + - `False`: Metadata routing is disabled, use the old syntax. + - `None`: Configuration is unchanged + + .. versionadded:: 1.3 + + skip_parameter_validation : bool, default=None + If `True`, disable the validation of the hyper-parameters' types and values in + the fit method of estimators and for arguments passed to public helper + functions. It can save time in some situations but can lead to low level + crashes and exceptions with confusing error messages. + + Note that for data parameters, such as `X` and `y`, only type validation is + skipped but validation with `check_array` will continue to run. + + .. versionadded:: 1.3 + + Yields + ------ + None. + + See Also + -------- + set_config : Set global scikit-learn configuration. + get_config : Retrieve current values of the global configuration. + + Notes + ----- + All settings, not just those presently modified, will be returned to + their previous values when the context manager is exited. + + Examples + -------- + >>> import sklearn + >>> from sklearn.utils.validation import assert_all_finite + >>> with sklearn.config_context(assume_finite=True): + ... assert_all_finite([float('nan')]) + >>> with sklearn.config_context(assume_finite=True): + ... with sklearn.config_context(assume_finite=False): + ... assert_all_finite([float('nan')]) + Traceback (most recent call last): + ... + ValueError: Input contains NaN... 
+ """ + old_config = get_config() + set_config( + assume_finite=assume_finite, + working_memory=working_memory, + print_changed_only=print_changed_only, + display=display, + pairwise_dist_chunk_size=pairwise_dist_chunk_size, + enable_cython_pairwise_dist=enable_cython_pairwise_dist, + array_api_dispatch=array_api_dispatch, + transform_output=transform_output, + enable_metadata_routing=enable_metadata_routing, + skip_parameter_validation=skip_parameter_validation, + ) + + try: + yield + finally: + set_config(**old_config) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_distributor_init.py b/llmeval-env/lib/python3.10/site-packages/sklearn/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..a0142ac80878fea20373cff1801218809bcf2953 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/_distributor_init.py @@ -0,0 +1,10 @@ +""" Distributor init file + +Distributors: you can add custom code here to support particular distributions +of scikit-learn. + +For example, this is a good place to put any checks for hardware requirements. + +The scikit-learn standard source distribution will not put code in this file, +so you can safely replace this file with your own version. +""" diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_isotonic.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/_isotonic.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2900bd1a8a3788f680f5ab5234491b7f074e367a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/_isotonic.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/_min_dependencies.py b/llmeval-env/lib/python3.10/site-packages/sklearn/_min_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..f8ff53cf59336a485b73797e2a41a2e933fb7cf5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/_min_dependencies.py @@ -0,0 +1,65 @@ +"""All minimum dependencies for scikit-learn.""" +import argparse +from collections import defaultdict + +# scipy and cython should by in sync with pyproject.toml +NUMPY_MIN_VERSION = "1.19.5" +SCIPY_MIN_VERSION = "1.6.0" +JOBLIB_MIN_VERSION = "1.2.0" +THREADPOOLCTL_MIN_VERSION = "2.0.0" +PYTEST_MIN_VERSION = "7.1.2" +CYTHON_MIN_VERSION = "3.0.8" + + +# 'build' and 'install' is included to have structured metadata for CI. 
+# It will NOT be included in setup's extras_require +# The values are (version_spec, comma separated tags) +dependent_packages = { + "numpy": (NUMPY_MIN_VERSION, "build, install"), + "scipy": (SCIPY_MIN_VERSION, "build, install"), + "joblib": (JOBLIB_MIN_VERSION, "install"), + "threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"), + "cython": (CYTHON_MIN_VERSION, "build"), + "matplotlib": ("3.3.4", "benchmark, docs, examples, tests"), + "scikit-image": ("0.17.2", "docs, examples, tests"), + "pandas": ("1.1.5", "benchmark, docs, examples, tests"), + "seaborn": ("0.9.0", "docs, examples"), + "memory_profiler": ("0.57.0", "benchmark, docs"), + "pytest": (PYTEST_MIN_VERSION, "tests"), + "pytest-cov": ("2.9.0", "tests"), + "ruff": ("0.0.272", "tests"), + "black": ("23.3.0", "tests"), + "mypy": ("1.3", "tests"), + "pyamg": ("4.0.0", "tests"), + "polars": ("0.19.12", "tests"), + "pyarrow": ("12.0.0", "tests"), + "sphinx": ("6.0.0", "docs"), + "sphinx-copybutton": ("0.5.2", "docs"), + "sphinx-gallery": ("0.15.0", "docs"), + "numpydoc": ("1.2.0", "docs, tests"), + "Pillow": ("7.1.2", "docs"), + "pooch": ("1.6.0", "docs, examples, tests"), + "sphinx-prompt": ("1.3.0", "docs"), + "sphinxext-opengraph": ("0.4.2", "docs"), + "plotly": ("5.14.0", "docs, examples"), + # XXX: Pin conda-lock to the latest released version (needs manual update + # from time to time) + "conda-lock": ("2.4.2", "maintenance"), +} + + +# create inverse mapping for setuptools +tag_to_packages: dict = defaultdict(list) +for package, (min_version, extras) in dependent_packages.items(): + for extra in extras.split(", "): + tag_to_packages[extra].append("{}>={}".format(package, min_version)) + + +# Used by CI to get the min dependencies +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Get min dependencies for a package") + + parser.add_argument("package", choices=dependent_packages) + args = parser.parse_args() + min_version = dependent_packages[args.package][0] + print(min_version) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/base.py new file mode 100644 index 0000000000000000000000000000000000000000..e73ae4c8a180edbbc43fdac4ac20841ebd759204 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/base.py @@ -0,0 +1,1478 @@ +"""Base classes for all estimators.""" + +# Author: Gael Varoquaux +# License: BSD 3 clause + +import copy +import functools +import inspect +import platform +import re +import warnings +from collections import defaultdict + +import numpy as np + +from . import __version__ +from ._config import config_context, get_config +from .exceptions import InconsistentVersionWarning +from .utils import _IS_32BIT +from .utils._estimator_html_repr import _HTMLDocumentationLinkMixin, estimator_html_repr +from .utils._metadata_requests import _MetadataRequester, _routing_enabled +from .utils._param_validation import validate_parameter_constraints +from .utils._set_output import _SetOutputMixin +from .utils._tags import ( + _DEFAULT_TAGS, +) +from .utils.validation import ( + _check_feature_names_in, + _check_y, + _generate_get_feature_names_out, + _get_feature_names, + _is_fitted, + _num_features, + check_array, + check_is_fitted, + check_X_y, +) + + +def clone(estimator, *, safe=True): + """Construct a new unfitted estimator with the same parameters. + + Clone does a deep copy of the model in an estimator + without actually copying attached data. 
It returns a new estimator + with the same parameters that has not been fitted on any data. + + .. versionchanged:: 1.3 + Delegates to `estimator.__sklearn_clone__` if the method exists. + + Parameters + ---------- + estimator : {list, tuple, set} of estimator instance or a single \ + estimator instance + The estimator or group of estimators to be cloned. + safe : bool, default=True + If safe is False, clone will fall back to a deep copy on objects + that are not estimators. Ignored if `estimator.__sklearn_clone__` + exists. + + Returns + ------- + estimator : object + The deep copy of the input, an estimator if input is an estimator. + + Notes + ----- + If the estimator's `random_state` parameter is an integer (or if the + estimator doesn't have a `random_state` parameter), an *exact clone* is + returned: the clone and the original estimator will give the exact same + results. Otherwise, *statistical clone* is returned: the clone might + return different results from the original estimator. More details can be + found in :ref:`randomness`. + + Examples + -------- + >>> from sklearn.base import clone + >>> from sklearn.linear_model import LogisticRegression + >>> X = [[-1, 0], [0, 1], [0, -1], [1, 0]] + >>> y = [0, 0, 1, 1] + >>> classifier = LogisticRegression().fit(X, y) + >>> cloned_classifier = clone(classifier) + >>> hasattr(classifier, "classes_") + True + >>> hasattr(cloned_classifier, "classes_") + False + >>> classifier is cloned_classifier + False + """ + if hasattr(estimator, "__sklearn_clone__") and not inspect.isclass(estimator): + return estimator.__sklearn_clone__() + return _clone_parametrized(estimator, safe=safe) + + +def _clone_parametrized(estimator, *, safe=True): + """Default implementation of clone. See :func:`sklearn.base.clone` for details.""" + + estimator_type = type(estimator) + if estimator_type is dict: + return {k: clone(v, safe=safe) for k, v in estimator.items()} + elif estimator_type in (list, tuple, set, frozenset): + return estimator_type([clone(e, safe=safe) for e in estimator]) + elif not hasattr(estimator, "get_params") or isinstance(estimator, type): + if not safe: + return copy.deepcopy(estimator) + else: + if isinstance(estimator, type): + raise TypeError( + "Cannot clone object. " + + "You should provide an instance of " + + "scikit-learn estimator instead of a class." + ) + else: + raise TypeError( + "Cannot clone object '%s' (type %s): " + "it does not seem to be a scikit-learn " + "estimator as it does not implement a " + "'get_params' method." % (repr(estimator), type(estimator)) + ) + + klass = estimator.__class__ + new_object_params = estimator.get_params(deep=False) + for name, param in new_object_params.items(): + new_object_params[name] = clone(param, safe=False) + + new_object = klass(**new_object_params) + try: + new_object._metadata_request = copy.deepcopy(estimator._metadata_request) + except AttributeError: + pass + + params_set = new_object.get_params(deep=False) + + # quick sanity check of the parameters of the clone + for name in new_object_params: + param1 = new_object_params[name] + param2 = params_set[name] + if param1 is not param2: + raise RuntimeError( + "Cannot clone object %s, as the constructor " + "either does not set or modifies parameter %s" % (estimator, name) + ) + + # _sklearn_output_config is used by `set_output` to configure the output + # container of an estimator. 
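+ # Copying it over means the clone keeps whatever output container (e.g. + # pandas) was requested via `set_output` on the original estimator.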
+ if hasattr(estimator, "_sklearn_output_config"): + new_object._sklearn_output_config = copy.deepcopy( + estimator._sklearn_output_config + ) + return new_object + + +class BaseEstimator(_HTMLDocumentationLinkMixin, _MetadataRequester): + """Base class for all estimators in scikit-learn. + + Inheriting from this class provides default implementations of: + + - setting and getting parameters used by `GridSearchCV` and friends; + - textual and HTML representation displayed in terminals and IDEs; + - estimator serialization; + - parameters validation; + - data validation; + - feature names validation. + + Read more in the :ref:`User Guide `. + + + Notes + ----- + All estimators should specify all the parameters that can be set + at the class level in their ``__init__`` as explicit keyword + arguments (no ``*args`` or ``**kwargs``). + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator + >>> class MyEstimator(BaseEstimator): + ... def __init__(self, *, param=1): + ... self.param = param + ... def fit(self, X, y=None): + ... self.is_fitted_ = True + ... return self + ... def predict(self, X): + ... return np.full(shape=X.shape[0], fill_value=self.param) + >>> estimator = MyEstimator(param=2) + >>> estimator.get_params() + {'param': 2} + >>> X = np.array([[1, 2], [2, 3], [3, 4]]) + >>> y = np.array([1, 0, 1]) + >>> estimator.fit(X, y).predict(X) + array([2, 2, 2]) + >>> estimator.set_params(param=3).fit(X, y).predict(X) + array([3, 3, 3]) + """ + + @classmethod + def _get_param_names(cls): + """Get parameter names for the estimator""" + # fetch the constructor or the original constructor before + # deprecation wrapping if any + init = getattr(cls.__init__, "deprecated_original", cls.__init__) + if init is object.__init__: + # No explicit constructor to introspect + return [] + + # introspect the constructor arguments to find the model parameters + # to represent + init_signature = inspect.signature(init) + # Consider the constructor parameters excluding 'self' + parameters = [ + p + for p in init_signature.parameters.values() + if p.name != "self" and p.kind != p.VAR_KEYWORD + ] + for p in parameters: + if p.kind == p.VAR_POSITIONAL: + raise RuntimeError( + "scikit-learn estimators should always " + "specify their parameters in the signature" + " of their __init__ (no varargs)." + " %s with constructor %s doesn't " + " follow this convention." % (cls, init_signature) + ) + # Extract and sort argument names excluding 'self' + return sorted([p.name for p in parameters]) + + def get_params(self, deep=True): + """ + Get parameters for this estimator. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + out = dict() + for key in self._get_param_names(): + value = getattr(self, key) + if deep and hasattr(value, "get_params") and not isinstance(value, type): + deep_items = value.get_params().items() + out.update((key + "__" + k, val) for k, val in deep_items) + out[key] = value + return out + + def set_params(self, **params): + """Set the parameters of this estimator. + + The method works on simple estimators as well as on nested objects + (such as :class:`~sklearn.pipeline.Pipeline`). The latter have + parameters of the form ``__`` so that it's + possible to update each component of a nested object. + + Parameters + ---------- + **params : dict + Estimator parameters. 
+ + Returns + ------- + self : estimator instance + Estimator instance. + """ + if not params: + # Simple optimization to gain speed (inspect is slow) + return self + valid_params = self.get_params(deep=True) + + nested_params = defaultdict(dict) # grouped by prefix + for key, value in params.items(): + key, delim, sub_key = key.partition("__") + if key not in valid_params: + local_valid_params = self._get_param_names() + raise ValueError( + f"Invalid parameter {key!r} for estimator {self}. " + f"Valid parameters are: {local_valid_params!r}." + ) + + if delim: + nested_params[key][sub_key] = value + else: + setattr(self, key, value) + valid_params[key] = value + + for key, sub_params in nested_params.items(): + valid_params[key].set_params(**sub_params) + + return self + + def __sklearn_clone__(self): + return _clone_parametrized(self) + + def __repr__(self, N_CHAR_MAX=700): + # N_CHAR_MAX is the (approximate) maximum number of non-blank + # characters to render. We pass it as an optional parameter to ease + # the tests. + + from .utils._pprint import _EstimatorPrettyPrinter + + N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences + + # use ellipsis for sequences with a lot of elements + pp = _EstimatorPrettyPrinter( + compact=True, + indent=1, + indent_at_name=True, + n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW, + ) + + repr_ = pp.pformat(self) + + # Use bruteforce ellipsis when there are a lot of non-blank characters + n_nonblank = len("".join(repr_.split())) + if n_nonblank > N_CHAR_MAX: + lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends + regex = r"^(\s*\S){%d}" % lim + # The regex '^(\s*\S){%d}' % n + # matches from the start of the string until the nth non-blank + # character: + # - ^ matches the start of string + # - (pattern){n} matches n repetitions of pattern + # - \s*\S matches a non-blank char following zero or more blanks + left_lim = re.match(regex, repr_).end() + right_lim = re.match(regex, repr_[::-1]).end() + + if "\n" in repr_[left_lim:-right_lim]: + # The left side and right side aren't on the same line. + # To avoid weird cuts, e.g.: + # categoric...ore', + # we need to start the right side with an appropriate newline + # character so that it renders properly as: + # categoric... + # handle_unknown='ignore', + # so we add [^\n]*\n which matches until the next \n + regex += r"[^\n]*\n" + right_lim = re.match(regex, repr_[::-1]).end() + + ellipsis = "..." + if left_lim + len(ellipsis) < len(repr_) - right_lim: + # Only add ellipsis if it results in a shorter repr + repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:] + + return repr_ + + def __getstate__(self): + if getattr(self, "__slots__", None): + raise TypeError( + "You cannot use `__slots__` in objects inheriting from " + "`sklearn.base.BaseEstimator`." + ) + + try: + state = super().__getstate__() + if state is None: + # For Python 3.11+, empty instance (no `__slots__`, + # and `__dict__`) will return a state equal to `None`. 
+ state = self.__dict__.copy() + except AttributeError: + # Python < 3.11 + state = self.__dict__.copy() + + if type(self).__module__.startswith("sklearn."): + return dict(state.items(), _sklearn_version=__version__) + else: + return state + + def __setstate__(self, state): + if type(self).__module__.startswith("sklearn."): + pickle_version = state.pop("_sklearn_version", "pre-0.18") + if pickle_version != __version__: + warnings.warn( + InconsistentVersionWarning( + estimator_name=self.__class__.__name__, + current_sklearn_version=__version__, + original_sklearn_version=pickle_version, + ), + ) + try: + super().__setstate__(state) + except AttributeError: + self.__dict__.update(state) + + def _more_tags(self): + return _DEFAULT_TAGS + + def _get_tags(self): + collected_tags = {} + for base_class in reversed(inspect.getmro(self.__class__)): + if hasattr(base_class, "_more_tags"): + # need the if because mixins might not have _more_tags + # but might do redundant work in estimators + # (i.e. calling more tags on BaseEstimator multiple times) + more_tags = base_class._more_tags(self) + collected_tags.update(more_tags) + return collected_tags + + def _check_n_features(self, X, reset): + """Set the `n_features_in_` attribute, or check against it. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input samples. + reset : bool + If True, the `n_features_in_` attribute is set to `X.shape[1]`. + If False and the attribute exists, then check that it is equal to + `X.shape[1]`. If False and the attribute does *not* exist, then + the check is skipped. + .. note:: + It is recommended to call reset=True in `fit` and in the first + call to `partial_fit`. All other methods that validate `X` + should set `reset=False`. + """ + try: + n_features = _num_features(X) + except TypeError as e: + if not reset and hasattr(self, "n_features_in_"): + raise ValueError( + "X does not contain any features, but " + f"{self.__class__.__name__} is expecting " + f"{self.n_features_in_} features" + ) from e + # If the number of features is not defined and reset=True, + # then we skip this check + return + + if reset: + self.n_features_in_ = n_features + return + + if not hasattr(self, "n_features_in_"): + # Skip this check if the expected number of expected input features + # was not recorded by calling fit first. This is typically the case + # for stateless transformers. + return + + if n_features != self.n_features_in_: + raise ValueError( + f"X has {n_features} features, but {self.__class__.__name__} " + f"is expecting {self.n_features_in_} features as input." + ) + + def _check_feature_names(self, X, *, reset): + """Set or check the `feature_names_in_` attribute. + + .. versionadded:: 1.0 + + Parameters + ---------- + X : {ndarray, dataframe} of shape (n_samples, n_features) + The input samples. + + reset : bool + Whether to reset the `feature_names_in_` attribute. + If False, the input will be checked for consistency with + feature names of data provided when reset was last True. + .. note:: + It is recommended to call `reset=True` in `fit` and in the first + call to `partial_fit`. All other methods that validate `X` + should set `reset=False`. + """ + + if reset: + feature_names_in = _get_feature_names(X) + if feature_names_in is not None: + self.feature_names_in_ = feature_names_in + elif hasattr(self, "feature_names_in_"): + # Delete the attribute when the estimator is fitted on a new dataset + # that has no feature names. 
+ delattr(self, "feature_names_in_") + return + + fitted_feature_names = getattr(self, "feature_names_in_", None) + X_feature_names = _get_feature_names(X) + + if fitted_feature_names is None and X_feature_names is None: + # no feature names seen in fit and in X + return + + if X_feature_names is not None and fitted_feature_names is None: + warnings.warn( + f"X has feature names, but {self.__class__.__name__} was fitted without" + " feature names" + ) + return + + if X_feature_names is None and fitted_feature_names is not None: + warnings.warn( + "X does not have valid feature names, but" + f" {self.__class__.__name__} was fitted with feature names" + ) + return + + # validate the feature names against the `feature_names_in_` attribute + if len(fitted_feature_names) != len(X_feature_names) or np.any( + fitted_feature_names != X_feature_names + ): + message = ( + "The feature names should match those that were passed during fit.\n" + ) + fitted_feature_names_set = set(fitted_feature_names) + X_feature_names_set = set(X_feature_names) + + unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set) + missing_names = sorted(fitted_feature_names_set - X_feature_names_set) + + def add_names(names): + output = "" + max_n_names = 5 + for i, name in enumerate(names): + if i >= max_n_names: + output += "- ...\n" + break + output += f"- {name}\n" + return output + + if unexpected_names: + message += "Feature names unseen at fit time:\n" + message += add_names(unexpected_names) + + if missing_names: + message += "Feature names seen at fit time, yet now missing:\n" + message += add_names(missing_names) + + if not missing_names and not unexpected_names: + message += ( + "Feature names must be in the same order as they were in fit.\n" + ) + + raise ValueError(message) + + def _validate_data( + self, + X="no_validation", + y="no_validation", + reset=True, + validate_separately=False, + cast_to_ndarray=True, + **check_params, + ): + """Validate input data and set or check the `n_features_in_` attribute. + + Parameters + ---------- + X : {array-like, sparse matrix, dataframe} of shape \ + (n_samples, n_features), default='no validation' + The input samples. + If `'no_validation'`, no validation is performed on `X`. This is + useful for meta-estimator which can delegate input validation to + their underlying estimator(s). In that case `y` must be passed and + the only accepted `check_params` are `multi_output` and + `y_numeric`. + + y : array-like of shape (n_samples,), default='no_validation' + The targets. + + - If `None`, `check_array` is called on `X`. If the estimator's + requires_y tag is True, then an error will be raised. + - If `'no_validation'`, `check_array` is called on `X` and the + estimator's requires_y tag is ignored. This is a default + placeholder and is never meant to be explicitly set. In that case + `X` must be passed. + - Otherwise, only `y` with `_check_y` or both `X` and `y` are + checked with either `check_array` or `check_X_y` depending on + `validate_separately`. + + reset : bool, default=True + Whether to reset the `n_features_in_` attribute. + If False, the input will be checked for consistency with data + provided when reset was last True. + .. note:: + It is recommended to call reset=True in `fit` and in the first + call to `partial_fit`. All other methods that validate `X` + should set `reset=False`. + + validate_separately : False or tuple of dicts, default=False + Only used if y is not None. + If False, call validate_X_y(). 
Else, it must be a tuple of kwargs + to be used for calling check_array() on X and y respectively. + + `estimator=self` is automatically added to these dicts to generate + more informative error message in case of invalid input data. + + cast_to_ndarray : bool, default=True + Cast `X` and `y` to ndarray with checks in `check_params`. If + `False`, `X` and `y` are unchanged and only `feature_names_in_` and + `n_features_in_` are checked. + + **check_params : kwargs + Parameters passed to :func:`sklearn.utils.check_array` or + :func:`sklearn.utils.check_X_y`. Ignored if validate_separately + is not False. + + `estimator=self` is automatically added to these params to generate + more informative error message in case of invalid input data. + + Returns + ------- + out : {ndarray, sparse matrix} or tuple of these + The validated input. A tuple is returned if both `X` and `y` are + validated. + """ + self._check_feature_names(X, reset=reset) + + if y is None and self._get_tags()["requires_y"]: + raise ValueError( + f"This {self.__class__.__name__} estimator " + "requires y to be passed, but the target y is None." + ) + + no_val_X = isinstance(X, str) and X == "no_validation" + no_val_y = y is None or isinstance(y, str) and y == "no_validation" + + if no_val_X and no_val_y: + raise ValueError("Validation should be done on X, y or both.") + + default_check_params = {"estimator": self} + check_params = {**default_check_params, **check_params} + + if not cast_to_ndarray: + if not no_val_X and no_val_y: + out = X + elif no_val_X and not no_val_y: + out = y + else: + out = X, y + elif not no_val_X and no_val_y: + out = check_array(X, input_name="X", **check_params) + elif no_val_X and not no_val_y: + out = _check_y(y, **check_params) + else: + if validate_separately: + # We need this because some estimators validate X and y + # separately, and in general, separately calling check_array() + # on X and y isn't equivalent to just calling check_X_y() + # :( + check_X_params, check_y_params = validate_separately + if "estimator" not in check_X_params: + check_X_params = {**default_check_params, **check_X_params} + X = check_array(X, input_name="X", **check_X_params) + if "estimator" not in check_y_params: + check_y_params = {**default_check_params, **check_y_params} + y = check_array(y, input_name="y", **check_y_params) + else: + X, y = check_X_y(X, y, **check_params) + out = X, y + + if not no_val_X and check_params.get("ensure_2d", True): + self._check_n_features(X, reset=reset) + + return out + + def _validate_params(self): + """Validate types and values of constructor parameters + + The expected type and values must be defined in the `_parameter_constraints` + class attribute, which is a dictionary `param_name: list of constraints`. See + the docstring of `validate_parameter_constraints` for a description of the + accepted constraints. + """ + validate_parameter_constraints( + self._parameter_constraints, + self.get_params(deep=False), + caller_name=self.__class__.__name__, + ) + + @property + def _repr_html_(self): + """HTML representation of estimator. + + This is redundant with the logic of `_repr_mimebundle_`. The latter + should be favorted in the long term, `_repr_html_` is only + implemented for consumers who do not interpret `_repr_mimbundle_`. 
+ """ + if get_config()["display"] != "diagram": + raise AttributeError( + "_repr_html_ is only defined when the " + "'display' configuration option is set to " + "'diagram'" + ) + return self._repr_html_inner + + def _repr_html_inner(self): + """This function is returned by the @property `_repr_html_` to make + `hasattr(estimator, "_repr_html_") return `True` or `False` depending + on `get_config()["display"]`. + """ + return estimator_html_repr(self) + + def _repr_mimebundle_(self, **kwargs): + """Mime bundle used by jupyter kernels to display estimator""" + output = {"text/plain": repr(self)} + if get_config()["display"] == "diagram": + output["text/html"] = estimator_html_repr(self) + return output + + +class ClassifierMixin: + """Mixin class for all classifiers in scikit-learn. + + This mixin defines the following functionality: + + - `_estimator_type` class attribute defaulting to `"classifier"`; + - `score` method that default to :func:`~sklearn.metrics.accuracy_score`. + - enforce that `fit` requires `y` to be passed through the `requires_y` tag. + + Read more in the :ref:`User Guide `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, ClassifierMixin + >>> # Mixin classes should always be on the left-hand side for a correct MRO + >>> class MyEstimator(ClassifierMixin, BaseEstimator): + ... def __init__(self, *, param=1): + ... self.param = param + ... def fit(self, X, y=None): + ... self.is_fitted_ = True + ... return self + ... def predict(self, X): + ... return np.full(shape=X.shape[0], fill_value=self.param) + >>> estimator = MyEstimator(param=1) + >>> X = np.array([[1, 2], [2, 3], [3, 4]]) + >>> y = np.array([1, 0, 1]) + >>> estimator.fit(X, y).predict(X) + array([1, 1, 1]) + >>> estimator.score(X, y) + 0.66... + """ + + _estimator_type = "classifier" + + def score(self, X, y, sample_weight=None): + """ + Return the mean accuracy on the given test data and labels. + + In multi-label classification, this is the subset accuracy + which is a harsh metric since you require for each sample that + each label set be correctly predicted. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True labels for `X`. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + Mean accuracy of ``self.predict(X)`` w.r.t. `y`. + """ + from .metrics import accuracy_score + + return accuracy_score(y, self.predict(X), sample_weight=sample_weight) + + def _more_tags(self): + return {"requires_y": True} + + +class RegressorMixin: + """Mixin class for all regression estimators in scikit-learn. + + This mixin defines the following functionality: + + - `_estimator_type` class attribute defaulting to `"regressor"`; + - `score` method that default to :func:`~sklearn.metrics.r2_score`. + - enforce that `fit` requires `y` to be passed through the `requires_y` tag. + + Read more in the :ref:`User Guide `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, RegressorMixin + >>> # Mixin classes should always be on the left-hand side for a correct MRO + >>> class MyEstimator(RegressorMixin, BaseEstimator): + ... def __init__(self, *, param=1): + ... self.param = param + ... def fit(self, X, y=None): + ... self.is_fitted_ = True + ... return self + ... def predict(self, X): + ... 
return np.full(shape=X.shape[0], fill_value=self.param) + >>> estimator = MyEstimator(param=0) + >>> X = np.array([[1, 2], [2, 3], [3, 4]]) + >>> y = np.array([-1, 0, 1]) + >>> estimator.fit(X, y).predict(X) + array([0, 0, 0]) + >>> estimator.score(X, y) + 0.0 + """ + + _estimator_type = "regressor" + + def score(self, X, y, sample_weight=None): + """Return the coefficient of determination of the prediction. + + The coefficient of determination :math:`R^2` is defined as + :math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual + sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v` + is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``. + The best possible score is 1.0 and it can be negative (because the + model can be arbitrarily worse). A constant model that always predicts + the expected value of `y`, disregarding the input features, would get + a :math:`R^2` score of 0.0. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. For some estimators this may be a precomputed + kernel matrix or a list of generic objects instead with shape + ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted`` + is the number of samples used in the fitting for the estimator. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True values for `X`. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + :math:`R^2` of ``self.predict(X)`` w.r.t. `y`. + + Notes + ----- + The :math:`R^2` score used when calling ``score`` on a regressor uses + ``multioutput='uniform_average'`` from version 0.23 to keep consistent + with default value of :func:`~sklearn.metrics.r2_score`. + This influences the ``score`` method of all the multioutput + regressors (except for + :class:`~sklearn.multioutput.MultiOutputRegressor`). + """ + + from .metrics import r2_score + + y_pred = self.predict(X) + return r2_score(y, y_pred, sample_weight=sample_weight) + + def _more_tags(self): + return {"requires_y": True} + + +class ClusterMixin: + """Mixin class for all cluster estimators in scikit-learn. + + - `_estimator_type` class attribute defaulting to `"clusterer"`; + - `fit_predict` method returning the cluster labels associated to each sample. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, ClusterMixin + >>> class MyClusterer(ClusterMixin, BaseEstimator): + ... def fit(self, X, y=None): + ... self.labels_ = np.ones(shape=(len(X),), dtype=np.int64) + ... return self + >>> X = [[1, 2], [2, 3], [3, 4]] + >>> MyClusterer().fit_predict(X) + array([1, 1, 1]) + """ + + _estimator_type = "clusterer" + + def fit_predict(self, X, y=None, **kwargs): + """ + Perform clustering on `X` and returns cluster labels. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + y : Ignored + Not used, present for API consistency by convention. + + **kwargs : dict + Arguments to be passed to ``fit``. + + .. versionadded:: 1.4 + + Returns + ------- + labels : ndarray of shape (n_samples,), dtype=np.int64 + Cluster labels. + """ + # non-optimized default implementation; override when a better + # method is possible for a given clustering algorithm + self.fit(X, **kwargs) + return self.labels_ + + def _more_tags(self): + return {"preserves_dtype": []} + + +class BiclusterMixin: + """Mixin class for all bicluster estimators in scikit-learn. 
+ + This mixin defines the following functionality: + + - `biclusters_` property that returns the row and column indicators; + - `get_indices` method that returns the row and column indices of a bicluster; + - `get_shape` method that returns the shape of a bicluster; + - `get_submatrix` method that returns the submatrix corresponding to a bicluster. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, BiclusterMixin + >>> class DummyBiClustering(BiclusterMixin, BaseEstimator): + ... def fit(self, X, y=None): + ... self.rows_ = np.ones(shape=(1, X.shape[0]), dtype=bool) + ... self.columns_ = np.ones(shape=(1, X.shape[1]), dtype=bool) + ... return self + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> bicluster = DummyBiClustering().fit(X) + >>> hasattr(bicluster, "biclusters_") + True + >>> bicluster.get_indices(0) + (array([0, 1, 2, 3, 4, 5]), array([0, 1])) + """ + + @property + def biclusters_(self): + """Convenient way to get row and column indicators together. + + Returns the ``rows_`` and ``columns_`` members. + """ + return self.rows_, self.columns_ + + def get_indices(self, i): + """Row and column indices of the `i`'th bicluster. + + Only works if ``rows_`` and ``columns_`` attributes exist. + + Parameters + ---------- + i : int + The index of the cluster. + + Returns + ------- + row_ind : ndarray, dtype=np.intp + Indices of rows in the dataset that belong to the bicluster. + col_ind : ndarray, dtype=np.intp + Indices of columns in the dataset that belong to the bicluster. + """ + rows = self.rows_[i] + columns = self.columns_[i] + return np.nonzero(rows)[0], np.nonzero(columns)[0] + + def get_shape(self, i): + """Shape of the `i`'th bicluster. + + Parameters + ---------- + i : int + The index of the cluster. + + Returns + ------- + n_rows : int + Number of rows in the bicluster. + + n_cols : int + Number of columns in the bicluster. + """ + indices = self.get_indices(i) + return tuple(len(i) for i in indices) + + def get_submatrix(self, i, data): + """Return the submatrix corresponding to bicluster `i`. + + Parameters + ---------- + i : int + The index of the cluster. + data : array-like of shape (n_samples, n_features) + The data. + + Returns + ------- + submatrix : ndarray of shape (n_rows, n_cols) + The submatrix corresponding to bicluster `i`. + + Notes + ----- + Works with sparse matrices. Only works if ``rows_`` and + ``columns_`` attributes exist. + """ + from .utils.validation import check_array + + data = check_array(data, accept_sparse="csr") + row_ind, col_ind = self.get_indices(i) + return data[row_ind[:, np.newaxis], col_ind] + + +class TransformerMixin(_SetOutputMixin): + """Mixin class for all transformers in scikit-learn. + + This mixin defines the following functionality: + + - a `fit_transform` method that delegates to `fit` and `transform`; + - a `set_output` method to output `X` as a specific container type. + + If :term:`get_feature_names_out` is defined, then :class:`BaseEstimator` will + automatically wrap `transform` and `fit_transform` to follow the `set_output` + API. See the :ref:`developer_api_set_output` for details. + + :class:`OneToOneFeatureMixin` and + :class:`ClassNamePrefixFeaturesOutMixin` are helpful mixins for + defining :term:`get_feature_names_out`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, TransformerMixin + >>> class MyTransformer(TransformerMixin, BaseEstimator): + ... def __init__(self, *, param=1): + ... 
self.param = param + ... def fit(self, X, y=None): + ... return self + ... def transform(self, X): + ... return np.full(shape=len(X), fill_value=self.param) + >>> transformer = MyTransformer() + >>> X = [[1, 2], [2, 3], [3, 4]] + >>> transformer.fit_transform(X) + array([1, 1, 1]) + """ + + def fit_transform(self, X, y=None, **fit_params): + """ + Fit to data, then transform it. + + Fits transformer to `X` and `y` with optional parameters `fit_params` + and returns a transformed version of `X`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input samples. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + **fit_params : dict + Additional fit parameters. + + Returns + ------- + X_new : ndarray array of shape (n_samples, n_features_new) + Transformed array. + """ + # non-optimized default implementation; override when a better + # method is possible for a given clustering algorithm + + # we do not route parameters here, since consumers don't route. But + # since it's possible for a `transform` method to also consume + # metadata, we check if that's the case, and we raise a warning telling + # users that they should implement a custom `fit_transform` method + # to forward metadata to `transform` as well. + # + # For that, we calculate routing and check if anything would be routed + # to `transform` if we were to route them. + if _routing_enabled(): + transform_params = self.get_metadata_routing().consumes( + method="transform", params=fit_params.keys() + ) + if transform_params: + warnings.warn( + ( + f"This object ({self.__class__.__name__}) has a `transform`" + " method which consumes metadata, but `fit_transform` does not" + " forward metadata to `transform`. Please implement a custom" + " `fit_transform` method to forward metadata to `transform` as" + " well. Alternatively, you can explicitly do" + " `set_transform_request`and set all values to `False` to" + " disable metadata routed to `transform`, if that's an option." + ), + UserWarning, + ) + + if y is None: + # fit method of arity 1 (unsupervised transformation) + return self.fit(X, **fit_params).transform(X) + else: + # fit method of arity 2 (supervised transformation) + return self.fit(X, y, **fit_params).transform(X) + + +class OneToOneFeatureMixin: + """Provides `get_feature_names_out` for simple transformers. + + This mixin assumes there's a 1-to-1 correspondence between input features + and output features, such as :class:`~sklearn.preprocessing.StandardScaler`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import OneToOneFeatureMixin + >>> class MyEstimator(OneToOneFeatureMixin): + ... def fit(self, X, y=None): + ... self.n_features_in_ = X.shape[1] + ... return self + >>> X = np.array([[1, 2], [3, 4]]) + >>> MyEstimator().fit(X).get_feature_names_out() + array(['x0', 'x1'], dtype=object) + """ + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. 
+
+ Returns
+ -------
+ feature_names_out : ndarray of str objects
+ Same as input features.
+ """
+ check_is_fitted(self, "n_features_in_")
+ return _check_feature_names_in(self, input_features)
+
+
+ class ClassNamePrefixFeaturesOutMixin:
+ """Mixin class for transformers that generate their own names by prefixing.
+
+ This mixin is useful when the transformer needs to generate its own feature
+ names out, such as :class:`~sklearn.decomposition.PCA`. For example, if
+ :class:`~sklearn.decomposition.PCA` outputs 3 features, then the generated feature
+ names out are: `["pca0", "pca1", "pca2"]`.
+
+ This mixin assumes that a `_n_features_out` attribute is defined when the
+ transformer is fitted. `_n_features_out` is the number of output features
+ that the transformer will return in `transform` or `fit_transform`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from sklearn.base import ClassNamePrefixFeaturesOutMixin
+ >>> class MyEstimator(ClassNamePrefixFeaturesOutMixin):
+ ... def fit(self, X, y=None):
+ ... self._n_features_out = X.shape[1]
+ ... return self
+ >>> X = np.array([[1, 2], [3, 4]])
+ >>> MyEstimator().fit(X).get_feature_names_out()
+ array(['myestimator0', 'myestimator1'], dtype=object)
+ """
+
+ def get_feature_names_out(self, input_features=None):
+ """Get output feature names for transformation.
+
+ The feature names out will be prefixed by the lowercased class name. For
+ example, if the transformer outputs 3 features, then the feature names
+ out are: `["class_name0", "class_name1", "class_name2"]`.
+
+ Parameters
+ ----------
+ input_features : array-like of str or None, default=None
+ Only used to validate feature names with the names seen in `fit`.
+
+ Returns
+ -------
+ feature_names_out : ndarray of str objects
+ Transformed feature names.
+ """
+ check_is_fitted(self, "_n_features_out")
+ return _generate_get_feature_names_out(
+ self, self._n_features_out, input_features=input_features
+ )
+
+
+ class DensityMixin:
+ """Mixin class for all density estimators in scikit-learn.
+
+ This mixin defines the following functionality:
+
+ - `_estimator_type` class attribute defaulting to `"DensityEstimator"`;
+ - `score` method that defaults to a no-op.
+
+ Examples
+ --------
+ >>> from sklearn.base import DensityMixin
+ >>> class MyEstimator(DensityMixin):
+ ... def fit(self, X, y=None):
+ ... self.is_fitted_ = True
+ ... return self
+ >>> estimator = MyEstimator()
+ >>> hasattr(estimator, "score")
+ True
+ """
+
+ _estimator_type = "DensityEstimator"
+
+ def score(self, X, y=None):
+ """Return the score of the model on the data `X`.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ Test samples.
+
+ y : Ignored
+ Not used, present for API consistency by convention.
+
+ Returns
+ -------
+ score : float
+ """
+ pass
+
+
+ class OutlierMixin:
+ """Mixin class for all outlier detection estimators in scikit-learn.
+
+ This mixin defines the following functionality:
+
+ - `_estimator_type` class attribute defaulting to `"outlier_detector"`;
+ - `fit_predict` method that defaults to calling `fit` and then `predict`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from sklearn.base import BaseEstimator, OutlierMixin
+ >>> class MyEstimator(OutlierMixin):
+ ... def fit(self, X, y=None):
+ ... self.is_fitted_ = True
+ ... return self
+ ... def predict(self, X):
+ ... return np.ones(shape=len(X))
+ >>> estimator = MyEstimator()
+ >>> X = np.array([[1, 2], [2, 3], [3, 4]])
+ >>> estimator.fit_predict(X)
+ array([1., 1., 1.])
+ """
+
+ _estimator_type = "outlier_detector"
+
+ def fit_predict(self, X, y=None, **kwargs):
+ """Perform fit on X and return labels for X.
+
+ Returns -1 for outliers and 1 for inliers.
+
+ Parameters
+ ----------
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
+ The input samples.
+
+ y : Ignored
+ Not used, present for API consistency by convention.
+
+ **kwargs : dict
+ Arguments to be passed to ``fit``.
+
+ .. versionadded:: 1.4
+
+ Returns
+ -------
+ y : ndarray of shape (n_samples,)
+ 1 for inliers, -1 for outliers.
+ """
+ # we do not route parameters here, since consumers don't route. But
+ # since it's possible for a `predict` method to also consume
+ # metadata, we check if that's the case, and we raise a warning telling
+ # users that they should implement a custom `fit_predict` method
+ # to forward metadata to `predict` as well.
+ #
+ # For that, we calculate routing and check if anything would be routed
+ # to `predict` if we were to route them.
+ if _routing_enabled():
+ transform_params = self.get_metadata_routing().consumes(
+ method="predict", params=kwargs.keys()
+ )
+ if transform_params:
+ warnings.warn(
+ (
+ f"This object ({self.__class__.__name__}) has a `predict` "
+ "method which consumes metadata, but `fit_predict` does not "
+ "forward metadata to `predict`. Please implement a custom "
+ "`fit_predict` method to forward metadata to `predict` as "
+ "well. Alternatively, you can explicitly do "
+ "`set_predict_request` and set all values to `False` to "
+ "disable metadata routed to `predict`, if that's an option."
+ ),
+ UserWarning,
+ )
+
+ # override for transductive outlier detectors like LocalOutlierFactor
+ return self.fit(X, **kwargs).predict(X)
+
+
+ class MetaEstimatorMixin:
+ """Mixin class for all meta estimators in scikit-learn.
+
+ This mixin defines the following functionality:
+
+ - `_required_parameters` class attribute that specifies the mandatory `estimator` parameter.
+
+ Examples
+ --------
+ >>> from sklearn.base import MetaEstimatorMixin
+ >>> from sklearn.datasets import load_iris
+ >>> from sklearn.linear_model import LogisticRegression
+ >>> class MyEstimator(MetaEstimatorMixin):
+ ... def __init__(self, *, estimator=None):
+ ... self.estimator = estimator
+ ... def fit(self, X, y=None):
+ ... if self.estimator is None:
+ ... self.estimator_ = LogisticRegression()
+ ... else:
+ ... self.estimator_ = self.estimator
+ ... return self
+ >>> X, y = load_iris(return_X_y=True)
+ >>> estimator = MyEstimator().fit(X, y)
+ >>> estimator.estimator_
+ LogisticRegression()
+ """
+
+ _required_parameters = ["estimator"]
+
+
+ class MultiOutputMixin:
+ """Mixin to mark estimators that support multioutput."""
+
+ def _more_tags(self):
+ return {"multioutput": True}
+
+
+ class _UnstableArchMixin:
+ """Mark estimators that are non-deterministic on 32bit or PowerPC."""
+
+ def _more_tags(self):
+ return {
+ "non_deterministic": _IS_32BIT or platform.machine().startswith(
+ ("ppc", "powerpc")
+ )
+ }
+
+
+ def is_classifier(estimator):
+ """Return True if the given estimator is (probably) a classifier.
+
+ Parameters
+ ----------
+ estimator : object
+ Estimator object to test.
+
+ Returns
+ -------
+ out : bool
+ True if estimator is a classifier and False otherwise.
+ + Examples + -------- + >>> from sklearn.base import is_classifier + >>> from sklearn.svm import SVC, SVR + >>> classifier = SVC() + >>> regressor = SVR() + >>> is_classifier(classifier) + True + >>> is_classifier(regressor) + False + """ + return getattr(estimator, "_estimator_type", None) == "classifier" + + +def is_regressor(estimator): + """Return True if the given estimator is (probably) a regressor. + + Parameters + ---------- + estimator : estimator instance + Estimator object to test. + + Returns + ------- + out : bool + True if estimator is a regressor and False otherwise. + + Examples + -------- + >>> from sklearn.base import is_regressor + >>> from sklearn.svm import SVC, SVR + >>> classifier = SVC() + >>> regressor = SVR() + >>> is_regressor(classifier) + False + >>> is_regressor(regressor) + True + """ + return getattr(estimator, "_estimator_type", None) == "regressor" + + +def is_outlier_detector(estimator): + """Return True if the given estimator is (probably) an outlier detector. + + Parameters + ---------- + estimator : estimator instance + Estimator object to test. + + Returns + ------- + out : bool + True if estimator is an outlier detector and False otherwise. + """ + return getattr(estimator, "_estimator_type", None) == "outlier_detector" + + +def _fit_context(*, prefer_skip_nested_validation): + """Decorator to run the fit methods of estimators within context managers. + + Parameters + ---------- + prefer_skip_nested_validation : bool + If True, the validation of parameters of inner estimators or functions + called during fit will be skipped. + + This is useful to avoid validating many times the parameters passed by the + user from the public facing API. It's also useful to avoid validating + parameters that we pass internally to inner functions that are guaranteed to + be valid by the test suite. + + It should be set to True for most estimators, except for those that receive + non-validated objects as parameters, such as meta-estimators that are given + estimator objects. + + Returns + ------- + decorated_fit : method + The decorated fit method. 
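+
+ Examples
+ --------
+ A minimal sketch of the intended usage (the estimator below is hypothetical
+ and only illustrates where the decorator is applied)::
+
+ class MyEstimator(BaseEstimator):
+
+ @_fit_context(prefer_skip_nested_validation=True)
+ def fit(self, X, y=None):
+ # parameter validation has already run by the time this body executes
+ return self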
+ """ + + def decorator(fit_method): + @functools.wraps(fit_method) + def wrapper(estimator, *args, **kwargs): + global_skip_validation = get_config()["skip_parameter_validation"] + + # we don't want to validate again for each call to partial_fit + partial_fit_and_fitted = ( + fit_method.__name__ == "partial_fit" and _is_fitted(estimator) + ) + + if not global_skip_validation and not partial_fit_and_fitted: + estimator._validate_params() + + with config_context( + skip_parameter_validation=( + prefer_skip_nested_validation or global_skip_validation + ) + ): + return fit_method(estimator, *args, **kwargs) + + return wrapper + + return decorator diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/calibration.py b/llmeval-env/lib/python3.10/site-packages/sklearn/calibration.py new file mode 100644 index 0000000000000000000000000000000000000000..c3f0b8ec59551b336ccf7e7692cc88a50d75de4d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/calibration.py @@ -0,0 +1,1410 @@ +"""Calibration of predicted probabilities.""" + +# Author: Alexandre Gramfort +# Balazs Kegl +# Jan Hendrik Metzen +# Mathieu Blondel +# +# License: BSD 3 clause + +import warnings +from inspect import signature +from math import log +from numbers import Integral, Real + +import numpy as np +from scipy.optimize import minimize +from scipy.special import expit + +from sklearn.utils import Bunch + +from ._loss import HalfBinomialLoss +from .base import ( + BaseEstimator, + ClassifierMixin, + MetaEstimatorMixin, + RegressorMixin, + _fit_context, + clone, +) +from .isotonic import IsotonicRegression +from .model_selection import check_cv, cross_val_predict +from .preprocessing import LabelEncoder, label_binarize +from .svm import LinearSVC +from .utils import ( + _safe_indexing, + column_or_1d, + indexable, +) +from .utils._param_validation import ( + HasMethods, + Interval, + StrOptions, + validate_params, +) +from .utils._plotting import _BinaryClassifierCurveDisplayMixin +from .utils._response import _get_response_values, _process_predict_proba +from .utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _routing_enabled, + process_routing, +) +from .utils.multiclass import check_classification_targets +from .utils.parallel import Parallel, delayed +from .utils.validation import ( + _check_method_params, + _check_pos_label_consistency, + _check_response_method, + _check_sample_weight, + _num_samples, + check_consistent_length, + check_is_fitted, +) + + +class CalibratedClassifierCV(ClassifierMixin, MetaEstimatorMixin, BaseEstimator): + """Probability calibration with isotonic regression or logistic regression. + + This class uses cross-validation to both estimate the parameters of a + classifier and subsequently calibrate a classifier. With default + `ensemble=True`, for each cv split it + fits a copy of the base estimator to the training subset, and calibrates it + using the testing subset. For prediction, predicted probabilities are + averaged across these individual calibrated classifiers. When + `ensemble=False`, cross-validation is used to obtain unbiased predictions, + via :func:`~sklearn.model_selection.cross_val_predict`, which are then + used for calibration. For prediction, the base estimator, trained using all + the data, is used. This is the prediction method implemented when + `probabilities=True` for :class:`~sklearn.svm.SVC` and :class:`~sklearn.svm.NuSVC` + estimators (see :ref:`User Guide ` for details). 
+ + Already fitted classifiers can be calibrated via the parameter + `cv="prefit"`. In this case, no cross-validation is used and all provided + data is used for calibration. The user has to take care manually that data + for model fitting and calibration are disjoint. + + The calibration is based on the :term:`decision_function` method of the + `estimator` if it exists, else on :term:`predict_proba`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator instance, default=None + The classifier whose output need to be calibrated to provide more + accurate `predict_proba` outputs. The default classifier is + a :class:`~sklearn.svm.LinearSVC`. + + .. versionadded:: 1.2 + + method : {'sigmoid', 'isotonic'}, default='sigmoid' + The method to use for calibration. Can be 'sigmoid' which + corresponds to Platt's method (i.e. a logistic regression model) or + 'isotonic' which is a non-parametric approach. It is not advised to + use isotonic calibration with too few calibration samples + ``(<<1000)`` since it tends to overfit. + + cv : int, cross-validation generator, iterable or "prefit", \ + default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if ``y`` is binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is + neither binary nor multiclass, :class:`~sklearn.model_selection.KFold` + is used. + + Refer to the :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + If "prefit" is passed, it is assumed that `estimator` has been + fitted already and all data is used for calibration. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. + + Base estimator clones are fitted in parallel across cross-validation + iterations. Therefore parallelism happens only when `cv != "prefit"`. + + See :term:`Glossary ` for more details. + + .. versionadded:: 0.24 + + ensemble : bool, default=True + Determines how the calibrator is fitted when `cv` is not `'prefit'`. + Ignored if `cv='prefit'`. + + If `True`, the `estimator` is fitted using training data, and + calibrated using testing data, for each `cv` fold. The final estimator + is an ensemble of `n_cv` fitted classifier and calibrator pairs, where + `n_cv` is the number of cross-validation folds. The output is the + average predicted probabilities of all pairs. + + If `False`, `cv` is used to compute unbiased predictions, via + :func:`~sklearn.model_selection.cross_val_predict`, which are then + used for calibration. At prediction time, the classifier used is the + `estimator` trained on all the data. + Note that this method is also internally implemented in + :mod:`sklearn.svm` estimators with the `probabilities=True` parameter. + + .. versionadded:: 0.24 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The class labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 1.0 + + calibrated_classifiers_ : list (len() equal to cv or 1 if `cv="prefit"` \ + or `ensemble=False`) + The list of classifier and calibrator pairs. + + - When `cv="prefit"`, the fitted `estimator` and fitted + calibrator. + - When `cv` is not "prefit" and `ensemble=True`, `n_cv` fitted + `estimator` and calibrator pairs. `n_cv` is the number of + cross-validation folds. + - When `cv` is not "prefit" and `ensemble=False`, the `estimator`, + fitted on all the data, and fitted calibrator. + + .. versionchanged:: 0.24 + Single calibrated classifier case when `ensemble=False`. + + See Also + -------- + calibration_curve : Compute true and predicted probabilities + for a calibration curve. + + References + ---------- + .. [1] Obtaining calibrated probability estimates from decision trees + and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001 + + .. [2] Transforming Classifier Scores into Accurate Multiclass + Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002) + + .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to + Regularized Likelihood Methods, J. Platt, (1999) + + .. [4] Predicting Good Probabilities with Supervised Learning, + A. Niculescu-Mizil & R. Caruana, ICML 2005 + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.naive_bayes import GaussianNB + >>> from sklearn.calibration import CalibratedClassifierCV + >>> X, y = make_classification(n_samples=100, n_features=2, + ... n_redundant=0, random_state=42) + >>> base_clf = GaussianNB() + >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3) + >>> calibrated_clf.fit(X, y) + CalibratedClassifierCV(...) + >>> len(calibrated_clf.calibrated_classifiers_) + 3 + >>> calibrated_clf.predict_proba(X)[:5, :] + array([[0.110..., 0.889...], + [0.072..., 0.927...], + [0.928..., 0.071...], + [0.928..., 0.071...], + [0.071..., 0.928...]]) + >>> from sklearn.model_selection import train_test_split + >>> X, y = make_classification(n_samples=100, n_features=2, + ... n_redundant=0, random_state=42) + >>> X_train, X_calib, y_train, y_calib = train_test_split( + ... X, y, random_state=42 + ... ) + >>> base_clf = GaussianNB() + >>> base_clf.fit(X_train, y_train) + GaussianNB() + >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv="prefit") + >>> calibrated_clf.fit(X_calib, y_calib) + CalibratedClassifierCV(...) + >>> len(calibrated_clf.calibrated_classifiers_) + 1 + >>> calibrated_clf.predict_proba([[-0.5, 0.5]]) + array([[0.936..., 0.063...]]) + """ + + _parameter_constraints: dict = { + "estimator": [ + HasMethods(["fit", "predict_proba"]), + HasMethods(["fit", "decision_function"]), + None, + ], + "method": [StrOptions({"isotonic", "sigmoid"})], + "cv": ["cv_object", StrOptions({"prefit"})], + "n_jobs": [Integral, None], + "ensemble": ["boolean"], + } + + def __init__( + self, + estimator=None, + *, + method="sigmoid", + cv=None, + n_jobs=None, + ensemble=True, + ): + self.estimator = estimator + self.method = method + self.cv = cv + self.n_jobs = n_jobs + self.ensemble = ensemble + + def _get_estimator(self): + """Resolve which estimator to return (default is LinearSVC)""" + if self.estimator is None: + # we want all classifiers that don't expose a random_state + # to be deterministic (and we don't want to expose this one). 
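+ # `dual="auto"` lets LinearSVC choose between the primal and dual
+ # formulations based on the data and the other parameters, and the
+ # `set_fit_request(sample_weight=True)` call below requests that routed
+ # `sample_weight` be forwarded to this default estimator when metadata
+ # routing is enabled.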
+ estimator = LinearSVC(random_state=0, dual="auto") + if _routing_enabled(): + estimator.set_fit_request(sample_weight=True) + else: + estimator = self.estimator + + return estimator + + @_fit_context( + # CalibratedClassifierCV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None, **fit_params): + """Fit the calibrated model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + + **fit_params : dict + Parameters to pass to the `fit` method of the underlying + classifier. + + Returns + ------- + self : object + Returns an instance of self. + """ + check_classification_targets(y) + X, y = indexable(X, y) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + estimator = self._get_estimator() + + self.calibrated_classifiers_ = [] + if self.cv == "prefit": + # `classes_` should be consistent with that of estimator + check_is_fitted(self.estimator, attributes=["classes_"]) + self.classes_ = self.estimator.classes_ + + predictions, _ = _get_response_values( + estimator, + X, + response_method=["decision_function", "predict_proba"], + ) + if predictions.ndim == 1: + # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` + predictions = predictions.reshape(-1, 1) + + calibrated_classifier = _fit_calibrator( + estimator, + predictions, + y, + self.classes_, + self.method, + sample_weight, + ) + self.calibrated_classifiers_.append(calibrated_classifier) + else: + # Set `classes_` using all `y` + label_encoder_ = LabelEncoder().fit(y) + self.classes_ = label_encoder_.classes_ + + if _routing_enabled(): + routed_params = process_routing( + self, + "fit", + sample_weight=sample_weight, + **fit_params, + ) + else: + # sample_weight checks + fit_parameters = signature(estimator.fit).parameters + supports_sw = "sample_weight" in fit_parameters + if sample_weight is not None and not supports_sw: + estimator_name = type(estimator).__name__ + warnings.warn( + f"Since {estimator_name} does not appear to accept" + " sample_weight, sample weights will only be used for the" + " calibration itself. This can be caused by a limitation of" + " the current scikit-learn API. See the following issue for" + " more details:" + " https://github.com/scikit-learn/scikit-learn/issues/21134." + " Be warned that the result of the calibration is likely to be" + " incorrect." + ) + routed_params = Bunch() + routed_params.splitter = Bunch(split={}) # no routing for splitter + routed_params.estimator = Bunch(fit=fit_params) + if sample_weight is not None and supports_sw: + routed_params.estimator.fit["sample_weight"] = sample_weight + + # Check that each cross-validation fold can have at least one + # example per class + if isinstance(self.cv, int): + n_folds = self.cv + elif hasattr(self.cv, "n_splits"): + n_folds = self.cv.n_splits + else: + n_folds = None + if n_folds and np.any( + [np.sum(y == class_) < n_folds for class_ in self.classes_] + ): + raise ValueError( + f"Requesting {n_folds}-fold " + "cross-validation but provided less than " + f"{n_folds} examples for at least one class." 
+ ) + cv = check_cv(self.cv, y, classifier=True) + + if self.ensemble: + parallel = Parallel(n_jobs=self.n_jobs) + self.calibrated_classifiers_ = parallel( + delayed(_fit_classifier_calibrator_pair)( + clone(estimator), + X, + y, + train=train, + test=test, + method=self.method, + classes=self.classes_, + sample_weight=sample_weight, + fit_params=routed_params.estimator.fit, + ) + for train, test in cv.split(X, y, **routed_params.splitter.split) + ) + else: + this_estimator = clone(estimator) + method_name = _check_response_method( + this_estimator, + ["decision_function", "predict_proba"], + ).__name__ + predictions = cross_val_predict( + estimator=this_estimator, + X=X, + y=y, + cv=cv, + method=method_name, + n_jobs=self.n_jobs, + params=routed_params.estimator.fit, + ) + if len(self.classes_) == 2: + # Ensure shape (n_samples, 1) in the binary case + if method_name == "predict_proba": + # Select the probability column of the postive class + predictions = _process_predict_proba( + y_pred=predictions, + target_type="binary", + classes=self.classes_, + pos_label=self.classes_[1], + ) + predictions = predictions.reshape(-1, 1) + + this_estimator.fit(X, y, **routed_params.estimator.fit) + # Note: Here we don't pass on fit_params because the supported + # calibrators don't support fit_params anyway + calibrated_classifier = _fit_calibrator( + this_estimator, + predictions, + y, + self.classes_, + self.method, + sample_weight, + ) + self.calibrated_classifiers_.append(calibrated_classifier) + + first_clf = self.calibrated_classifiers_[0].estimator + if hasattr(first_clf, "n_features_in_"): + self.n_features_in_ = first_clf.n_features_in_ + if hasattr(first_clf, "feature_names_in_"): + self.feature_names_in_ = first_clf.feature_names_in_ + return self + + def predict_proba(self, X): + """Calibrated probabilities of classification. + + This function returns calibrated probabilities of classification + according to each class on an array of test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The samples, as accepted by `estimator.predict_proba`. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + The predicted probas. + """ + check_is_fitted(self) + # Compute the arithmetic mean of the predictions of the calibrated + # classifiers + mean_proba = np.zeros((_num_samples(X), len(self.classes_))) + for calibrated_classifier in self.calibrated_classifiers_: + proba = calibrated_classifier.predict_proba(X) + mean_proba += proba + + mean_proba /= len(self.calibrated_classifiers_) + + return mean_proba + + def predict(self, X): + """Predict the target of new samples. + + The predicted class is the class that has the highest probability, + and can thus be different from the prediction of the uncalibrated classifier. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The samples, as accepted by `estimator.predict`. + + Returns + ------- + C : ndarray of shape (n_samples,) + The predicted class. + """ + check_is_fitted(self) + return self.classes_[np.argmax(self.predict_proba(X), axis=1)] + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + estimator=self._get_estimator(), + method_mapping=MethodMapping().add(callee="fit", caller="fit"), + ) + .add( + splitter=self.cv, + method_mapping=MethodMapping().add(callee="split", caller="fit"), + ) + ) + return router + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "Due to the cross-validation and sample ordering, removing a sample" + " is not strictly equal to putting is weight to zero. Specific unit" + " tests are added for CalibratedClassifierCV specifically." + ), + } + } + + +def _fit_classifier_calibrator_pair( + estimator, + X, + y, + train, + test, + method, + classes, + sample_weight=None, + fit_params=None, +): + """Fit a classifier/calibration pair on a given train/test split. + + Fit the classifier on the train set, compute its predictions on the test + set and use the predictions as input to fit the calibrator along with the + test labels. + + Parameters + ---------- + estimator : estimator instance + Cloned base estimator. + + X : array-like, shape (n_samples, n_features) + Sample data. + + y : array-like, shape (n_samples,) + Targets. + + train : ndarray, shape (n_train_indices,) + Indices of the training subset. + + test : ndarray, shape (n_test_indices,) + Indices of the testing subset. + + method : {'sigmoid', 'isotonic'} + Method to use for calibration. + + classes : ndarray, shape (n_classes,) + The target classes. + + sample_weight : array-like, default=None + Sample weights for `X`. + + fit_params : dict, default=None + Parameters to pass to the `fit` method of the underlying + classifier. + + Returns + ------- + calibrated_classifier : _CalibratedClassifier instance + """ + fit_params_train = _check_method_params(X, params=fit_params, indices=train) + X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train) + X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test) + + estimator.fit(X_train, y_train, **fit_params_train) + + predictions, _ = _get_response_values( + estimator, + X_test, + response_method=["decision_function", "predict_proba"], + ) + if predictions.ndim == 1: + # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` + predictions = predictions.reshape(-1, 1) + + sw_test = None if sample_weight is None else _safe_indexing(sample_weight, test) + calibrated_classifier = _fit_calibrator( + estimator, predictions, y_test, classes, method, sample_weight=sw_test + ) + return calibrated_classifier + + +def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None): + """Fit calibrator(s) and return a `_CalibratedClassifier` + instance. + + `n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted. + However, if `n_classes` equals 2, one calibrator is fitted. + + Parameters + ---------- + clf : estimator instance + Fitted classifier. + + predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \ + when binary. + Raw predictions returned by the un-calibrated base classifier. + + y : array-like, shape (n_samples,) + The targets. + + classes : ndarray, shape (n_classes,) + All the prediction classes. + + method : {'sigmoid', 'isotonic'} + The method to use for calibration. + + sample_weight : ndarray, shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. 
+ + Returns + ------- + pipeline : _CalibratedClassifier instance + """ + Y = label_binarize(y, classes=classes) + label_encoder = LabelEncoder().fit(classes) + pos_class_indices = label_encoder.transform(clf.classes_) + calibrators = [] + for class_idx, this_pred in zip(pos_class_indices, predictions.T): + if method == "isotonic": + calibrator = IsotonicRegression(out_of_bounds="clip") + else: # "sigmoid" + calibrator = _SigmoidCalibration() + calibrator.fit(this_pred, Y[:, class_idx], sample_weight) + calibrators.append(calibrator) + + pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes) + return pipeline + + +class _CalibratedClassifier: + """Pipeline-like chaining a fitted classifier and its fitted calibrators. + + Parameters + ---------- + estimator : estimator instance + Fitted classifier. + + calibrators : list of fitted estimator instances + List of fitted calibrators (either 'IsotonicRegression' or + '_SigmoidCalibration'). The number of calibrators equals the number of + classes. However, if there are 2 classes, the list contains only one + fitted calibrator. + + classes : array-like of shape (n_classes,) + All the prediction classes. + + method : {'sigmoid', 'isotonic'}, default='sigmoid' + The method to use for calibration. Can be 'sigmoid' which + corresponds to Platt's method or 'isotonic' which is a + non-parametric approach based on isotonic regression. + """ + + def __init__(self, estimator, calibrators, *, classes, method="sigmoid"): + self.estimator = estimator + self.calibrators = calibrators + self.classes = classes + self.method = method + + def predict_proba(self, X): + """Calculate calibrated probabilities. + + Calculates classification calibrated probabilities + for each class, in a one-vs-all manner, for `X`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The sample data. + + Returns + ------- + proba : array, shape (n_samples, n_classes) + The predicted probabilities. Can be exact zeros. + """ + predictions, _ = _get_response_values( + self.estimator, + X, + response_method=["decision_function", "predict_proba"], + ) + if predictions.ndim == 1: + # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` + predictions = predictions.reshape(-1, 1) + + n_classes = len(self.classes) + + label_encoder = LabelEncoder().fit(self.classes) + pos_class_indices = label_encoder.transform(self.estimator.classes_) + + proba = np.zeros((_num_samples(X), n_classes)) + for class_idx, this_pred, calibrator in zip( + pos_class_indices, predictions.T, self.calibrators + ): + if n_classes == 2: + # When binary, `predictions` consists only of predictions for + # clf.classes_[1] but `pos_class_indices` = 0 + class_idx += 1 + proba[:, class_idx] = calibrator.predict(this_pred) + + # Normalize the probabilities + if n_classes == 2: + proba[:, 0] = 1.0 - proba[:, 1] + else: + denominator = np.sum(proba, axis=1)[:, np.newaxis] + # In the edge case where for each class calibrator returns a null + # probability for a given sample, use the uniform distribution + # instead. 
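+ # `np.divide` with `out=uniform_proba` and `where=denominator != 0`
+ # only overwrites rows whose denominator is non-zero, so samples for
+ # which every calibrator returned zero keep the uniform 1 / n_classes
+ # fallback values.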
+ uniform_proba = np.full_like(proba, 1 / n_classes) + proba = np.divide( + proba, denominator, out=uniform_proba, where=denominator != 0 + ) + + # Deal with cases where the predicted probability minimally exceeds 1.0 + proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0 + + return proba + + +# The max_abs_prediction_threshold was approximated using +# logit(np.finfo(np.float64).eps) which is about -36 +def _sigmoid_calibration( + predictions, y, sample_weight=None, max_abs_prediction_threshold=30 +): + """Probability Calibration with sigmoid method (Platt 2000) + + Parameters + ---------- + predictions : ndarray of shape (n_samples,) + The decision function or predict proba for the samples. + + y : ndarray of shape (n_samples,) + The targets. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + + Returns + ------- + a : float + The slope. + + b : float + The intercept. + + References + ---------- + Platt, "Probabilistic Outputs for Support Vector Machines" + """ + predictions = column_or_1d(predictions) + y = column_or_1d(y) + + F = predictions # F follows Platt's notations + + scale_constant = 1.0 + max_prediction = np.max(np.abs(F)) + + # If the predictions have large values we scale them in order to bring + # them within a suitable range. This has no effect on the final + # (prediction) result because linear models like Logisitic Regression + # without a penalty are invariant to multiplying the features by a + # constant. + if max_prediction >= max_abs_prediction_threshold: + scale_constant = max_prediction + # We rescale the features in a copy: inplace rescaling could confuse + # the caller and make the code harder to reason about. + F = F / scale_constant + + # Bayesian priors (see Platt end of section 2.2): + # It corresponds to the number of samples, taking into account the + # `sample_weight`. + mask_negative_samples = y <= 0 + if sample_weight is not None: + prior0 = (sample_weight[mask_negative_samples]).sum() + prior1 = (sample_weight[~mask_negative_samples]).sum() + else: + prior0 = float(np.sum(mask_negative_samples)) + prior1 = y.shape[0] - prior0 + T = np.zeros_like(y, dtype=predictions.dtype) + T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0) + T[y <= 0] = 1.0 / (prior0 + 2.0) + + bin_loss = HalfBinomialLoss() + + def loss_grad(AB): + # .astype below is needed to ensure y_true and raw_prediction have the + # same dtype. With result = np.float64(0) * np.array([1, 2], dtype=np.float32) + # - in Numpy 2, result.dtype is float64 + # - in Numpy<2, result.dtype is float32 + raw_prediction = -(AB[0] * F + AB[1]).astype(dtype=predictions.dtype) + l, g = bin_loss.loss_gradient( + y_true=T, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + loss = l.sum() + # TODO: Remove casting to np.float64 when minimum supported SciPy is 1.11.2 + # With SciPy >= 1.11.2, the LBFGS implementation will cast to float64 + # https://github.com/scipy/scipy/pull/18825. + # Here we cast to float64 to support SciPy < 1.11.2 + grad = np.asarray([-g @ F, -g.sum()], dtype=np.float64) + return loss, grad + + AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))]) + + opt_result = minimize( + loss_grad, + AB0, + method="L-BFGS-B", + jac=True, + options={ + "gtol": 1e-6, + "ftol": 64 * np.finfo(float).eps, + }, + ) + AB_ = opt_result.x + + # The tuned multiplicative parameter is converted back to the original + # input feature scale. 
The offset parameter does not need rescaling since + # we did not rescale the outcome variable. + return AB_[0] / scale_constant, AB_[1] + + +class _SigmoidCalibration(RegressorMixin, BaseEstimator): + """Sigmoid regression model. + + Attributes + ---------- + a_ : float + The slope. + + b_ : float + The intercept. + """ + + def fit(self, X, y, sample_weight=None): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples,) + Training data. + + y : array-like of shape (n_samples,) + Training target. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + + Returns + ------- + self : object + Returns an instance of self. + """ + X = column_or_1d(X) + y = column_or_1d(y) + X, y = indexable(X, y) + + self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight) + return self + + def predict(self, T): + """Predict new data by linear interpolation. + + Parameters + ---------- + T : array-like of shape (n_samples,) + Data to predict from. + + Returns + ------- + T_ : ndarray of shape (n_samples,) + The predicted data. + """ + T = column_or_1d(T) + return expit(-(self.a_ * T + self.b_)) + + +@validate_params( + { + "y_true": ["array-like"], + "y_prob": ["array-like"], + "pos_label": [Real, str, "boolean", None], + "n_bins": [Interval(Integral, 1, None, closed="left")], + "strategy": [StrOptions({"uniform", "quantile"})], + }, + prefer_skip_nested_validation=True, +) +def calibration_curve( + y_true, + y_prob, + *, + pos_label=None, + n_bins=5, + strategy="uniform", +): + """Compute true and predicted probabilities for a calibration curve. + + The method assumes the inputs come from a binary classifier, and + discretize the [0, 1] interval into bins. + + Calibration curves may also be referred to as reliability diagrams. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True targets. + + y_prob : array-like of shape (n_samples,) + Probabilities of the positive class. + + pos_label : int, float, bool or str, default=None + The label of the positive class. + + .. versionadded:: 1.1 + + n_bins : int, default=5 + Number of bins to discretize the [0, 1] interval. A bigger number + requires more data. Bins with no samples (i.e. without + corresponding values in `y_prob`) will not be returned, thus the + returned arrays may have less than `n_bins` values. + + strategy : {'uniform', 'quantile'}, default='uniform' + Strategy used to define the widths of the bins. + + uniform + The bins have identical widths. + quantile + The bins have the same number of samples and depend on `y_prob`. + + Returns + ------- + prob_true : ndarray of shape (n_bins,) or smaller + The proportion of samples whose class is the positive class, in each + bin (fraction of positives). + + prob_pred : ndarray of shape (n_bins,) or smaller + The mean predicted probability in each bin. + + References + ---------- + Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good + Probabilities With Supervised Learning, in Proceedings of the 22nd + International Conference on Machine Learning (ICML). + See section 4 (Qualitative Analysis of Predictions). 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.calibration import calibration_curve + >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1]) + >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.]) + >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3) + >>> prob_true + array([0. , 0.5, 1. ]) + >>> prob_pred + array([0.2 , 0.525, 0.85 ]) + """ + y_true = column_or_1d(y_true) + y_prob = column_or_1d(y_prob) + check_consistent_length(y_true, y_prob) + pos_label = _check_pos_label_consistency(pos_label, y_true) + + if y_prob.min() < 0 or y_prob.max() > 1: + raise ValueError("y_prob has values outside [0, 1].") + + labels = np.unique(y_true) + if len(labels) > 2: + raise ValueError( + f"Only binary classification is supported. Provided labels {labels}." + ) + y_true = y_true == pos_label + + if strategy == "quantile": # Determine bin edges by distribution of data + quantiles = np.linspace(0, 1, n_bins + 1) + bins = np.percentile(y_prob, quantiles * 100) + elif strategy == "uniform": + bins = np.linspace(0.0, 1.0, n_bins + 1) + else: + raise ValueError( + "Invalid entry to 'strategy' input. Strategy " + "must be either 'quantile' or 'uniform'." + ) + + binids = np.searchsorted(bins[1:-1], y_prob) + + bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) + bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) + bin_total = np.bincount(binids, minlength=len(bins)) + + nonzero = bin_total != 0 + prob_true = bin_true[nonzero] / bin_total[nonzero] + prob_pred = bin_sums[nonzero] / bin_total[nonzero] + + return prob_true, prob_pred + + +class CalibrationDisplay(_BinaryClassifierCurveDisplayMixin): + """Calibration curve (also known as reliability diagram) visualization. + + It is recommended to use + :func:`~sklearn.calibration.CalibrationDisplay.from_estimator` or + :func:`~sklearn.calibration.CalibrationDisplay.from_predictions` + to create a `CalibrationDisplay`. All parameters are stored as attributes. + + Read more about calibration in the :ref:`User Guide ` and + more about the scikit-learn visualization API in :ref:`visualizations`. + + .. versionadded:: 1.0 + + Parameters + ---------- + prob_true : ndarray of shape (n_bins,) + The proportion of samples whose class is the positive class (fraction + of positives), in each bin. + + prob_pred : ndarray of shape (n_bins,) + The mean predicted probability in each bin. + + y_prob : ndarray of shape (n_samples,) + Probability estimates for the positive class, for each sample. + + estimator_name : str, default=None + Name of estimator. If None, the estimator name is not shown. + + pos_label : int, float, bool or str, default=None + The positive class when computing the calibration curve. + By default, `pos_label` is set to `estimators.classes_[1]` when using + `from_estimator` and set to 1 when using `from_predictions`. + + .. versionadded:: 1.1 + + Attributes + ---------- + line_ : matplotlib Artist + Calibration curve. + + ax_ : matplotlib Axes + Axes with calibration curve. + + figure_ : matplotlib Figure + Figure containing the curve. + + See Also + -------- + calibration_curve : Compute true and predicted probabilities for a + calibration curve. + CalibrationDisplay.from_predictions : Plot calibration curve using true + and predicted labels. + CalibrationDisplay.from_estimator : Plot calibration curve using an + estimator and data. 
+ + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.calibration import calibration_curve, CalibrationDisplay + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = LogisticRegression(random_state=0) + >>> clf.fit(X_train, y_train) + LogisticRegression(random_state=0) + >>> y_prob = clf.predict_proba(X_test)[:, 1] + >>> prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10) + >>> disp = CalibrationDisplay(prob_true, prob_pred, y_prob) + >>> disp.plot() + <...> + """ + + def __init__( + self, prob_true, prob_pred, y_prob, *, estimator_name=None, pos_label=None + ): + self.prob_true = prob_true + self.prob_pred = prob_pred + self.y_prob = y_prob + self.estimator_name = estimator_name + self.pos_label = pos_label + + def plot(self, *, ax=None, name=None, ref_line=True, **kwargs): + """Plot visualization. + + Extra keyword arguments will be passed to + :func:`matplotlib.pyplot.plot`. + + Parameters + ---------- + ax : Matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + name : str, default=None + Name for labeling curve. If `None`, use `estimator_name` if + not `None`, otherwise no labeling is shown. + + ref_line : bool, default=True + If `True`, plots a reference line representing a perfectly + calibrated classifier. + + **kwargs : dict + Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`. + + Returns + ------- + display : :class:`~sklearn.calibration.CalibrationDisplay` + Object that stores computed values. + """ + self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name) + + info_pos_label = ( + f"(Positive class: {self.pos_label})" if self.pos_label is not None else "" + ) + + line_kwargs = {"marker": "s", "linestyle": "-"} + if name is not None: + line_kwargs["label"] = name + line_kwargs.update(**kwargs) + + ref_line_label = "Perfectly calibrated" + existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1] + if ref_line and not existing_ref_line: + self.ax_.plot([0, 1], [0, 1], "k:", label=ref_line_label) + self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0] + + # We always have to show the legend for at least the reference line + self.ax_.legend(loc="lower right") + + xlabel = f"Mean predicted probability {info_pos_label}" + ylabel = f"Fraction of positives {info_pos_label}" + self.ax_.set(xlabel=xlabel, ylabel=ylabel) + + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + n_bins=5, + strategy="uniform", + pos_label=None, + name=None, + ref_line=True, + ax=None, + **kwargs, + ): + """Plot calibration curve using a binary classifier and data. + + A calibration curve, also known as a reliability diagram, uses inputs + from a binary classifier and plots the average predicted probability + for each bin against the fraction of positive classes, on the + y-axis. + + Extra keyword arguments will be passed to + :func:`matplotlib.pyplot.plot`. + + Read more about calibration in the :ref:`User Guide ` and + more about the scikit-learn visualization API in :ref:`visualizations`. + + .. 
versionadded:: 1.0 + + Parameters + ---------- + estimator : estimator instance + Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline` + in which the last estimator is a classifier. The classifier must + have a :term:`predict_proba` method. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input values. + + y : array-like of shape (n_samples,) + Binary target values. + + n_bins : int, default=5 + Number of bins to discretize the [0, 1] interval into when + calculating the calibration curve. A bigger number requires more + data. + + strategy : {'uniform', 'quantile'}, default='uniform' + Strategy used to define the widths of the bins. + + - `'uniform'`: The bins have identical widths. + - `'quantile'`: The bins have the same number of samples and depend + on predicted probabilities. + + pos_label : int, float, bool or str, default=None + The positive class when computing the calibration curve. + By default, `estimators.classes_[1]` is considered as the + positive class. + + .. versionadded:: 1.1 + + name : str, default=None + Name for labeling curve. If `None`, the name of the estimator is + used. + + ref_line : bool, default=True + If `True`, plots a reference line representing a perfectly + calibrated classifier. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + **kwargs : dict + Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`. + + Returns + ------- + display : :class:`~sklearn.calibration.CalibrationDisplay`. + Object that stores computed values. + + See Also + -------- + CalibrationDisplay.from_predictions : Plot calibration curve using true + and predicted labels. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.calibration import CalibrationDisplay + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = LogisticRegression(random_state=0) + >>> clf.fit(X_train, y_train) + LogisticRegression(random_state=0) + >>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test) + >>> plt.show() + """ + y_prob, pos_label, name = cls._validate_and_get_response_values( + estimator, + X, + y, + response_method="predict_proba", + pos_label=pos_label, + name=name, + ) + + return cls.from_predictions( + y, + y_prob, + n_bins=n_bins, + strategy=strategy, + pos_label=pos_label, + name=name, + ref_line=ref_line, + ax=ax, + **kwargs, + ) + + @classmethod + def from_predictions( + cls, + y_true, + y_prob, + *, + n_bins=5, + strategy="uniform", + pos_label=None, + name=None, + ref_line=True, + ax=None, + **kwargs, + ): + """Plot calibration curve using true labels and predicted probabilities. + + Calibration curve, also known as reliability diagram, uses inputs + from a binary classifier and plots the average predicted probability + for each bin against the fraction of positive classes, on the + y-axis. + + Extra keyword arguments will be passed to + :func:`matplotlib.pyplot.plot`. + + Read more about calibration in the :ref:`User Guide ` and + more about the scikit-learn visualization API in :ref:`visualizations`. + + .. versionadded:: 1.0 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True labels. 
+ + y_prob : array-like of shape (n_samples,) + The predicted probabilities of the positive class. + + n_bins : int, default=5 + Number of bins to discretize the [0, 1] interval into when + calculating the calibration curve. A bigger number requires more + data. + + strategy : {'uniform', 'quantile'}, default='uniform' + Strategy used to define the widths of the bins. + + - `'uniform'`: The bins have identical widths. + - `'quantile'`: The bins have the same number of samples and depend + on predicted probabilities. + + pos_label : int, float, bool or str, default=None + The positive class when computing the calibration curve. + By default `pos_label` is set to 1. + + .. versionadded:: 1.1 + + name : str, default=None + Name for labeling curve. + + ref_line : bool, default=True + If `True`, plots a reference line representing a perfectly + calibrated classifier. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + **kwargs : dict + Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`. + + Returns + ------- + display : :class:`~sklearn.calibration.CalibrationDisplay`. + Object that stores computed values. + + See Also + -------- + CalibrationDisplay.from_estimator : Plot calibration curve using an + estimator and data. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.calibration import CalibrationDisplay + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = LogisticRegression(random_state=0) + >>> clf.fit(X_train, y_train) + LogisticRegression(random_state=0) + >>> y_prob = clf.predict_proba(X_test)[:, 1] + >>> disp = CalibrationDisplay.from_predictions(y_test, y_prob) + >>> plt.show() + """ + pos_label_validated, name = cls._validate_from_predictions_params( + y_true, y_prob, sample_weight=None, pos_label=pos_label, name=name + ) + + prob_true, prob_pred = calibration_curve( + y_true, y_prob, n_bins=n_bins, strategy=strategy, pos_label=pos_label + ) + + disp = cls( + prob_true=prob_true, + prob_pred=prob_pred, + y_prob=y_prob, + estimator_name=name, + pos_label=pos_label_validated, + ) + return disp.plot(ax=ax, ref_line=ref_line, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/conftest.py b/llmeval-env/lib/python3.10/site-packages/sklearn/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..50914929c8facbf1ada63e5b6b60e80277960caa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/conftest.py @@ -0,0 +1,309 @@ +import builtins +import platform +import sys +from contextlib import suppress +from functools import wraps +from os import environ +from unittest import SkipTest + +import joblib +import numpy as np +import pytest +from _pytest.doctest import DoctestItem +from threadpoolctl import threadpool_limits + +from sklearn import config_context, set_config +from sklearn._min_dependencies import PYTEST_MIN_VERSION +from sklearn.datasets import ( + fetch_20newsgroups, + fetch_20newsgroups_vectorized, + fetch_california_housing, + fetch_covtype, + fetch_kddcup99, + fetch_olivetti_faces, + fetch_rcv1, + fetch_species_distributions, +) +from sklearn.tests import random_seed +from sklearn.utils import _IS_32BIT +from sklearn.utils._testing import 
get_pytest_filterwarning_lines +from sklearn.utils.fixes import ( + np_base_version, + parse_version, + sp_version, +) + +if parse_version(pytest.__version__) < parse_version(PYTEST_MIN_VERSION): + raise ImportError( + f"Your version of pytest is too old. Got version {pytest.__version__}, you" + f" should have pytest >= {PYTEST_MIN_VERSION} installed." + ) + +scipy_datasets_require_network = sp_version >= parse_version("1.10") + + +@pytest.fixture +def enable_slep006(): + """Enable SLEP006 for all tests.""" + with config_context(enable_metadata_routing=True): + yield + + +def raccoon_face_or_skip(): + # SciPy >= 1.10 requires network to access to get data + if scipy_datasets_require_network: + run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0" + if not run_network_tests: + raise SkipTest("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0") + + try: + import pooch # noqa + except ImportError: + raise SkipTest("test requires pooch to be installed") + + from scipy.datasets import face + else: + from scipy.misc import face + + return face(gray=True) + + +dataset_fetchers = { + "fetch_20newsgroups_fxt": fetch_20newsgroups, + "fetch_20newsgroups_vectorized_fxt": fetch_20newsgroups_vectorized, + "fetch_california_housing_fxt": fetch_california_housing, + "fetch_covtype_fxt": fetch_covtype, + "fetch_kddcup99_fxt": fetch_kddcup99, + "fetch_olivetti_faces_fxt": fetch_olivetti_faces, + "fetch_rcv1_fxt": fetch_rcv1, + "fetch_species_distributions_fxt": fetch_species_distributions, +} + +if scipy_datasets_require_network: + dataset_fetchers["raccoon_face_fxt"] = raccoon_face_or_skip + +_SKIP32_MARK = pytest.mark.skipif( + environ.get("SKLEARN_RUN_FLOAT32_TESTS", "0") != "1", + reason="Set SKLEARN_RUN_FLOAT32_TESTS=1 to run float32 dtype tests", +) + + +# Global fixtures +@pytest.fixture(params=[pytest.param(np.float32, marks=_SKIP32_MARK), np.float64]) +def global_dtype(request): + yield request.param + + +def _fetch_fixture(f): + """Fetch dataset (download if missing and requested by environment).""" + download_if_missing = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0" + + @wraps(f) + def wrapped(*args, **kwargs): + kwargs["download_if_missing"] = download_if_missing + try: + return f(*args, **kwargs) + except OSError as e: + if str(e) != "Data not found and `download_if_missing` is False": + raise + pytest.skip("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0") + + return pytest.fixture(lambda: wrapped) + + +# Adds fixtures for fetching data +fetch_20newsgroups_fxt = _fetch_fixture(fetch_20newsgroups) +fetch_20newsgroups_vectorized_fxt = _fetch_fixture(fetch_20newsgroups_vectorized) +fetch_california_housing_fxt = _fetch_fixture(fetch_california_housing) +fetch_covtype_fxt = _fetch_fixture(fetch_covtype) +fetch_kddcup99_fxt = _fetch_fixture(fetch_kddcup99) +fetch_olivetti_faces_fxt = _fetch_fixture(fetch_olivetti_faces) +fetch_rcv1_fxt = _fetch_fixture(fetch_rcv1) +fetch_species_distributions_fxt = _fetch_fixture(fetch_species_distributions) +raccoon_face_fxt = pytest.fixture(raccoon_face_or_skip) + + +def pytest_collection_modifyitems(config, items): + """Called after collect is completed. 
+
+ Parameters
+ ----------
+ config : pytest config
+ items : list of collected items
+ """
+ run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
+ skip_network = pytest.mark.skip(
+ reason="test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0"
+ )
+
+ # download datasets during collection to avoid thread-unsafe behavior
+ # when running pytest in parallel with pytest-xdist
+ dataset_features_set = set(dataset_fetchers)
+ datasets_to_download = set()
+
+ for item in items:
+ if isinstance(item, DoctestItem) and "fetch_" in item.name:
+ fetcher_function_name = item.name.split(".")[-1]
+ dataset_fetchers_key = f"{fetcher_function_name}_fxt"
+ dataset_to_fetch = set([dataset_fetchers_key]) & dataset_features_set
+ elif not hasattr(item, "fixturenames"):
+ continue
+ else:
+ item_fixtures = set(item.fixturenames)
+ dataset_to_fetch = item_fixtures & dataset_features_set
+
+ if not dataset_to_fetch:
+ continue
+
+ if run_network_tests:
+ datasets_to_download |= dataset_to_fetch
+ else:
+ # network tests are skipped
+ item.add_marker(skip_network)
+
+ # Only download datasets on the first worker spawned by pytest-xdist
+ # to avoid thread-unsafe behavior. If pytest-xdist is not used, we still
+ # download before tests run.
+ worker_id = environ.get("PYTEST_XDIST_WORKER", "gw0")
+ if worker_id == "gw0" and run_network_tests:
+ for name in datasets_to_download:
+ with suppress(SkipTest):
+ dataset_fetchers[name]()
+
+ for item in items:
+ # Known failure with GradientBoostingClassifier on ARM64
+ if (
+ item.name.endswith("GradientBoostingClassifier")
+ and platform.machine() == "aarch64"
+ ):
+ marker = pytest.mark.xfail(
+ reason=(
+ "known failure. See "
+ "https://github.com/scikit-learn/scikit-learn/issues/17797" # noqa
+ )
+ )
+ item.add_marker(marker)
+
+ skip_doctests = False
+ try:
+ import matplotlib # noqa
+ except ImportError:
+ skip_doctests = True
+ reason = "matplotlib is required to run the doctests"
+
+ if _IS_32BIT:
+ reason = "doctests are only run when the default numpy int is 64 bits."
+ skip_doctests = True
+ elif sys.platform.startswith("win32"):
+ reason = (
+ "doctests are not run for Windows because numpy arrays "
+ "repr is inconsistent across platforms."
+ )
+ skip_doctests = True
+
+ if np_base_version >= parse_version("2"):
+ reason = "Due to NEP 51, numpy scalar repr has changed in numpy 2"
+ skip_doctests = True
+
+ # Normally doctest has the entire module's scope. Here we set globs to an empty dict
+ # to remove the module's scope:
+ # https://docs.python.org/3/library/doctest.html#what-s-the-execution-context
+ for item in items:
+ if isinstance(item, DoctestItem):
+ item.dtest.globs = {}
+
+ if skip_doctests:
+ skip_marker = pytest.mark.skip(reason=reason)
+
+ for item in items:
+ if isinstance(item, DoctestItem):
+ # work-around an internal error with pytest if adding a skip
+ # mark to a doctest in a contextmanager, see
+ # https://github.com/pytest-dev/pytest/issues/8796 for more
+ # details.
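+ # The doctest of `sklearn._config.config_context` is defined on a
+ # contextmanager, which is exactly the case that triggers the pytest
+ # issue above, so it is the only doctest left without the skip marker.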
+ if item.name != "sklearn._config.config_context": + item.add_marker(skip_marker) + try: + import PIL # noqa + + pillow_installed = True + except ImportError: + pillow_installed = False + + if not pillow_installed: + skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!") + for item in items: + if item.name in [ + "sklearn.feature_extraction.image.PatchExtractor", + "sklearn.feature_extraction.image.extract_patches_2d", + ]: + item.add_marker(skip_marker) + + +@pytest.fixture(scope="function") +def pyplot(): + """Setup and teardown fixture for matplotlib. + + This fixture checks if we can import matplotlib. If not, the tests will be + skipped. Otherwise, we close the figures before and after running the + functions. + + Returns + ------- + pyplot : module + The ``matplotlib.pyplot`` module. + """ + pyplot = pytest.importorskip("matplotlib.pyplot") + pyplot.close("all") + yield pyplot + pyplot.close("all") + + +def pytest_configure(config): + # Use matplotlib agg backend during the tests including doctests + try: + import matplotlib + + matplotlib.use("agg") + except ImportError: + pass + + allowed_parallelism = joblib.cpu_count(only_physical_cores=True) + xdist_worker_count = environ.get("PYTEST_XDIST_WORKER_COUNT") + if xdist_worker_count is not None: + # Set the number of OpenMP and BLAS threads based on the number of workers + # xdist is using to prevent oversubscription. + allowed_parallelism = max(allowed_parallelism // int(xdist_worker_count), 1) + threadpool_limits(allowed_parallelism) + + # Register global_random_seed plugin if it is not already registered + if not config.pluginmanager.hasplugin("sklearn.tests.random_seed"): + config.pluginmanager.register(random_seed) + + if environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0": + # This seems like the only way to programmatically change the config + # filterwarnings. 
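# Standalone sketch of the oversubscription guard in pytest_configure above:
# divide the physical cores among pytest-xdist workers so that BLAS/OpenMP
# thread pools do not oversubscribe the machine. Values are illustrative.
import os

import joblib
from threadpoolctl import threadpool_limits

physical_cores = joblib.cpu_count(only_physical_cores=True)
n_workers = int(os.environ.get("PYTEST_XDIST_WORKER_COUNT", "1"))
threadpool_limits(max(physical_cores // n_workers, 1))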
This was suggested in + # https://github.com/pytest-dev/pytest/issues/3311#issuecomment-373177592 + for line in get_pytest_filterwarning_lines(): + config.addinivalue_line("filterwarnings", line) + + +@pytest.fixture +def hide_available_pandas(monkeypatch): + """Pretend pandas was not installed.""" + import_orig = builtins.__import__ + + def mocked_import(name, *args, **kwargs): + if name == "pandas": + raise ImportError() + return import_orig(name, *args, **kwargs) + + monkeypatch.setattr(builtins, "__import__", mocked_import) + + +@pytest.fixture +def print_changed_only_false(): + """Set `print_changed_only` to False for the duration of the test.""" + set_config(print_changed_only=False) + yield + set_config(print_changed_only=True) # reset to default diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/discriminant_analysis.py b/llmeval-env/lib/python3.10/site-packages/sklearn/discriminant_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..29146ca8576940732550a9c68b1efc29bced2da1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/discriminant_analysis.py @@ -0,0 +1,1047 @@ +""" +Linear Discriminant Analysis and Quadratic Discriminant Analysis +""" + +# Authors: Clemens Brunner +# Martin Billinger +# Matthieu Perrot +# Mathieu Blondel + +# License: BSD 3-Clause + +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.linalg +from scipy import linalg + +from .base import ( + BaseEstimator, + ClassifierMixin, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from .covariance import empirical_covariance, ledoit_wolf, shrunk_covariance +from .linear_model._base import LinearClassifierMixin +from .preprocessing import StandardScaler +from .utils._array_api import _expit, device, get_namespace, size +from .utils._param_validation import HasMethods, Interval, StrOptions +from .utils.extmath import softmax +from .utils.multiclass import check_classification_targets, unique_labels +from .utils.validation import check_is_fitted + +__all__ = ["LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis"] + + +def _cov(X, shrinkage=None, covariance_estimator=None): + """Estimate covariance matrix (using optional covariance_estimator). + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + shrinkage : {'empirical', 'auto'} or float, default=None + Shrinkage parameter, possible values: + - None or 'empirical': no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + Shrinkage parameter is ignored if `covariance_estimator` + is not None. + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying on the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in :mod:`sklearn.covariance``. + if None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Returns + ------- + s : ndarray of shape (n_features, n_features) + Estimated covariance matrix. 
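# Hypothetical test (not part of this diff) showing how the hide_available_pandas
# fixture above is typically consumed: inside the test, importing pandas raises
# ImportError even when it is installed, exercising the pandas-free code path.
import pytest


def test_runs_without_pandas(hide_available_pandas):
    with pytest.raises(ImportError):
        import pandas  # noqa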
+ """ + if covariance_estimator is None: + shrinkage = "empirical" if shrinkage is None else shrinkage + if isinstance(shrinkage, str): + if shrinkage == "auto": + sc = StandardScaler() # standardize features + X = sc.fit_transform(X) + s = ledoit_wolf(X)[0] + # rescale + s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] + elif shrinkage == "empirical": + s = empirical_covariance(X) + elif isinstance(shrinkage, Real): + s = shrunk_covariance(empirical_covariance(X), shrinkage) + else: + if shrinkage is not None and shrinkage != 0: + raise ValueError( + "covariance_estimator and shrinkage parameters " + "are not None. Only one of the two can be set." + ) + covariance_estimator.fit(X) + if not hasattr(covariance_estimator, "covariance_"): + raise ValueError( + "%s does not have a covariance_ attribute" + % covariance_estimator.__class__.__name__ + ) + s = covariance_estimator.covariance_ + return s + + +def _class_means(X, y): + """Compute class means. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Returns + ------- + means : array-like of shape (n_classes, n_features) + Class means. + """ + xp, is_array_api_compliant = get_namespace(X) + classes, y = xp.unique_inverse(y) + means = xp.zeros((classes.shape[0], X.shape[1]), device=device(X), dtype=X.dtype) + + if is_array_api_compliant: + for i in range(classes.shape[0]): + means[i, :] = xp.mean(X[y == i], axis=0) + else: + # TODO: Explore the choice of using bincount + add.at as it seems sub optimal + # from a performance-wise + cnt = np.bincount(y) + np.add.at(means, y, X) + means /= cnt[:, None] + return means + + +def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None): + """Compute weighted within-class covariance matrix. + + The per-class covariance are weighted by the class priors. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + priors : array-like of shape (n_classes,) + Class priors. + + shrinkage : 'auto' or float, default=None + Shrinkage parameter, possible values: + - None: no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + Shrinkage parameter is ignored if `covariance_estimator` is not None. + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + If None, the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Returns + ------- + cov : array-like of shape (n_features, n_features) + Weighted within-class covariance matrix + """ + classes = np.unique(y) + cov = np.zeros(shape=(X.shape[1], X.shape[1])) + for idx, group in enumerate(classes): + Xg = X[y == group, :] + cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator)) + return cov + + +class LinearDiscriminantAnalysis( + ClassNamePrefixFeaturesOutMixin, + LinearClassifierMixin, + TransformerMixin, + BaseEstimator, +): + """Linear Discriminant Analysis. + + A classifier with a linear decision boundary, generated by fitting class + conditional densities to the data and using Bayes' rule. 
+ + The model fits a Gaussian density to each class, assuming that all classes + share the same covariance matrix. + + The fitted model can also be used to reduce the dimensionality of the input + by projecting it to the most discriminative directions, using the + `transform` method. + + .. versionadded:: 0.17 + *LinearDiscriminantAnalysis*. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + solver : {'svd', 'lsqr', 'eigen'}, default='svd' + Solver to use, possible values: + - 'svd': Singular value decomposition (default). + Does not compute the covariance matrix, therefore this solver is + recommended for data with a large number of features. + - 'lsqr': Least squares solution. + Can be combined with shrinkage or custom covariance estimator. + - 'eigen': Eigenvalue decomposition. + Can be combined with shrinkage or custom covariance estimator. + + .. versionchanged:: 1.2 + `solver="svd"` now has experimental Array API support. See the + :ref:`Array API User Guide ` for more details. + + shrinkage : 'auto' or float, default=None + Shrinkage parameter, possible values: + - None: no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + This should be left to None if `covariance_estimator` is used. + Note that shrinkage works only with 'lsqr' and 'eigen' solvers. + + priors : array-like of shape (n_classes,), default=None + The class prior probabilities. By default, the class proportions are + inferred from the training data. + + n_components : int, default=None + Number of components (<= min(n_classes - 1, n_features)) for + dimensionality reduction. If None, will be set to + min(n_classes - 1, n_features). This parameter only affects the + `transform` method. + + store_covariance : bool, default=False + If True, explicitly compute the weighted within-class covariance + matrix when solver is 'svd'. The matrix is always computed + and stored for the other solvers. + + .. versionadded:: 0.17 + + tol : float, default=1.0e-4 + Absolute threshold for a singular value of X to be considered + significant, used to estimate the rank of X. Dimensions whose + singular values are non-significant are discarded. Only used if + solver is 'svd'. + + .. versionadded:: 0.17 + + covariance_estimator : covariance estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying on the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in :mod:`sklearn.covariance`. + if None the shrinkage parameter drives the estimate. + + This should be left to None if `shrinkage` is used. + Note that `covariance_estimator` works only with 'lsqr' and 'eigen' + solvers. + + .. versionadded:: 0.24 + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_classes, n_features) + Weight vector(s). + + intercept_ : ndarray of shape (n_classes,) + Intercept term. + + covariance_ : array-like of shape (n_features, n_features) + Weighted within-class covariance matrix. It corresponds to + `sum_k prior_k * C_k` where `C_k` is the covariance matrix of the + samples in class `k`. The `C_k` are estimated using the (potentially + shrunk) biased estimator of covariance. If solver is 'svd', only + exists when `store_covariance` is True. 
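# Usage sketch for the n_components parameter documented above: with two classes,
# at most min(n_classes - 1, n_features) = 1 discriminative direction is kept.
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=float)
y = np.array([1, 1, 1, 2, 2, 2])
X_r = LinearDiscriminantAnalysis(n_components=1).fit(X, y).transform(X)
print(X_r.shape)  # (6, 1)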
+ + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + If ``n_components`` is not set then all components are stored and the + sum of explained variances is equal to 1.0. Only available when eigen + or svd solver is used. + + means_ : array-like of shape (n_classes, n_features) + Class-wise means. + + priors_ : array-like of shape (n_classes,) + Class priors (sum to 1). + + scalings_ : array-like of shape (rank, n_classes - 1) + Scaling of the features in the space spanned by the class centroids. + Only available for 'svd' and 'eigen' solvers. + + xbar_ : array-like of shape (n_features,) + Overall mean. Only present if solver is 'svd'. + + classes_ : array-like of shape (n_classes,) + Unique class labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> y = np.array([1, 1, 1, 2, 2, 2]) + >>> clf = LinearDiscriminantAnalysis() + >>> clf.fit(X, y) + LinearDiscriminantAnalysis() + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + "solver": [StrOptions({"svd", "lsqr", "eigen"})], + "shrinkage": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both"), None], + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "priors": ["array-like", None], + "store_covariance": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "covariance_estimator": [HasMethods("fit"), None], + } + + def __init__( + self, + solver="svd", + shrinkage=None, + priors=None, + n_components=None, + store_covariance=False, + tol=1e-4, + covariance_estimator=None, + ): + self.solver = solver + self.shrinkage = shrinkage + self.priors = priors + self.n_components = n_components + self.store_covariance = store_covariance # used only in svd solver + self.tol = tol # used only in svd solver + self.covariance_estimator = covariance_estimator + + def _solve_lstsq(self, X, y, shrinkage, covariance_estimator): + """Least squares solver. + + The least squares solver computes a straightforward solution of the + optimal decision rule based directly on the discriminant functions. It + can only be used for classification (with any covariance estimator), + because + estimation of eigenvectors is not performed. Therefore, dimensionality + reduction with the transform is not supported. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_classes) + Target values. + + shrinkage : 'auto', float or None + Shrinkage parameter, possible values: + - None: no shrinkage. + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + Shrinkage parameter is ignored if `covariance_estimator` i + not None + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). 
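# Usage sketch for the covariance_estimator hook documented above: any estimator
# with a fit method and a covariance_ attribute (here OAS from sklearn.covariance)
# can replace the built-in shrinkage; it only works with 'lsqr' or 'eigen'.
import numpy as np
from sklearn.covariance import OAS
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=float)
y = np.array([1, 1, 1, 2, 2, 2])
clf = LinearDiscriminantAnalysis(solver="lsqr", covariance_estimator=OAS()).fit(X, y)
print(clf.predict([[-0.8, -1]]))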
+ The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + if None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Notes + ----- + This solver is based on [1]_, section 2.6.2, pp. 39-41. + + References + ---------- + .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification + (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN + 0-471-05669-3. + """ + self.means_ = _class_means(X, y) + self.covariance_ = _class_cov( + X, y, self.priors_, shrinkage, covariance_estimator + ) + self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T + self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log( + self.priors_ + ) + + def _solve_eigen(self, X, y, shrinkage, covariance_estimator): + """Eigenvalue solver. + + The eigenvalue solver computes the optimal solution of the Rayleigh + coefficient (basically the ratio of between class scatter to within + class scatter). This solver supports both classification and + dimensionality reduction (with any covariance estimator). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + shrinkage : 'auto', float or None + Shrinkage parameter, possible values: + - None: no shrinkage. + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage constant. + + Shrinkage parameter is ignored if `covariance_estimator` i + not None + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + if None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Notes + ----- + This solver is based on [1]_, section 3.8.3, pp. 121-124. + + References + ---------- + .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification + (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN + 0-471-05669-3. + """ + self.means_ = _class_means(X, y) + self.covariance_ = _class_cov( + X, y, self.priors_, shrinkage, covariance_estimator + ) + + Sw = self.covariance_ # within scatter + St = _cov(X, shrinkage, covariance_estimator) # total scatter + Sb = St - Sw # between scatter + + evals, evecs = linalg.eigh(Sb, Sw) + self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][ + : self._max_components + ] + evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors + + self.scalings_ = evecs + self.coef_ = np.dot(self.means_, evecs).dot(evecs.T) + self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log( + self.priors_ + ) + + def _solve_svd(self, X, y): + """SVD solver. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. 
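# Worked sketch of the closed form used by _solve_lstsq above: with a shared
# within-class covariance S and class means mu_k, coef_k = S^-1 mu_k and
# intercept_k = -0.5 * mu_k^T S^-1 mu_k + log(prior_k). Values are illustrative.
import numpy as np
from scipy import linalg

means = np.array([[0.0, 0.0], [2.0, 1.0]])    # (n_classes, n_features)
priors = np.array([0.5, 0.5])
cov = np.array([[1.0, 0.2], [0.2, 1.0]])      # shared within-class covariance

coef = linalg.lstsq(cov, means.T)[0].T
intercept = -0.5 * np.diag(means @ coef.T) + np.log(priors)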
+ """ + xp, is_array_api_compliant = get_namespace(X) + + if is_array_api_compliant: + svd = xp.linalg.svd + else: + svd = scipy.linalg.svd + + n_samples, n_features = X.shape + n_classes = self.classes_.shape[0] + + self.means_ = _class_means(X, y) + if self.store_covariance: + self.covariance_ = _class_cov(X, y, self.priors_) + + Xc = [] + for idx, group in enumerate(self.classes_): + Xg = X[y == group] + Xc.append(Xg - self.means_[idx, :]) + + self.xbar_ = self.priors_ @ self.means_ + + Xc = xp.concat(Xc, axis=0) + + # 1) within (univariate) scaling by with classes std-dev + std = xp.std(Xc, axis=0) + # avoid division by zero in normalization + std[std == 0] = 1.0 + fac = xp.asarray(1.0 / (n_samples - n_classes)) + + # 2) Within variance scaling + X = xp.sqrt(fac) * (Xc / std) + # SVD of centered (within)scaled data + U, S, Vt = svd(X, full_matrices=False) + + rank = xp.sum(xp.astype(S > self.tol, xp.int32)) + # Scaling of within covariance is: V' 1/S + scalings = (Vt[:rank, :] / std).T / S[:rank] + fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1) + + # 3) Between variance scaling + # Scale weighted centers + X = ( + (xp.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T + ).T @ scalings + # Centers are living in a space with n_classes-1 dim (maximum) + # Use SVD to find projection in the space spanned by the + # (n_classes) centers + _, S, Vt = svd(X, full_matrices=False) + + if self._max_components == 0: + self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype) + else: + self.explained_variance_ratio_ = (S**2 / xp.sum(S**2))[ + : self._max_components + ] + + rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32)) + self.scalings_ = scalings @ Vt.T[:, :rank] + coef = (self.means_ - self.xbar_) @ self.scalings_ + self.intercept_ = -0.5 * xp.sum(coef**2, axis=1) + xp.log(self.priors_) + self.coef_ = coef @ self.scalings_.T + self.intercept_ -= self.xbar_ @ self.coef_.T + + @_fit_context( + # LinearDiscriminantAnalysis.covariance_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the Linear Discriminant Analysis model. + + .. versionchanged:: 0.19 + *store_covariance* has been moved to main constructor. + + .. versionchanged:: 0.19 + *tol* has been moved to main constructor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + Returns + ------- + self : object + Fitted estimator. + """ + xp, _ = get_namespace(X) + + X, y = self._validate_data( + X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32] + ) + self.classes_ = unique_labels(y) + n_samples, _ = X.shape + n_classes = self.classes_.shape[0] + + if n_samples == n_classes: + raise ValueError( + "The number of samples must be more than the number of classes." + ) + + if self.priors is None: # estimate priors from sample + _, cnts = xp.unique_counts(y) # non-negative ints + self.priors_ = xp.astype(cnts, X.dtype) / float(y.shape[0]) + else: + self.priors_ = xp.asarray(self.priors, dtype=X.dtype) + + if xp.any(self.priors_ < 0): + raise ValueError("priors must be non-negative") + + if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-5: + warnings.warn("The priors do not sum to 1. 
Renormalizing", UserWarning) + self.priors_ = self.priors_ / self.priors_.sum() + + # Maximum number of components no matter what n_components is + # specified: + max_components = min(n_classes - 1, X.shape[1]) + + if self.n_components is None: + self._max_components = max_components + else: + if self.n_components > max_components: + raise ValueError( + "n_components cannot be larger than min(n_features, n_classes - 1)." + ) + self._max_components = self.n_components + + if self.solver == "svd": + if self.shrinkage is not None: + raise NotImplementedError("shrinkage not supported with 'svd' solver.") + if self.covariance_estimator is not None: + raise ValueError( + "covariance estimator " + "is not supported " + "with svd solver. Try another solver" + ) + self._solve_svd(X, y) + elif self.solver == "lsqr": + self._solve_lstsq( + X, + y, + shrinkage=self.shrinkage, + covariance_estimator=self.covariance_estimator, + ) + elif self.solver == "eigen": + self._solve_eigen( + X, + y, + shrinkage=self.shrinkage, + covariance_estimator=self.covariance_estimator, + ) + if size(self.classes_) == 2: # treat binary case as a special case + coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype) + self.coef_ = xp.reshape(coef_, (1, -1)) + intercept_ = xp.asarray( + self.intercept_[1] - self.intercept_[0], dtype=X.dtype + ) + self.intercept_ = xp.reshape(intercept_, (1,)) + self._n_features_out = self._max_components + return self + + def transform(self, X): + """Project data to maximize class separation. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) or \ + (n_samples, min(rank, n_components)) + Transformed data. In the case of the 'svd' solver, the shape + is (n_samples, min(rank, n_components)). + """ + if self.solver == "lsqr": + raise NotImplementedError( + "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')." + ) + check_is_fitted(self) + xp, _ = get_namespace(X) + X = self._validate_data(X, reset=False) + + if self.solver == "svd": + X_new = (X - self.xbar_) @ self.scalings_ + elif self.solver == "eigen": + X_new = X @ self.scalings_ + + return X_new[:, : self._max_components] + + def predict_proba(self, X): + """Estimate probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Estimated probabilities. + """ + check_is_fitted(self) + xp, is_array_api_compliant = get_namespace(X) + decision = self.decision_function(X) + if size(self.classes_) == 2: + proba = _expit(decision) + return xp.stack([1 - proba, proba], axis=1) + else: + return softmax(decision) + + def predict_log_proba(self, X): + """Estimate log probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Estimated log probabilities. + """ + xp, _ = get_namespace(X) + prediction = self.predict_proba(X) + + info = xp.finfo(prediction.dtype) + if hasattr(info, "smallest_normal"): + smallest_normal = info.smallest_normal + else: + # smallest_normal was introduced in NumPy 1.22 + smallest_normal = info.tiny + + prediction[prediction == 0.0] += smallest_normal + return xp.log(prediction) + + def decision_function(self, X): + """Apply decision function to an array of samples. 
+ + The decision function is equal (up to a constant factor) to the + log-posterior of the model, i.e. `log p(y = k | x)`. In a binary + classification setting this instead corresponds to the difference + `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples (test vectors). + + Returns + ------- + C : ndarray of shape (n_samples,) or (n_samples, n_classes) + Decision function values related to each class, per sample. + In the two-class case, the shape is (n_samples,), giving the + log likelihood ratio of the positive class. + """ + # Only override for the doc + return super().decision_function(X) + + def _more_tags(self): + return {"array_api_support": True} + + +class QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator): + """Quadratic Discriminant Analysis. + + A classifier with a quadratic decision boundary, generated + by fitting class conditional densities to the data + and using Bayes' rule. + + The model fits a Gaussian density to each class. + + .. versionadded:: 0.17 + *QuadraticDiscriminantAnalysis* + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + priors : array-like of shape (n_classes,), default=None + Class priors. By default, the class proportions are inferred from the + training data. + + reg_param : float, default=0.0 + Regularizes the per-class covariance estimates by transforming S2 as + ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``, + where S2 corresponds to the `scaling_` attribute of a given class. + + store_covariance : bool, default=False + If True, the class covariance matrices are explicitly computed and + stored in the `self.covariance_` attribute. + + .. versionadded:: 0.17 + + tol : float, default=1.0e-4 + Absolute threshold for a singular value to be considered significant, + used to estimate the rank of `Xk` where `Xk` is the centered matrix + of samples in class k. This parameter does not affect the + predictions. It only controls a warning that is raised when features + are considered to be colinear. + + .. versionadded:: 0.17 + + Attributes + ---------- + covariance_ : list of len n_classes of ndarray \ + of shape (n_features, n_features) + For each class, gives the covariance matrix estimated using the + samples of that class. The estimations are unbiased. Only present if + `store_covariance` is True. + + means_ : array-like of shape (n_classes, n_features) + Class-wise means. + + priors_ : array-like of shape (n_classes,) + Class priors (sum to 1). + + rotations_ : list of len n_classes of ndarray of shape (n_features, n_k) + For each class k an array of shape (n_features, n_k), where + ``n_k = min(n_features, number of elements in class k)`` + It is the rotation of the Gaussian distribution, i.e. its + principal axis. It corresponds to `V`, the matrix of eigenvectors + coming from the SVD of `Xk = U S Vt` where `Xk` is the centered + matrix of samples from class k. + + scalings_ : list of len n_classes of ndarray of shape (n_k,) + For each class, contains the scaling of + the Gaussian distributions along its principal axes, i.e. the + variance in the rotated coordinate system. It corresponds to `S^2 / + (n_samples - 1)`, where `S` is the diagonal matrix of singular values + from the SVD of `Xk`, where `Xk` is the centered matrix of samples + from class k. + + classes_ : ndarray of shape (n_classes,) + Unique class labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + LinearDiscriminantAnalysis : Linear Discriminant Analysis. + + Examples + -------- + >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis + >>> import numpy as np + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> y = np.array([1, 1, 1, 2, 2, 2]) + >>> clf = QuadraticDiscriminantAnalysis() + >>> clf.fit(X, y) + QuadraticDiscriminantAnalysis() + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + "priors": ["array-like", None], + "reg_param": [Interval(Real, 0, 1, closed="both")], + "store_covariance": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, *, priors=None, reg_param=0.0, store_covariance=False, tol=1.0e-4 + ): + self.priors = priors + self.reg_param = reg_param + self.store_covariance = store_covariance + self.tol = tol + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model according to the given training data and parameters. + + .. versionchanged:: 0.19 + ``store_covariances`` has been moved to main constructor as + ``store_covariance`` + + .. versionchanged:: 0.19 + ``tol`` has been moved to main constructor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values (integers). + + Returns + ------- + self : object + Fitted estimator. + """ + X, y = self._validate_data(X, y) + check_classification_targets(y) + self.classes_, y = np.unique(y, return_inverse=True) + n_samples, n_features = X.shape + n_classes = len(self.classes_) + if n_classes < 2: + raise ValueError( + "The number of classes has to be greater than one; got %d class" + % (n_classes) + ) + if self.priors is None: + self.priors_ = np.bincount(y) / float(n_samples) + else: + self.priors_ = np.array(self.priors) + + cov = None + store_covariance = self.store_covariance + if store_covariance: + cov = [] + means = [] + scalings = [] + rotations = [] + for ind in range(n_classes): + Xg = X[y == ind, :] + meang = Xg.mean(0) + means.append(meang) + if len(Xg) == 1: + raise ValueError( + "y has only 1 sample in class %s, covariance is ill defined." + % str(self.classes_[ind]) + ) + Xgc = Xg - meang + # Xgc = U * S * V.T + _, S, Vt = np.linalg.svd(Xgc, full_matrices=False) + rank = np.sum(S > self.tol) + if rank < n_features: + warnings.warn("Variables are collinear") + S2 = (S**2) / (len(Xg) - 1) + S2 = ((1 - self.reg_param) * S2) + self.reg_param + if self.store_covariance or store_covariance: + # cov = V * (S^2 / (n-1)) * V.T + cov.append(np.dot(S2 * Vt.T, Vt)) + scalings.append(S2) + rotations.append(Vt.T) + if self.store_covariance or store_covariance: + self.covariance_ = cov + self.means_ = np.asarray(means) + self.scalings_ = scalings + self.rotations_ = rotations + return self + + def _decision_function(self, X): + # return log posterior, see eq (4.12) p. 110 of the ESL. 
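# Worked sketch of the per-class decomposition in QuadraticDiscriminantAnalysis.fit
# above: an SVD of the centered class samples yields the rotation (Vt.T) and the
# variances along the principal axes (S**2 / (n_k - 1)), which reg_param shrinks
# toward the identity. The data below is illustrative.
import numpy as np

rng = np.random.RandomState(0)
Xg = rng.randn(20, 3)                         # samples from a single class
reg_param = 0.1

Xgc = Xg - Xg.mean(axis=0)
_, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
S2 = (S**2) / (len(Xg) - 1)                   # scalings_ entry before regularization
S2 = (1 - reg_param) * S2 + reg_param         # regularized scalings_
cov = (S2 * Vt.T) @ Vt                        # regularized class covariance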
+ check_is_fitted(self) + + X = self._validate_data(X, reset=False) + norm2 = [] + for i in range(len(self.classes_)): + R = self.rotations_[i] + S = self.scalings_[i] + Xm = X - self.means_[i] + X2 = np.dot(Xm, R * (S ** (-0.5))) + norm2.append(np.sum(X2**2, axis=1)) + norm2 = np.array(norm2).T # shape = [len(X), n_classes] + u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) + return -0.5 * (norm2 + u) + np.log(self.priors_) + + def decision_function(self, X): + """Apply decision function to an array of samples. + + The decision function is equal (up to a constant factor) to the + log-posterior of the model, i.e. `log p(y = k | x)`. In a binary + classification setting this instead corresponds to the difference + `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples (test vectors). + + Returns + ------- + C : ndarray of shape (n_samples,) or (n_samples, n_classes) + Decision function values related to each class, per sample. + In the two-class case, the shape is (n_samples,), giving the + log likelihood ratio of the positive class. + """ + dec_func = self._decision_function(X) + # handle special case of two classes + if len(self.classes_) == 2: + return dec_func[:, 1] - dec_func[:, 0] + return dec_func + + def predict(self, X): + """Perform classification on an array of test vectors X. + + The predicted class C for each sample in X is returned. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Vector to be scored, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + C : ndarray of shape (n_samples,) + Estimated probabilities. + """ + d = self._decision_function(X) + y_pred = self.classes_.take(d.argmax(1)) + return y_pred + + def predict_proba(self, X): + """Return posterior probabilities of classification. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples/test vectors. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Posterior probabilities of classification per class. + """ + values = self._decision_function(X) + # compute the likelihood of the underlying gaussian models + # up to a multiplicative constant. + likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis]) + # compute posterior probabilities + return likelihood / likelihood.sum(axis=1)[:, np.newaxis] + + def predict_log_proba(self, X): + """Return log of posterior probabilities of classification. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples/test vectors. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Posterior log-probabilities of classification per class. 
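# Sketch of the normalization used in predict_proba above: subtracting the
# per-row maximum before exponentiating avoids overflow, and the subtracted
# constant cancels out, leaving properly normalized posterior probabilities.
import numpy as np

values = np.array([[700.0, 710.0], [-5.0, -3.0]])   # unnormalized log-posteriors
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
posteriors = likelihood / likelihood.sum(axis=1)[:, np.newaxis]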
+ """ + # XXX : can do better to avoid precision overflows + probas_ = self.predict_proba(X) + return np.log(probas_) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/dummy.py b/llmeval-env/lib/python3.10/site-packages/sklearn/dummy.py new file mode 100644 index 0000000000000000000000000000000000000000..63318b07ce5805797884e2fabcca2156ab770460 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/dummy.py @@ -0,0 +1,682 @@ +# Author: Mathieu Blondel +# Arnaud Joly +# Maheshakya Wijewardena +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from .base import ( + BaseEstimator, + ClassifierMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, +) +from .utils import check_random_state +from .utils._param_validation import Interval, StrOptions +from .utils.multiclass import class_distribution +from .utils.random import _random_choice_csc +from .utils.stats import _weighted_percentile +from .utils.validation import ( + _check_sample_weight, + _num_samples, + check_array, + check_consistent_length, + check_is_fitted, +) + + +class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator): + """DummyClassifier makes predictions that ignore the input features. + + This classifier serves as a simple baseline to compare against other more + complex classifiers. + + The specific behavior of the baseline is selected with the `strategy` + parameter. + + All strategies make predictions that ignore the input feature values passed + as the `X` argument to `fit` and `predict`. The predictions, however, + typically depend on values observed in the `y` parameter passed to `fit`. + + Note that the "stratified" and "uniform" strategies lead to + non-deterministic predictions that can be rendered deterministic by setting + the `random_state` parameter if needed. The other strategies are naturally + deterministic and, once fit, always return the same constant prediction + for any value of `X`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + strategy : {"most_frequent", "prior", "stratified", "uniform", \ + "constant"}, default="prior" + Strategy to use to generate predictions. + + * "most_frequent": the `predict` method always returns the most + frequent class label in the observed `y` argument passed to `fit`. + The `predict_proba` method returns the matching one-hot encoded + vector. + * "prior": the `predict` method always returns the most frequent + class label in the observed `y` argument passed to `fit` (like + "most_frequent"). ``predict_proba`` always returns the empirical + class distribution of `y` also known as the empirical class prior + distribution. + * "stratified": the `predict_proba` method randomly samples one-hot + vectors from a multinomial distribution parametrized by the empirical + class prior probabilities. + The `predict` method returns the class label which got probability + one in the one-hot vector of `predict_proba`. + Each sampled row of both methods is therefore independent and + identically distributed. + * "uniform": generates predictions uniformly at random from the list + of unique classes observed in `y`, i.e. each class has equal + probability. + * "constant": always predicts a constant label that is provided by + the user. This is useful for metrics that evaluate a non-majority + class. + + .. versionchanged:: 0.24 + The default value of `strategy` has changed to "prior" in version + 0.24. 
+ + random_state : int, RandomState instance or None, default=None + Controls the randomness to generate the predictions when + ``strategy='stratified'`` or ``strategy='uniform'``. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + constant : int or str or array-like of shape (n_outputs,), default=None + The explicit constant as predicted by the "constant" strategy. This + parameter is useful only for the "constant" strategy. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) or list of such arrays + Unique class labels observed in `y`. For multi-output classification + problems, this attribute is a list of arrays as each output has an + independent set of possible classes. + + n_classes_ : int or list of int + Number of label for each output. + + class_prior_ : ndarray of shape (n_classes,) or list of such arrays + Frequency of each class observed in `y`. For multioutput classification + problems, this is computed independently for each output. + + n_outputs_ : int + Number of outputs. + + sparse_output_ : bool + True if the array returned from predict is to be in sparse CSC format. + Is automatically set to True if the input `y` is passed in sparse + format. + + See Also + -------- + DummyRegressor : Regressor that makes predictions using simple rules. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.dummy import DummyClassifier + >>> X = np.array([-1, 1, 1, 1]) + >>> y = np.array([0, 1, 1, 1]) + >>> dummy_clf = DummyClassifier(strategy="most_frequent") + >>> dummy_clf.fit(X, y) + DummyClassifier(strategy='most_frequent') + >>> dummy_clf.predict(X) + array([1, 1, 1, 1]) + >>> dummy_clf.score(X, y) + 0.75 + """ + + _parameter_constraints: dict = { + "strategy": [ + StrOptions({"most_frequent", "prior", "stratified", "uniform", "constant"}) + ], + "random_state": ["random_state"], + "constant": [Integral, str, "array-like", None], + } + + def __init__(self, *, strategy="prior", random_state=None, constant=None): + self.strategy = strategy + self.random_state = random_state + self.constant = constant + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the baseline classifier. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Returns the instance itself. + """ + self._strategy = self.strategy + + if self._strategy == "uniform" and sp.issparse(y): + y = y.toarray() + warnings.warn( + ( + "A local copy of the target data has been converted " + "to a numpy array. Predicting on sparse target data " + "with the uniform strategy would not save memory " + "and would be slower." + ), + UserWarning, + ) + + self.sparse_output_ = sp.issparse(y) + + if not self.sparse_output_: + y = np.asarray(y) + y = np.atleast_1d(y) + + if y.ndim == 1: + y = np.reshape(y, (-1, 1)) + + self.n_outputs_ = y.shape[1] + + check_consistent_length(X, y) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if self._strategy == "constant": + if self.constant is None: + raise ValueError( + "Constant target value has to be specified " + "when the constant strategy is used." 
+ ) + else: + constant = np.reshape(np.atleast_1d(self.constant), (-1, 1)) + if constant.shape[0] != self.n_outputs_: + raise ValueError( + "Constant target value should have shape (%d, 1)." + % self.n_outputs_ + ) + + (self.classes_, self.n_classes_, self.class_prior_) = class_distribution( + y, sample_weight + ) + + if self._strategy == "constant": + for k in range(self.n_outputs_): + if not any(constant[k][0] == c for c in self.classes_[k]): + # Checking in case of constant strategy if the constant + # provided by the user is in y. + err_msg = ( + "The constant target value must be present in " + "the training data. You provided constant={}. " + "Possible values are: {}.".format( + self.constant, self.classes_[k].tolist() + ) + ) + raise ValueError(err_msg) + + if self.n_outputs_ == 1: + self.n_classes_ = self.n_classes_[0] + self.classes_ = self.classes_[0] + self.class_prior_ = self.class_prior_[0] + + return self + + def predict(self, X): + """Perform classification on test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test data. + + Returns + ------- + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Predicted target values for X. + """ + check_is_fitted(self) + + # numpy random_state expects Python int and not long as size argument + # under Windows + n_samples = _num_samples(X) + rs = check_random_state(self.random_state) + + n_classes_ = self.n_classes_ + classes_ = self.classes_ + class_prior_ = self.class_prior_ + constant = self.constant + if self.n_outputs_ == 1: + # Get same type even for self.n_outputs_ == 1 + n_classes_ = [n_classes_] + classes_ = [classes_] + class_prior_ = [class_prior_] + constant = [constant] + # Compute probability only once + if self._strategy == "stratified": + proba = self.predict_proba(X) + if self.n_outputs_ == 1: + proba = [proba] + + if self.sparse_output_: + class_prob = None + if self._strategy in ("most_frequent", "prior"): + classes_ = [np.array([cp.argmax()]) for cp in class_prior_] + + elif self._strategy == "stratified": + class_prob = class_prior_ + + elif self._strategy == "uniform": + raise ValueError( + "Sparse target prediction is not " + "supported with the uniform strategy" + ) + + elif self._strategy == "constant": + classes_ = [np.array([c]) for c in constant] + + y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state) + else: + if self._strategy in ("most_frequent", "prior"): + y = np.tile( + [ + classes_[k][class_prior_[k].argmax()] + for k in range(self.n_outputs_) + ], + [n_samples, 1], + ) + + elif self._strategy == "stratified": + y = np.vstack( + [ + classes_[k][proba[k].argmax(axis=1)] + for k in range(self.n_outputs_) + ] + ).T + + elif self._strategy == "uniform": + ret = [ + classes_[k][rs.randint(n_classes_[k], size=n_samples)] + for k in range(self.n_outputs_) + ] + y = np.vstack(ret).T + + elif self._strategy == "constant": + y = np.tile(self.constant, (n_samples, 1)) + + if self.n_outputs_ == 1: + y = np.ravel(y) + + return y + + def predict_proba(self, X): + """ + Return probability estimates for the test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test data. + + Returns + ------- + P : ndarray of shape (n_samples, n_classes) or list of such arrays + Returns the probability of the sample for each class in + the model, where classes are ordered arithmetically, for each + output. 
+ """ + check_is_fitted(self) + + # numpy random_state expects Python int and not long as size argument + # under Windows + n_samples = _num_samples(X) + rs = check_random_state(self.random_state) + + n_classes_ = self.n_classes_ + classes_ = self.classes_ + class_prior_ = self.class_prior_ + constant = self.constant + if self.n_outputs_ == 1: + # Get same type even for self.n_outputs_ == 1 + n_classes_ = [n_classes_] + classes_ = [classes_] + class_prior_ = [class_prior_] + constant = [constant] + + P = [] + for k in range(self.n_outputs_): + if self._strategy == "most_frequent": + ind = class_prior_[k].argmax() + out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) + out[:, ind] = 1.0 + elif self._strategy == "prior": + out = np.ones((n_samples, 1)) * class_prior_[k] + + elif self._strategy == "stratified": + out = rs.multinomial(1, class_prior_[k], size=n_samples) + out = out.astype(np.float64) + + elif self._strategy == "uniform": + out = np.ones((n_samples, n_classes_[k]), dtype=np.float64) + out /= n_classes_[k] + + elif self._strategy == "constant": + ind = np.where(classes_[k] == constant[k]) + out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) + out[:, ind] = 1.0 + + P.append(out) + + if self.n_outputs_ == 1: + P = P[0] + + return P + + def predict_log_proba(self, X): + """ + Return log probability estimates for the test vectors X. + + Parameters + ---------- + X : {array-like, object with finite length or shape} + Training data. + + Returns + ------- + P : ndarray of shape (n_samples, n_classes) or list of such arrays + Returns the log probability of the sample for each class in + the model, where classes are ordered arithmetically for each + output. + """ + proba = self.predict_proba(X) + if self.n_outputs_ == 1: + return np.log(proba) + else: + return [np.log(p) for p in proba] + + def _more_tags(self): + return { + "poor_score": True, + "no_validation": True, + "_xfail_checks": { + "check_methods_subset_invariance": "fails for the predict method", + "check_methods_sample_order_invariance": "fails for the predict method", + }, + } + + def score(self, X, y, sample_weight=None): + """Return the mean accuracy on the given test data and labels. + + In multi-label classification, this is the subset accuracy + which is a harsh metric since you require for each sample that + each label set be correctly predicted. + + Parameters + ---------- + X : None or array-like of shape (n_samples, n_features) + Test samples. Passing None as test samples gives the same result + as passing real test samples, since DummyClassifier + operates independently of the sampled observations. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + Mean accuracy of self.predict(X) w.r.t. y. + """ + if X is None: + X = np.zeros(shape=(len(y), 1)) + return super().score(X, y, sample_weight) + + +class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator): + """Regressor that makes predictions using simple rules. + + This regressor is useful as a simple baseline to compare with other + (real) regressors. Do not use it for real problems. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + strategy : {"mean", "median", "quantile", "constant"}, default="mean" + Strategy to use to generate predictions. 
+ + * "mean": always predicts the mean of the training set + * "median": always predicts the median of the training set + * "quantile": always predicts a specified quantile of the training set, + provided with the quantile parameter. + * "constant": always predicts a constant value that is provided by + the user. + + constant : int or float or array-like of shape (n_outputs,), default=None + The explicit constant as predicted by the "constant" strategy. This + parameter is useful only for the "constant" strategy. + + quantile : float in [0.0, 1.0], default=None + The quantile to predict using the "quantile" strategy. A quantile of + 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the + maximum. + + Attributes + ---------- + constant_ : ndarray of shape (1, n_outputs) + Mean or median or quantile of the training targets or constant value + given by the user. + + n_outputs_ : int + Number of outputs. + + See Also + -------- + DummyClassifier: Classifier that makes predictions using simple rules. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.dummy import DummyRegressor + >>> X = np.array([1.0, 2.0, 3.0, 4.0]) + >>> y = np.array([2.0, 3.0, 5.0, 10.0]) + >>> dummy_regr = DummyRegressor(strategy="mean") + >>> dummy_regr.fit(X, y) + DummyRegressor() + >>> dummy_regr.predict(X) + array([5., 5., 5., 5.]) + >>> dummy_regr.score(X, y) + 0.0 + """ + + _parameter_constraints: dict = { + "strategy": [StrOptions({"mean", "median", "quantile", "constant"})], + "quantile": [Interval(Real, 0.0, 1.0, closed="both"), None], + "constant": [ + Interval(Real, None, None, closed="neither"), + "array-like", + None, + ], + } + + def __init__(self, *, strategy="mean", constant=None, quantile=None): + self.strategy = strategy + self.constant = constant + self.quantile = quantile + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the random regressor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Fitted estimator. + """ + y = check_array(y, ensure_2d=False, input_name="y") + if len(y) == 0: + raise ValueError("y must not be empty.") + + if y.ndim == 1: + y = np.reshape(y, (-1, 1)) + self.n_outputs_ = y.shape[1] + + check_consistent_length(X, y, sample_weight) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if self.strategy == "mean": + self.constant_ = np.average(y, axis=0, weights=sample_weight) + + elif self.strategy == "median": + if sample_weight is None: + self.constant_ = np.median(y, axis=0) + else: + self.constant_ = [ + _weighted_percentile(y[:, k], sample_weight, percentile=50.0) + for k in range(self.n_outputs_) + ] + + elif self.strategy == "quantile": + if self.quantile is None: + raise ValueError( + "When using `strategy='quantile', you have to specify the desired " + "quantile in the range [0, 1]." 
+ ) + percentile = self.quantile * 100.0 + if sample_weight is None: + self.constant_ = np.percentile(y, axis=0, q=percentile) + else: + self.constant_ = [ + _weighted_percentile(y[:, k], sample_weight, percentile=percentile) + for k in range(self.n_outputs_) + ] + + elif self.strategy == "constant": + if self.constant is None: + raise TypeError( + "Constant target value has to be specified " + "when the constant strategy is used." + ) + + self.constant_ = check_array( + self.constant, + accept_sparse=["csr", "csc", "coo"], + ensure_2d=False, + ensure_min_samples=0, + ) + + if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]: + raise ValueError( + "Constant target value should have shape (%d, 1)." % y.shape[1] + ) + + self.constant_ = np.reshape(self.constant_, (1, -1)) + return self + + def predict(self, X, return_std=False): + """Perform classification on test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test data. + + return_std : bool, default=False + Whether to return the standard deviation of posterior prediction. + All zeros in this case. + + .. versionadded:: 0.20 + + Returns + ------- + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Predicted target values for X. + + y_std : array-like of shape (n_samples,) or (n_samples, n_outputs) + Standard deviation of predictive distribution of query points. + """ + check_is_fitted(self) + n_samples = _num_samples(X) + + y = np.full( + (n_samples, self.n_outputs_), + self.constant_, + dtype=np.array(self.constant_).dtype, + ) + y_std = np.zeros((n_samples, self.n_outputs_)) + + if self.n_outputs_ == 1: + y = np.ravel(y) + y_std = np.ravel(y_std) + + return (y, y_std) if return_std else y + + def _more_tags(self): + return {"poor_score": True, "no_validation": True} + + def score(self, X, y, sample_weight=None): + """Return the coefficient of determination R^2 of the prediction. + + The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the + residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the + total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best + possible score is 1.0 and it can be negative (because the model can be + arbitrarily worse). A constant model that always predicts the expected + value of y, disregarding the input features, would get a R^2 score of + 0.0. + + Parameters + ---------- + X : None or array-like of shape (n_samples, n_features) + Test samples. Passing None as test samples gives the same result + as passing real test samples, since `DummyRegressor` + operates independently of the sampled observations. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True values for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + R^2 of `self.predict(X)` w.r.t. y. + """ + if X is None: + X = np.zeros(shape=(len(y), 1)) + return super().score(X, y, sample_weight) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/exceptions.py b/llmeval-env/lib/python3.10/site-packages/sklearn/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..1466ce783ee0019d60987b4f2450b07cf3793686 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/exceptions.py @@ -0,0 +1,191 @@ +""" +The :mod:`sklearn.exceptions` module includes all custom warnings and error +classes used across scikit-learn. 
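# Usage sketch for the "quantile" strategy handled in DummyRegressor.fit above:
# quantile=0.5 reproduces the median baseline; the values here are illustrative.
import numpy as np
from sklearn.dummy import DummyRegressor

X = np.zeros((4, 1))                          # features are ignored by the baseline
y = np.array([2.0, 3.0, 5.0, 10.0])
reg = DummyRegressor(strategy="quantile", quantile=0.9).fit(X, y)
print(reg.predict(X))                         # constant 90th-percentile prediction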
+""" + +__all__ = [ + "NotFittedError", + "ConvergenceWarning", + "DataConversionWarning", + "DataDimensionalityWarning", + "EfficiencyWarning", + "FitFailedWarning", + "SkipTestWarning", + "UndefinedMetricWarning", + "PositiveSpectrumWarning", + "UnsetMetadataPassedError", +] + + +class UnsetMetadataPassedError(ValueError): + """Exception class to raise if a metadata is passed which is not explicitly \ + requested (metadata=True) or not requested (metadata=False). + + .. versionadded:: 1.3 + + Parameters + ---------- + message : str + The message + + unrequested_params : dict + A dictionary of parameters and their values which are provided but not + requested. + + routed_params : dict + A dictionary of routed parameters. + """ + + def __init__(self, *, message, unrequested_params, routed_params): + super().__init__(message) + self.unrequested_params = unrequested_params + self.routed_params = routed_params + + +class NotFittedError(ValueError, AttributeError): + """Exception class to raise if estimator is used before fitting. + + This class inherits from both ValueError and AttributeError to help with + exception handling and backward compatibility. + + Examples + -------- + >>> from sklearn.svm import LinearSVC + >>> from sklearn.exceptions import NotFittedError + >>> try: + ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]]) + ... except NotFittedError as e: + ... print(repr(e)) + NotFittedError("This LinearSVC instance is not fitted yet. Call 'fit' with + appropriate arguments before using this estimator."...) + + .. versionchanged:: 0.18 + Moved from sklearn.utils.validation. + """ + + +class ConvergenceWarning(UserWarning): + """Custom warning to capture convergence problems + + .. versionchanged:: 0.18 + Moved from sklearn.utils. + """ + + +class DataConversionWarning(UserWarning): + """Warning used to notify implicit data conversions happening in the code. + + This warning occurs when some input data needs to be converted or + interpreted in a way that may not match the user's expectations. + + For example, this warning may occur when the user + - passes an integer array to a function which expects float input and + will convert the input + - requests a non-copying operation, but a copy is required to meet the + implementation's data-type expectations; + - passes an input whose shape can be interpreted ambiguously. + + .. versionchanged:: 0.18 + Moved from sklearn.utils.validation. + """ + + +class DataDimensionalityWarning(UserWarning): + """Custom warning to notify potential issues with data dimensionality. + + For example, in random projection, this warning is raised when the + number of components, which quantifies the dimensionality of the target + projection space, is higher than the number of features, which quantifies + the dimensionality of the original source space, to imply that the + dimensionality of the problem will not be reduced. + + .. versionchanged:: 0.18 + Moved from sklearn.utils. + """ + + +class EfficiencyWarning(UserWarning): + """Warning used to notify the user of inefficient computation. + + This warning notifies the user that the efficiency may not be optimal due + to some reason which may be included as a part of the warning message. + This may be subclassed into a more specific Warning class. + + .. versionadded:: 0.18 + """ + + +class FitFailedWarning(RuntimeWarning): + """Warning class used if there is an error while fitting the estimator. 
+ + This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV + and the cross-validation helper function cross_val_score to warn when there + is an error while fitting the estimator. + + .. versionchanged:: 0.18 + Moved from sklearn.cross_validation. + """ + + +class SkipTestWarning(UserWarning): + """Warning class used to notify the user of a test that was skipped. + + For example, one of the estimator checks requires a pandas import. + If the pandas package cannot be imported, the test will be skipped rather + than register as a failure. + """ + + +class UndefinedMetricWarning(UserWarning): + """Warning used when the metric is invalid + + .. versionchanged:: 0.18 + Moved from sklearn.base. + """ + + +class PositiveSpectrumWarning(UserWarning): + """Warning raised when the eigenvalues of a PSD matrix have issues + + This warning is typically raised by ``_check_psd_eigenvalues`` when the + eigenvalues of a positive semidefinite (PSD) matrix such as a gram matrix + (kernel) present significant negative eigenvalues, or bad conditioning i.e. + very small non-zero eigenvalues compared to the largest eigenvalue. + + .. versionadded:: 0.22 + """ + + +class InconsistentVersionWarning(UserWarning): + """Warning raised when an estimator is unpickled with a inconsistent version. + + Parameters + ---------- + estimator_name : str + Estimator name. + + current_sklearn_version : str + Current scikit-learn version. + + original_sklearn_version : str + Original scikit-learn version. + """ + + def __init__( + self, *, estimator_name, current_sklearn_version, original_sklearn_version + ): + self.estimator_name = estimator_name + self.current_sklearn_version = current_sklearn_version + self.original_sklearn_version = original_sklearn_version + + def __str__(self): + return ( + f"Trying to unpickle estimator {self.estimator_name} from version" + f" {self.original_sklearn_version} when " + f"using version {self.current_sklearn_version}. This might lead to breaking" + " code or " + "invalid results. Use at your own risk. " + "For more info please refer to:\n" + "https://scikit-learn.org/stable/model_persistence.html" + "#security-maintainability-limitations" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/isotonic.py b/llmeval-env/lib/python3.10/site-packages/sklearn/isotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..04456b176379141e6cb5ee8aff361edae3ae5c18 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/isotonic.py @@ -0,0 +1,498 @@ +# Authors: Fabian Pedregosa +# Alexandre Gramfort +# Nelle Varoquaux +# License: BSD 3 clause + +import math +import warnings +from numbers import Real + +import numpy as np +from scipy import interpolate +from scipy.stats import spearmanr + +from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique +from .base import BaseEstimator, RegressorMixin, TransformerMixin, _fit_context +from .utils import check_array, check_consistent_length +from .utils._param_validation import Interval, StrOptions, validate_params +from .utils.validation import _check_sample_weight, check_is_fitted + +__all__ = ["check_increasing", "isotonic_regression", "IsotonicRegression"] + + +@validate_params( + { + "x": ["array-like"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def check_increasing(x, y): + """Determine whether y is monotonically correlated with x. + + y is found increasing or decreasing with respect to x based on a Spearman + correlation test. 
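One way to act on the `InconsistentVersionWarning` defined above when loading a persisted estimator is to promote it to an error; a hedged sketch of that pattern, where the `payload` bytes stand in for a previously pickled estimator.

import pickle
import warnings

from sklearn.exceptions import InconsistentVersionWarning

def load_estimator(payload: bytes):
    # Treat a scikit-learn version mismatch as a hard failure instead of a silent warning.
    with warnings.catch_warnings():
        warnings.simplefilter("error", InconsistentVersionWarning)
        try:
            return pickle.loads(payload)
        except InconsistentVersionWarning as w:
            raise RuntimeError(
                f"estimator pickled with scikit-learn {w.original_sklearn_version}, "
                f"running {w.current_sklearn_version}"
            ) from w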
+ + Parameters + ---------- + x : array-like of shape (n_samples,) + Training data. + + y : array-like of shape (n_samples,) + Training target. + + Returns + ------- + increasing_bool : boolean + Whether the relationship is increasing or decreasing. + + Notes + ----- + The Spearman correlation coefficient is estimated from the data, and the + sign of the resulting estimate is used as the result. + + In the event that the 95% confidence interval based on Fisher transform + spans zero, a warning is raised. + + References + ---------- + Fisher transformation. Wikipedia. + https://en.wikipedia.org/wiki/Fisher_transformation + + Examples + -------- + >>> from sklearn.isotonic import check_increasing + >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10] + >>> check_increasing(x, y) + True + >>> y = [10, 8, 6, 4, 2] + >>> check_increasing(x, y) + False + """ + + # Calculate Spearman rho estimate and set return accordingly. + rho, _ = spearmanr(x, y) + increasing_bool = rho >= 0 + + # Run Fisher transform to get the rho CI, but handle rho=+/-1 + if rho not in [-1.0, 1.0] and len(x) > 3: + F = 0.5 * math.log((1.0 + rho) / (1.0 - rho)) + F_se = 1 / math.sqrt(len(x) - 3) + + # Use a 95% CI, i.e., +/-1.96 S.E. + # https://en.wikipedia.org/wiki/Fisher_transformation + rho_0 = math.tanh(F - 1.96 * F_se) + rho_1 = math.tanh(F + 1.96 * F_se) + + # Warn if the CI spans zero. + if np.sign(rho_0) != np.sign(rho_1): + warnings.warn( + "Confidence interval of the Spearman " + "correlation coefficient spans zero. " + "Determination of ``increasing`` may be " + "suspect." + ) + + return increasing_bool + + +@validate_params( + { + "y": ["array-like"], + "sample_weight": ["array-like", None], + "y_min": [Interval(Real, None, None, closed="both"), None], + "y_max": [Interval(Real, None, None, closed="both"), None], + "increasing": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def isotonic_regression( + y, *, sample_weight=None, y_min=None, y_max=None, increasing=True +): + """Solve the isotonic regression model. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y : array-like of shape (n_samples,) + The data. + + sample_weight : array-like of shape (n_samples,), default=None + Weights on each point of the regression. + If None, weight is set to 1 (equal weights). + + y_min : float, default=None + Lower bound on the lowest predicted value (the minimum value may + still be higher). If not set, defaults to -inf. + + y_max : float, default=None + Upper bound on the highest predicted value (the maximum may still be + lower). If not set, defaults to +inf. + + increasing : bool, default=True + Whether to compute ``y_`` is increasing (if set to True) or decreasing + (if set to False). + + Returns + ------- + y_ : ndarray of shape (n_samples,) + Isotonic fit of y. + + References + ---------- + "Active set algorithms for isotonic regression; A unifying framework" + by Michael J. Best and Nilotpal Chakravarti, section 3. 
+ + Examples + -------- + >>> from sklearn.isotonic import isotonic_regression + >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4]) + array([2.75 , 2.75 , 2.75 , 2.75 , 7.33..., + 7.33..., 7.33..., 7.33..., 7.33..., 7.33...]) + """ + order = np.s_[:] if increasing else np.s_[::-1] + y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32]) + y = np.array(y[order], dtype=y.dtype) + sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True) + sample_weight = np.ascontiguousarray(sample_weight[order]) + + _inplace_contiguous_isotonic_regression(y, sample_weight) + if y_min is not None or y_max is not None: + # Older versions of np.clip don't accept None as a bound, so use np.inf + if y_min is None: + y_min = -np.inf + if y_max is None: + y_max = np.inf + np.clip(y, y_min, y_max, y) + return y[order] + + +class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator): + """Isotonic regression model. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + y_min : float, default=None + Lower bound on the lowest predicted value (the minimum value may + still be higher). If not set, defaults to -inf. + + y_max : float, default=None + Upper bound on the highest predicted value (the maximum may still be + lower). If not set, defaults to +inf. + + increasing : bool or 'auto', default=True + Determines whether the predictions should be constrained to increase + or decrease with `X`. 'auto' will decide based on the Spearman + correlation estimate's sign. + + out_of_bounds : {'nan', 'clip', 'raise'}, default='nan' + Handles how `X` values outside of the training domain are handled + during prediction. + + - 'nan', predictions will be NaN. + - 'clip', predictions will be set to the value corresponding to + the nearest train interval endpoint. + - 'raise', a `ValueError` is raised. + + Attributes + ---------- + X_min_ : float + Minimum value of input array `X_` for left bound. + + X_max_ : float + Maximum value of input array `X_` for right bound. + + X_thresholds_ : ndarray of shape (n_thresholds,) + Unique ascending `X` values used to interpolate + the y = f(X) monotonic function. + + .. versionadded:: 0.24 + + y_thresholds_ : ndarray of shape (n_thresholds,) + De-duplicated `y` values suitable to interpolate the y = f(X) + monotonic function. + + .. versionadded:: 0.24 + + f_ : function + The stepwise interpolating function that covers the input domain ``X``. + + increasing_ : bool + Inferred value for ``increasing``. + + See Also + -------- + sklearn.linear_model.LinearRegression : Ordinary least squares Linear + Regression. + sklearn.ensemble.HistGradientBoostingRegressor : Gradient boosting that + is a non-parametric model accepting monotonicity constraints. + isotonic_regression : Function to solve the isotonic regression model. + + Notes + ----- + Ties are broken using the secondary method from de Leeuw, 1977. + + References + ---------- + Isotonic Median Regression: A Linear Programming Approach + Nilotpal Chakravarti + Mathematics of Operations Research + Vol. 14, No. 2 (May, 1989), pp. 
303-308 + + Isotone Optimization in R : Pool-Adjacent-Violators + Algorithm (PAVA) and Active Set Methods + de Leeuw, Hornik, Mair + Journal of Statistical Software 2009 + + Correctness of Kruskal's algorithms for monotone regression with ties + de Leeuw, Psychometrica, 1977 + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.isotonic import IsotonicRegression + >>> X, y = make_regression(n_samples=10, n_features=1, random_state=41) + >>> iso_reg = IsotonicRegression().fit(X, y) + >>> iso_reg.predict([.1, .2]) + array([1.8628..., 3.7256...]) + """ + + _parameter_constraints: dict = { + "y_min": [Interval(Real, None, None, closed="both"), None], + "y_max": [Interval(Real, None, None, closed="both"), None], + "increasing": ["boolean", StrOptions({"auto"})], + "out_of_bounds": [StrOptions({"nan", "clip", "raise"})], + } + + def __init__(self, *, y_min=None, y_max=None, increasing=True, out_of_bounds="nan"): + self.y_min = y_min + self.y_max = y_max + self.increasing = increasing + self.out_of_bounds = out_of_bounds + + def _check_input_data_shape(self, X): + if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)): + msg = ( + "Isotonic regression input X should be a 1d array or " + "2d array with 1 feature" + ) + raise ValueError(msg) + + def _build_f(self, X, y): + """Build the f_ interp1d function.""" + + bounds_error = self.out_of_bounds == "raise" + if len(y) == 1: + # single y, constant prediction + self.f_ = lambda x: y.repeat(x.shape) + else: + self.f_ = interpolate.interp1d( + X, y, kind="linear", bounds_error=bounds_error + ) + + def _build_y(self, X, y, sample_weight, trim_duplicates=True): + """Build the y_ IsotonicRegression.""" + self._check_input_data_shape(X) + X = X.reshape(-1) # use 1d view + + # Determine increasing if auto-determination requested + if self.increasing == "auto": + self.increasing_ = check_increasing(X, y) + else: + self.increasing_ = self.increasing + + # If sample_weights is passed, removed zero-weight values and clean + # order + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + mask = sample_weight > 0 + X, y, sample_weight = X[mask], y[mask], sample_weight[mask] + + order = np.lexsort((y, X)) + X, y, sample_weight = [array[order] for array in [X, y, sample_weight]] + unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight) + + X = unique_X + y = isotonic_regression( + unique_y, + sample_weight=unique_sample_weight, + y_min=self.y_min, + y_max=self.y_max, + increasing=self.increasing_, + ) + + # Handle the left and right bounds on X + self.X_min_, self.X_max_ = np.min(X), np.max(X) + + if trim_duplicates: + # Remove unnecessary points for faster prediction + keep_data = np.ones((len(y),), dtype=bool) + # Aside from the 1st and last point, remove points whose y values + # are equal to both the point before and the point after it. + keep_data[1:-1] = np.logical_or( + np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:]) + ) + return X[keep_data], y[keep_data] + else: + # The ability to turn off trim_duplicates is only used to it make + # easier to unit test that removing duplicates in y does not have + # any impact the resulting interpolation function (besides + # prediction speed). + return X, y + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples,) or (n_samples, 1) + Training data. + + .. 
versionchanged:: 0.24 + Also accepts 2d array with 1 feature. + + y : array-like of shape (n_samples,) + Training target. + + sample_weight : array-like of shape (n_samples,), default=None + Weights. If set to None, all weights will be set to 1 (equal + weights). + + Returns + ------- + self : object + Returns an instance of self. + + Notes + ----- + X is stored for future use, as :meth:`transform` needs X to interpolate + new input data. + """ + check_params = dict(accept_sparse=False, ensure_2d=False) + X = check_array( + X, input_name="X", dtype=[np.float64, np.float32], **check_params + ) + y = check_array(y, input_name="y", dtype=X.dtype, **check_params) + check_consistent_length(X, y, sample_weight) + + # Transform y by running the isotonic regression algorithm and + # transform X accordingly. + X, y = self._build_y(X, y, sample_weight) + + # It is necessary to store the non-redundant part of the training set + # on the model to make it possible to support model persistence via + # the pickle module as the object built by scipy.interp1d is not + # picklable directly. + self.X_thresholds_, self.y_thresholds_ = X, y + + # Build the interpolation function + self._build_f(X, y) + return self + + def _transform(self, T): + """`_transform` is called by both `transform` and `predict` methods. + + Since `transform` is wrapped to output arrays of specific types (e.g. + NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform` + directly. + + The above behaviour could be changed in the future, if we decide to output + other type of arrays when calling `predict`. + """ + if hasattr(self, "X_thresholds_"): + dtype = self.X_thresholds_.dtype + else: + dtype = np.float64 + + T = check_array(T, dtype=dtype, ensure_2d=False) + + self._check_input_data_shape(T) + T = T.reshape(-1) # use 1d view + + if self.out_of_bounds == "clip": + T = np.clip(T, self.X_min_, self.X_max_) + + res = self.f_(T) + + # on scipy 0.17, interp1d up-casts to float64, so we cast back + res = res.astype(T.dtype) + + return res + + def transform(self, T): + """Transform new data by linear interpolation. + + Parameters + ---------- + T : array-like of shape (n_samples,) or (n_samples, 1) + Data to transform. + + .. versionchanged:: 0.24 + Also accepts 2d array with 1 feature. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + The transformed data. + """ + return self._transform(T) + + def predict(self, T): + """Predict new data by linear interpolation. + + Parameters + ---------- + T : array-like of shape (n_samples,) or (n_samples, 1) + Data to transform. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Transformed data. + """ + return self._transform(T) + + # We implement get_feature_names_out here instead of using + # `ClassNamePrefixFeaturesOutMixin`` because `input_features` are ignored. + # `input_features` are ignored because `IsotonicRegression` accepts 1d + # arrays and the semantics of `feature_names_in_` are not clear for 1d arrays. + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Ignored. + + Returns + ------- + feature_names_out : ndarray of str objects + An ndarray with one string i.e. ["isotonicregression0"]. 
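A minimal sketch of the fit/predict behaviour described above, including the `out_of_bounds="clip"` handling of queries outside the training domain; the toy values are chosen only to show the pooling and the clipping.

import numpy as np
from sklearn.isotonic import IsotonicRegression

X = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([1.0, 3.0, 2.0, 6.0, 5.0])

iso = IsotonicRegression(increasing=True, out_of_bounds="clip").fit(X, y)

# Pool-adjacent-violators merges the decreasing pairs, giving fitted values
# [1, 2.5, 2.5, 5.5, 5.5]; queries below X_min_ or above X_max_ are clipped
# to the boundary predictions.
print(iso.predict([0.0, 2.5, 10.0]))   # [1.  2.5 5.5]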
+ """ + check_is_fitted(self, "f_") + class_name = self.__class__.__name__.lower() + return np.asarray([f"{class_name}0"], dtype=object) + + def __getstate__(self): + """Pickle-protocol - return state of the estimator.""" + state = super().__getstate__() + # remove interpolation method + state.pop("f_", None) + return state + + def __setstate__(self, state): + """Pickle-protocol - set state of the estimator. + + We need to rebuild the interpolation function. + """ + super().__setstate__(state) + if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"): + self._build_f(self.X_thresholds_, self.y_thresholds_) + + def _more_tags(self): + return {"X_types": ["1darray"]} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/kernel_approximation.py b/llmeval-env/lib/python3.10/site-packages/sklearn/kernel_approximation.py new file mode 100644 index 0000000000000000000000000000000000000000..bcb1e99520e5b76b1f8ed8c72f3220a89f2431cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/kernel_approximation.py @@ -0,0 +1,1137 @@ +""" +The :mod:`sklearn.kernel_approximation` module implements several +approximate kernel feature maps based on Fourier transforms and Count Sketches. +""" + +# Author: Andreas Mueller +# Daniel Lopez-Sanchez (TensorSketch) + +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy.linalg import svd + +try: + from scipy.fft import fft, ifft +except ImportError: # scipy < 1.4 + from scipy.fftpack import fft, ifft + +from .base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from .metrics.pairwise import KERNEL_PARAMS, PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels +from .utils import check_random_state, deprecated +from .utils._param_validation import Interval, StrOptions +from .utils.extmath import safe_sparse_dot +from .utils.validation import ( + _check_feature_names_in, + check_is_fitted, + check_non_negative, +) + + +class PolynomialCountSketch( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Polynomial kernel approximation via Tensor Sketch. + + Implements Tensor Sketch, which approximates the feature map + of the polynomial kernel:: + + K(X, Y) = (gamma * + coef0)^degree + + by efficiently computing a Count Sketch of the outer product of a + vector with itself using Fast Fourier Transforms (FFT). Read more in the + :ref:`User Guide `. + + .. versionadded:: 0.24 + + Parameters + ---------- + gamma : float, default=1.0 + Parameter of the polynomial kernel whose feature map + will be approximated. + + degree : int, default=2 + Degree of the polynomial kernel whose feature map + will be approximated. + + coef0 : int, default=0 + Constant term of the polynomial kernel whose feature map + will be approximated. + + n_components : int, default=100 + Dimensionality of the output feature space. Usually, `n_components` + should be greater than the number of features in input samples in + order to achieve good performance. The optimal score / run time + balance is typically achieved around `n_components` = 10 * `n_features`, + but this depends on the specific dataset being used. + + random_state : int, RandomState instance, default=None + Determines random number generation for indexHash and bitHash + initialization. Pass an int for reproducible results across multiple + function calls. See :term:`Glossary `. 
+ + Attributes + ---------- + indexHash_ : ndarray of shape (degree, n_features), dtype=int64 + Array of indexes in range [0, n_components) used to represent + the 2-wise independent hash functions for Count Sketch computation. + + bitHash_ : ndarray of shape (degree, n_features), dtype=float32 + Array with random entries in {+1, -1}, used to represent + the 2-wise independent hash functions for Count Sketch computation. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. + Nystroem : Approximate a kernel map using a subset of the training data. + RBFSampler : Approximate a RBF kernel feature map using random Fourier + features. + SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. + sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. + + Examples + -------- + >>> from sklearn.kernel_approximation import PolynomialCountSketch + >>> from sklearn.linear_model import SGDClassifier + >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] + >>> y = [0, 0, 1, 1] + >>> ps = PolynomialCountSketch(degree=3, random_state=1) + >>> X_features = ps.fit_transform(X) + >>> clf = SGDClassifier(max_iter=10, tol=1e-3) + >>> clf.fit(X_features, y) + SGDClassifier(max_iter=10) + >>> clf.score(X_features, y) + 1.0 + + For a more detailed example of usage, see + :ref:`sphx_glr_auto_examples_kernel_approximation_plot_scalable_poly_kernels.py` + """ + + _parameter_constraints: dict = { + "gamma": [Interval(Real, 0, None, closed="left")], + "degree": [Interval(Integral, 1, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "n_components": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__( + self, *, gamma=1.0, degree=2, coef0=0, n_components=100, random_state=None + ): + self.gamma = gamma + self.degree = degree + self.coef0 = coef0 + self.n_components = n_components + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Initializes the internal variables. The method needs no information + about the distribution of data, so we only care about n_features in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X, accept_sparse="csc") + random_state = check_random_state(self.random_state) + + n_features = X.shape[1] + if self.coef0 != 0: + n_features += 1 + + self.indexHash_ = random_state.randint( + 0, high=self.n_components, size=(self.degree, n_features) + ) + + self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features)) + self._n_features_out = self.n_components + return self + + def transform(self, X): + """Generate the feature map approximation for X. 
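The Tensor Sketch construction above is meant to approximate the exact polynomial kernel; a small sketch of that property, assuming `sklearn.metrics.pairwise.polynomial_kernel` for the exact reference values.

import numpy as np
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.metrics.pairwise import polynomial_kernel

rng = np.random.RandomState(0)
X = rng.rand(50, 5)

ps = PolynomialCountSketch(degree=2, gamma=1.0, coef0=0, n_components=500, random_state=0)
Z = ps.fit_transform(X)

# Inner products of the sketched features approximate the exact polynomial kernel,
# and the error shrinks as n_components grows.
exact = polynomial_kernel(X, degree=2, gamma=1.0, coef0=0)
print(np.abs(Z @ Z.T - exact).mean())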
+ + Parameters + ---------- + X : {array-like}, shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + Returns the instance itself. + """ + + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csc", reset=False) + + X_gamma = np.sqrt(self.gamma) * X + + if sp.issparse(X_gamma) and self.coef0 != 0: + X_gamma = sp.hstack( + [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))], + format="csc", + ) + + elif not sp.issparse(X_gamma) and self.coef0 != 0: + X_gamma = np.hstack( + [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))] + ) + + if X_gamma.shape[1] != self.indexHash_.shape[1]: + raise ValueError( + "Number of features of test samples does not" + " match that of training samples." + ) + + count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components)) + + if sp.issparse(X_gamma): + for j in range(X_gamma.shape[1]): + for d in range(self.degree): + iHashIndex = self.indexHash_[d, j] + iHashBit = self.bitHash_[d, j] + count_sketches[:, d, iHashIndex] += ( + (iHashBit * X_gamma[:, [j]]).toarray().ravel() + ) + + else: + for j in range(X_gamma.shape[1]): + for d in range(self.degree): + iHashIndex = self.indexHash_[d, j] + iHashBit = self.bitHash_[d, j] + count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j] + + # For each same, compute a count sketch of phi(x) using the polynomial + # multiplication (via FFT) of p count sketches of x. + count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True) + count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1) + data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True)) + + return data_sketch + + +class RBFSampler(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Approximate a RBF kernel feature map using random Fourier features. + + It implements a variant of Random Kitchen Sinks.[1] + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + gamma : 'scale' or float, default=1.0 + Parameter of RBF kernel: exp(-gamma * x^2). + If ``gamma='scale'`` is passed then it uses + 1 / (n_features * X.var()) as value of gamma. + + .. versionadded:: 1.2 + The option `"scale"` was added in 1.2. + + n_components : int, default=100 + Number of Monte Carlo samples per original feature. + Equals the dimensionality of the computed feature space. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the generation of the random + weights and random offset when fitting the training data. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + random_offset_ : ndarray of shape (n_components,), dtype={np.float64, np.float32} + Random offset used to compute the projection in the `n_components` + dimensions of the feature space. + + random_weights_ : ndarray of shape (n_features, n_components),\ + dtype={np.float64, np.float32} + Random projection directions drawn from the Fourier transform + of the RBF kernel. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. + Nystroem : Approximate a kernel map using a subset of the training data. + PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch. + SkewedChi2Sampler : Approximate feature map for + "skewed chi-squared" kernel. + sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. + + Notes + ----- + See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and + Benjamin Recht. + + [1] "Weighted Sums of Random Kitchen Sinks: Replacing + minimization with randomization in learning" by A. Rahimi and + Benjamin Recht. + (https://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf) + + Examples + -------- + >>> from sklearn.kernel_approximation import RBFSampler + >>> from sklearn.linear_model import SGDClassifier + >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] + >>> y = [0, 0, 1, 1] + >>> rbf_feature = RBFSampler(gamma=1, random_state=1) + >>> X_features = rbf_feature.fit_transform(X) + >>> clf = SGDClassifier(max_iter=5, tol=1e-3) + >>> clf.fit(X_features, y) + SGDClassifier(max_iter=5) + >>> clf.score(X_features, y) + 1.0 + """ + + _parameter_constraints: dict = { + "gamma": [ + StrOptions({"scale"}), + Interval(Real, 0.0, None, closed="left"), + ], + "n_components": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__(self, *, gamma=1.0, n_components=100, random_state=None): + self.gamma = gamma + self.n_components = n_components + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Samples random projection according to n_features. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X, accept_sparse="csr") + random_state = check_random_state(self.random_state) + n_features = X.shape[1] + sparse = sp.issparse(X) + if self.gamma == "scale": + # var = E[X^2] - E[X]^2 if sparse + X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var() + self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0 + else: + self._gamma = self.gamma + self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal( + size=(n_features, self.n_components) + ) + + self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) + + if X.dtype == np.float32: + # Setting the data type of the fitted attribute will ensure the + # output data type during `transform`. + self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False) + self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False) + + self._n_features_out = self.n_components + return self + + def transform(self, X): + """Apply the approximate feature map to X. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + Returns the instance itself. 
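The random Fourier feature map fitted above is designed so that inner products of transformed samples approximate the exact RBF kernel; a short sketch of that, assuming `sklearn.metrics.pairwise.rbf_kernel` for the reference values.

import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.rand(40, 3)

rbf = RBFSampler(gamma=0.5, n_components=2000, random_state=0)
Z = rbf.fit_transform(X)

# The features are cos(X @ w + b) scaled by sqrt(2 / n_components), so Z @ Z.T
# approximates rbf_kernel(X); the error shrinks as n_components grows.
print(np.abs(Z @ Z.T - rbf_kernel(X, gamma=0.5)).max())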
+ """ + check_is_fitted(self) + + X = self._validate_data(X, accept_sparse="csr", reset=False) + projection = safe_sparse_dot(X, self.random_weights_) + projection += self.random_offset_ + np.cos(projection, projection) + projection *= (2.0 / self.n_components) ** 0.5 + return projection + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} + + +class SkewedChi2Sampler( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Approximate feature map for "skewed chi-squared" kernel. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + skewedness : float, default=1.0 + "skewedness" parameter of the kernel. Needs to be cross-validated. + + n_components : int, default=100 + Number of Monte Carlo samples per original feature. + Equals the dimensionality of the computed feature space. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the generation of the random + weights and random offset when fitting the training data. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + random_weights_ : ndarray of shape (n_features, n_components) + Weight array, sampled from a secant hyperbolic distribution, which will + be used to linearly transform the log of the data. + + random_offset_ : ndarray of shape (n_features, n_components) + Bias term, which will be added to the data. It is uniformly distributed + between 0 and 2*pi. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. + Nystroem : Approximate a kernel map using a subset of the training data. + RBFSampler : Approximate a RBF kernel feature map using random Fourier + features. + SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. + sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. + sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. + + References + ---------- + See "Random Fourier Approximations for Skewed Multiplicative Histogram + Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu. + + Examples + -------- + >>> from sklearn.kernel_approximation import SkewedChi2Sampler + >>> from sklearn.linear_model import SGDClassifier + >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] + >>> y = [0, 0, 1, 1] + >>> chi2_feature = SkewedChi2Sampler(skewedness=.01, + ... n_components=10, + ... random_state=0) + >>> X_features = chi2_feature.fit_transform(X, y) + >>> clf = SGDClassifier(max_iter=10, tol=1e-3) + >>> clf.fit(X_features, y) + SGDClassifier(max_iter=10) + >>> clf.score(X_features, y) + 1.0 + """ + + _parameter_constraints: dict = { + "skewedness": [Interval(Real, None, None, closed="neither")], + "n_components": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__(self, *, skewedness=1.0, n_components=100, random_state=None): + self.skewedness = skewedness + self.n_components = n_components + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Samples random projection according to n_features. 
+ + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X) + random_state = check_random_state(self.random_state) + n_features = X.shape[1] + uniform = random_state.uniform(size=(n_features, self.n_components)) + # transform by inverse CDF of sech + self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform)) + self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) + + if X.dtype == np.float32: + # Setting the data type of the fitted attribute will ensure the + # output data type during `transform`. + self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False) + self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False) + + self._n_features_out = self.n_components + return self + + def transform(self, X): + """Apply the approximate feature map to X. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. All values of X must be + strictly greater than "-skewedness". + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + Returns the instance itself. + """ + check_is_fitted(self) + X = self._validate_data( + X, copy=True, dtype=[np.float64, np.float32], reset=False + ) + if (X <= -self.skewedness).any(): + raise ValueError("X may not contain entries smaller than -skewedness.") + + X += self.skewedness + np.log(X, X) + projection = safe_sparse_dot(X, self.random_weights_) + projection += self.random_offset_ + np.cos(projection, projection) + projection *= np.sqrt(2.0) / np.sqrt(self.n_components) + return projection + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} + + +class AdditiveChi2Sampler(TransformerMixin, BaseEstimator): + """Approximate feature map for additive chi2 kernel. + + Uses sampling the fourier transform of the kernel characteristic + at regular intervals. + + Since the kernel that is to be approximated is additive, the components of + the input vectors can be treated separately. Each entry in the original + space is transformed into 2*sample_steps-1 features, where sample_steps is + a parameter of the method. Typical values of sample_steps include 1, 2 and + 3. + + Optimal choices for the sampling interval for certain data ranges can be + computed (see the reference). The default values should be reasonable. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + sample_steps : int, default=2 + Gives the number of (complex) sampling points. + + sample_interval : float, default=None + Sampling interval. Must be specified when sample_steps not in {1,2,3}. + + Attributes + ---------- + sample_interval_ : float + Stored sampling interval. Specified as a parameter if `sample_steps` + not in {1,2,3}. + + .. deprecated:: 1.3 + `sample_interval_` serves internal purposes only and will be removed in 1.5. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of + the chi squared kernel. + + sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. + + sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi + squared kernel. + + Notes + ----- + This estimator approximates a slightly different version of the additive + chi squared kernel then ``metric.additive_chi2`` computes. + + This estimator is stateless and does not need to be fitted. However, we + recommend to call :meth:`fit_transform` instead of :meth:`transform`, as + parameter validation is only performed in :meth:`fit`. + + References + ---------- + See `"Efficient additive kernels via explicit feature maps" + `_ + A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, + 2011 + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.linear_model import SGDClassifier + >>> from sklearn.kernel_approximation import AdditiveChi2Sampler + >>> X, y = load_digits(return_X_y=True) + >>> chi2sampler = AdditiveChi2Sampler(sample_steps=2) + >>> X_transformed = chi2sampler.fit_transform(X, y) + >>> clf = SGDClassifier(max_iter=5, random_state=0, tol=1e-3) + >>> clf.fit(X_transformed, y) + SGDClassifier(max_iter=5, random_state=0) + >>> clf.score(X_transformed, y) + 0.9499... + """ + + _parameter_constraints: dict = { + "sample_steps": [Interval(Integral, 1, None, closed="left")], + "sample_interval": [Interval(Real, 0, None, closed="left"), None], + } + + def __init__(self, *, sample_steps=2, sample_interval=None): + self.sample_steps = sample_steps + self.sample_interval = sample_interval + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the transformer. + """ + X = self._validate_data(X, accept_sparse="csr") + check_non_negative(X, "X in AdditiveChi2Sampler.fit") + + # TODO(1.5): remove the setting of _sample_interval from fit + if self.sample_interval is None: + # See figure 2 c) of "Efficient additive kernels via explicit feature maps" + # + # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, + # 2011 + if self.sample_steps == 1: + self._sample_interval = 0.8 + elif self.sample_steps == 2: + self._sample_interval = 0.5 + elif self.sample_steps == 3: + self._sample_interval = 0.4 + else: + raise ValueError( + "If sample_steps is not in [1, 2, 3]," + " you need to provide sample_interval" + ) + else: + self._sample_interval = self.sample_interval + + return self + + # TODO(1.5): remove + @deprecated( # type: ignore + "The ``sample_interval_`` attribute was deprecated in version 1.3 and " + "will be removed 1.5." + ) + @property + def sample_interval_(self): + return self._sample_interval + + def transform(self, X): + """Apply approximate feature map to X. 
+ + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : {ndarray, sparse matrix}, \ + shape = (n_samples, n_features * (2*sample_steps - 1)) + Whether the return value is an array or sparse matrix depends on + the type of the input X. + """ + X = self._validate_data(X, accept_sparse="csr", reset=False) + check_non_negative(X, "X in AdditiveChi2Sampler.transform") + sparse = sp.issparse(X) + + if hasattr(self, "_sample_interval"): + # TODO(1.5): remove this branch + sample_interval = self._sample_interval + + else: + if self.sample_interval is None: + # See figure 2 c) of "Efficient additive kernels via explicit feature maps" # noqa + # + # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, # noqa + # 2011 + if self.sample_steps == 1: + sample_interval = 0.8 + elif self.sample_steps == 2: + sample_interval = 0.5 + elif self.sample_steps == 3: + sample_interval = 0.4 + else: + raise ValueError( + "If sample_steps is not in [1, 2, 3]," + " you need to provide sample_interval" + ) + else: + sample_interval = self.sample_interval + + # zeroth component + # 1/cosh = sech + # cosh(0) = 1.0 + transf = self._transform_sparse if sparse else self._transform_dense + return transf(X, self.sample_steps, sample_interval) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Only used to validate feature names with the names seen in :meth:`fit`. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
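The expansion implemented above turns each input feature into `2 * sample_steps - 1` output features (one sqrt component plus cos/sin pairs); a minimal sketch of that shape behaviour, using non-negative toy data since the chi-squared kernels require it.

import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

X = np.random.RandomState(0).rand(6, 4)    # uniform in [0, 1), so non-negative

chi2 = AdditiveChi2Sampler(sample_steps=2)
X_new = chi2.fit_transform(X)

# One sqrt component plus (sample_steps - 1) cos/sin pairs per original feature.
print(X.shape, "->", X_new.shape)           # (6, 4) -> (6, 12)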
+ """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in( + self, input_features, generate_names=True + ) + est_name = self.__class__.__name__.lower() + + names_list = [f"{est_name}_{name}_sqrt" for name in input_features] + + for j in range(1, self.sample_steps): + cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features] + sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features] + names_list.extend(cos_names + sin_names) + + return np.asarray(names_list, dtype=object) + + @staticmethod + def _transform_dense(X, sample_steps, sample_interval): + non_zero = X != 0.0 + X_nz = X[non_zero] + + X_step = np.zeros_like(X) + X_step[non_zero] = np.sqrt(X_nz * sample_interval) + + X_new = [X_step] + + log_step_nz = sample_interval * np.log(X_nz) + step_nz = 2 * X_nz * sample_interval + + for j in range(1, sample_steps): + factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval)) + + X_step = np.zeros_like(X) + X_step[non_zero] = factor_nz * np.cos(j * log_step_nz) + X_new.append(X_step) + + X_step = np.zeros_like(X) + X_step[non_zero] = factor_nz * np.sin(j * log_step_nz) + X_new.append(X_step) + + return np.hstack(X_new) + + @staticmethod + def _transform_sparse(X, sample_steps, sample_interval): + indices = X.indices.copy() + indptr = X.indptr.copy() + + data_step = np.sqrt(X.data * sample_interval) + X_step = sp.csr_matrix( + (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False + ) + X_new = [X_step] + + log_step_nz = sample_interval * np.log(X.data) + step_nz = 2 * X.data * sample_interval + + for j in range(1, sample_steps): + factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval)) + + data_step = factor_nz * np.cos(j * log_step_nz) + X_step = sp.csr_matrix( + (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False + ) + X_new.append(X_step) + + data_step = factor_nz * np.sin(j * log_step_nz) + X_step = sp.csr_matrix( + (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False + ) + X_new.append(X_step) + + return sp.hstack(X_new) + + def _more_tags(self): + return {"stateless": True, "requires_positive_X": True} + + +class Nystroem(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Approximate a kernel map using a subset of the training data. + + Constructs an approximate feature map for an arbitrary kernel + using a subset of the data as basis. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + kernel : str or callable, default='rbf' + Kernel map to be approximated. A callable should accept two arguments + and the keyword arguments passed to this object as `kernel_params`, and + should return a floating point number. + + gamma : float, default=None + Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 + and sigmoid kernels. Interpretation of the default value is left to + the kernel; see the documentation for sklearn.metrics.pairwise. + Ignored by other kernels. + + coef0 : float, default=None + Zero coefficient for polynomial and sigmoid kernels. + Ignored by other kernels. + + degree : float, default=None + Degree of the polynomial kernel. Ignored by other kernels. + + kernel_params : dict, default=None + Additional parameters (keyword arguments) for kernel function passed + as callable object. + + n_components : int, default=100 + Number of features to construct. + How many data points will be used to construct the mapping. 
+ + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the uniform sampling without + replacement of `n_components` of the training data to construct the + basis kernel. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_jobs : int, default=None + The number of jobs to use for the computation. This works by breaking + down the kernel matrix into `n_jobs` even slices and computing them in + parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.24 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Subset of training points used to construct the feature map. + + component_indices_ : ndarray of shape (n_components) + Indices of ``components_`` in the training set. + + normalization_ : ndarray of shape (n_components, n_components) + Normalization matrix needed for embedding. + Square root of the kernel matrix on ``components_``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. + PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch. + RBFSampler : Approximate a RBF kernel feature map using random Fourier + features. + SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. + sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. + + References + ---------- + * Williams, C.K.I. and Seeger, M. + "Using the Nystroem method to speed up kernel machines", + Advances in neural information processing systems 2001 + + * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou + "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical + Comparison", + Advances in Neural Information Processing Systems 2012 + + Examples + -------- + >>> from sklearn import datasets, svm + >>> from sklearn.kernel_approximation import Nystroem + >>> X, y = datasets.load_digits(n_class=9, return_X_y=True) + >>> data = X / 16. + >>> clf = svm.LinearSVC(dual="auto") + >>> feature_map_nystroem = Nystroem(gamma=.2, + ... random_state=1, + ... n_components=300) + >>> data_transformed = feature_map_nystroem.fit_transform(data) + >>> clf.fit(data_transformed, y) + LinearSVC(dual='auto') + >>> clf.score(data_transformed, y) + 0.9987... 
+ """ + + _parameter_constraints: dict = { + "kernel": [ + StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}), + callable, + ], + "gamma": [Interval(Real, 0, None, closed="left"), None], + "coef0": [Interval(Real, None, None, closed="neither"), None], + "degree": [Interval(Real, 1, None, closed="left"), None], + "kernel_params": [dict, None], + "n_components": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "n_jobs": [Integral, None], + } + + def __init__( + self, + kernel="rbf", + *, + gamma=None, + coef0=None, + degree=None, + kernel_params=None, + n_components=100, + random_state=None, + n_jobs=None, + ): + self.kernel = kernel + self.gamma = gamma + self.coef0 = coef0 + self.degree = degree + self.kernel_params = kernel_params + self.n_components = n_components + self.random_state = random_state + self.n_jobs = n_jobs + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit estimator to data. + + Samples a subset of training points, computes kernel + on these and computes normalization matrix. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X, accept_sparse="csr") + rnd = check_random_state(self.random_state) + n_samples = X.shape[0] + + # get basis vectors + if self.n_components > n_samples: + # XXX should we just bail? + n_components = n_samples + warnings.warn( + "n_components > n_samples. This is not possible.\n" + "n_components was set to n_samples, which results" + " in inefficient evaluation of the full kernel." + ) + + else: + n_components = self.n_components + n_components = min(n_samples, n_components) + inds = rnd.permutation(n_samples) + basis_inds = inds[:n_components] + basis = X[basis_inds] + + basis_kernel = pairwise_kernels( + basis, + metric=self.kernel, + filter_params=True, + n_jobs=self.n_jobs, + **self._get_kernel_params(), + ) + + # sqrt of kernel matrix on basis vectors + U, S, V = svd(basis_kernel) + S = np.maximum(S, 1e-12) + self.normalization_ = np.dot(U / np.sqrt(S), V) + self.components_ = basis + self.component_indices_ = basis_inds + self._n_features_out = n_components + return self + + def transform(self, X): + """Apply feature map to X. + + Computes an approximate feature map using the kernel + between some training points and X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to transform. + + Returns + ------- + X_transformed : ndarray of shape (n_samples, n_components) + Transformed data. 
+ """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csr", reset=False) + + kernel_params = self._get_kernel_params() + embedded = pairwise_kernels( + X, + self.components_, + metric=self.kernel, + filter_params=True, + n_jobs=self.n_jobs, + **kernel_params, + ) + return np.dot(embedded, self.normalization_.T) + + def _get_kernel_params(self): + params = self.kernel_params + if params is None: + params = {} + if not callable(self.kernel) and self.kernel != "precomputed": + for param in KERNEL_PARAMS[self.kernel]: + if getattr(self, param) is not None: + params[param] = getattr(self, param) + else: + if ( + self.gamma is not None + or self.coef0 is not None + or self.degree is not None + ): + raise ValueError( + "Don't pass gamma, coef0 or degree to " + "Nystroem if using a callable " + "or precomputed kernel" + ) + + return params + + def _more_tags(self): + return { + "_xfail_checks": { + "check_transformer_preserve_dtypes": ( + "dtypes are preserved but not at a close enough precision" + ) + }, + "preserves_dtype": [np.float64, np.float32], + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/kernel_ridge.py b/llmeval-env/lib/python3.10/site-packages/sklearn/kernel_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..23890f3a68cd79bd3e6c8bcea28c82fc572f69db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/kernel_ridge.py @@ -0,0 +1,237 @@ +"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression.""" + +# Authors: Mathieu Blondel +# Jan Hendrik Metzen +# License: BSD 3 clause +from numbers import Real + +import numpy as np + +from .base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context +from .linear_model._ridge import _solve_cholesky_kernel +from .metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels +from .utils._param_validation import Interval, StrOptions +from .utils.validation import _check_sample_weight, check_is_fitted + + +class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator): + """Kernel ridge regression. + + Kernel ridge regression (KRR) combines ridge regression (linear least + squares with l2-norm regularization) with the kernel trick. It thus + learns a linear function in the space induced by the respective kernel and + the data. For non-linear kernels, this corresponds to a non-linear + function in the original space. + + The form of the model learned by KRR is identical to support vector + regression (SVR). However, different loss functions are used: KRR uses + squared error loss while support vector regression uses epsilon-insensitive + loss, both combined with l2 regularization. In contrast to SVR, fitting a + KRR model can be done in closed-form and is typically faster for + medium-sized datasets. On the other hand, the learned model is non-sparse + and thus slower than SVR, which learns a sparse model for epsilon > 0, at + prediction-time. + + This estimator has built-in support for multi-variate regression + (i.e., when y is a 2d-array of shape [n_samples, n_targets]). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float or array-like of shape (n_targets,), default=1.0 + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. 
+ Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. See :ref:`ridge_regression` for formula. + + kernel : str or callable, default="linear" + Kernel mapping used internally. This parameter is directly passed to + :class:`~sklearn.metrics.pairwise.pairwise_kernels`. + If `kernel` is a string, it must be one of the metrics + in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed". + If `kernel` is "precomputed", X is assumed to be a kernel matrix. + Alternatively, if `kernel` is a callable function, it is called on + each pair of instances (rows) and the resulting value recorded. The + callable should take two rows from X as input and return the + corresponding kernel value as a single number. This means that + callables from :mod:`sklearn.metrics.pairwise` are not allowed, as + they operate on matrices, not single samples. Use the string + identifying the kernel instead. + + gamma : float, default=None + Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 + and sigmoid kernels. Interpretation of the default value is left to + the kernel; see the documentation for sklearn.metrics.pairwise. + Ignored by other kernels. + + degree : float, default=3 + Degree of the polynomial kernel. Ignored by other kernels. + + coef0 : float, default=1 + Zero coefficient for polynomial and sigmoid kernels. + Ignored by other kernels. + + kernel_params : dict, default=None + Additional parameters (keyword arguments) for kernel function passed + as callable object. + + Attributes + ---------- + dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets) + Representation of weight vector(s) in kernel space + + X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data, which is also required for prediction. If + kernel == "precomputed" this is instead the precomputed + training matrix, of shape (n_samples, n_samples). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.gaussian_process.GaussianProcessRegressor : Gaussian + Process regressor providing automatic kernel hyperparameters + tuning and predictions uncertainty. + sklearn.linear_model.Ridge : Linear ridge regression. + sklearn.linear_model.RidgeCV : Ridge regression with built-in + cross-validation. + sklearn.svm.SVR : Support Vector Regression accepting a large variety + of kernels. + + References + ---------- + * Kevin P. Murphy + "Machine Learning: A Probabilistic Perspective", The MIT Press + chapter 14.4.3, pp. 
492-493 + + Examples + -------- + >>> from sklearn.kernel_ridge import KernelRidge + >>> import numpy as np + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> krr = KernelRidge(alpha=1.0) + >>> krr.fit(X, y) + KernelRidge(alpha=1.0) + """ + + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], + "kernel": [ + StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}), + callable, + ], + "gamma": [Interval(Real, 0, None, closed="left"), None], + "degree": [Interval(Real, 0, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "kernel_params": [dict, None], + } + + def __init__( + self, + alpha=1, + *, + kernel="linear", + gamma=None, + degree=3, + coef0=1, + kernel_params=None, + ): + self.alpha = alpha + self.kernel = kernel + self.gamma = gamma + self.degree = degree + self.coef0 = coef0 + self.kernel_params = kernel_params + + def _get_kernel(self, X, Y=None): + if callable(self.kernel): + params = self.kernel_params or {} + else: + params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0} + return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params) + + def _more_tags(self): + return {"pairwise": self.kernel == "precomputed"} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Kernel Ridge regression model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. If kernel == "precomputed" this is instead + a precomputed kernel matrix, of shape (n_samples, n_samples). + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or array-like of shape (n_samples,), default=None + Individual weights for each sample, ignored if None is passed. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Convert data + X, y = self._validate_data( + X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True + ) + if sample_weight is not None and not isinstance(sample_weight, float): + sample_weight = _check_sample_weight(sample_weight, X) + + K = self._get_kernel(X) + alpha = np.atleast_1d(self.alpha) + + ravel = False + if len(y.shape) == 1: + y = y.reshape(-1, 1) + ravel = True + + copy = self.kernel == "precomputed" + self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy) + if ravel: + self.dual_coef_ = self.dual_coef_.ravel() + + self.X_fit_ = X + + return self + + def predict(self, X): + """Predict using the kernel ridge model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. If kernel == "precomputed" this is instead a + precomputed kernel matrix, shape = [n_samples, + n_samples_fitted], where n_samples_fitted is the number of + samples used in the fitting for this estimator. + + Returns + ------- + C : ndarray of shape (n_samples,) or (n_samples, n_targets) + Returns predicted values. 
+ """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse=("csr", "csc"), reset=False) + K = self._get_kernel(X, self.X_fit_) + return np.dot(K, self.dual_coef_) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..45c99d4d36df1f13b9ac23372f86267c8fdfe14f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/__init__.py @@ -0,0 +1,100 @@ +""" +The :mod:`sklearn.linear_model` module implements a variety of linear models. +""" + +# See http://scikit-learn.sourceforge.net/modules/sgd.html and +# http://scikit-learn.sourceforge.net/modules/linear_model.html for +# complete documentation. + +from ._base import LinearRegression +from ._bayes import ARDRegression, BayesianRidge +from ._coordinate_descent import ( + ElasticNet, + ElasticNetCV, + Lasso, + LassoCV, + MultiTaskElasticNet, + MultiTaskElasticNetCV, + MultiTaskLasso, + MultiTaskLassoCV, + enet_path, + lasso_path, +) +from ._glm import GammaRegressor, PoissonRegressor, TweedieRegressor +from ._huber import HuberRegressor +from ._least_angle import ( + Lars, + LarsCV, + LassoLars, + LassoLarsCV, + LassoLarsIC, + lars_path, + lars_path_gram, +) +from ._logistic import LogisticRegression, LogisticRegressionCV +from ._omp import ( + OrthogonalMatchingPursuit, + OrthogonalMatchingPursuitCV, + orthogonal_mp, + orthogonal_mp_gram, +) +from ._passive_aggressive import PassiveAggressiveClassifier, PassiveAggressiveRegressor +from ._perceptron import Perceptron +from ._quantile import QuantileRegressor +from ._ransac import RANSACRegressor +from ._ridge import Ridge, RidgeClassifier, RidgeClassifierCV, RidgeCV, ridge_regression +from ._sgd_fast import Hinge, Huber, Log, ModifiedHuber, SquaredLoss +from ._stochastic_gradient import SGDClassifier, SGDOneClassSVM, SGDRegressor +from ._theil_sen import TheilSenRegressor + +__all__ = [ + "ARDRegression", + "BayesianRidge", + "ElasticNet", + "ElasticNetCV", + "Hinge", + "Huber", + "HuberRegressor", + "Lars", + "LarsCV", + "Lasso", + "LassoCV", + "LassoLars", + "LassoLarsCV", + "LassoLarsIC", + "LinearRegression", + "Log", + "LogisticRegression", + "LogisticRegressionCV", + "ModifiedHuber", + "MultiTaskElasticNet", + "MultiTaskElasticNetCV", + "MultiTaskLasso", + "MultiTaskLassoCV", + "OrthogonalMatchingPursuit", + "OrthogonalMatchingPursuitCV", + "PassiveAggressiveClassifier", + "PassiveAggressiveRegressor", + "Perceptron", + "QuantileRegressor", + "Ridge", + "RidgeCV", + "RidgeClassifier", + "RidgeClassifierCV", + "SGDClassifier", + "SGDRegressor", + "SGDOneClassSVM", + "SquaredLoss", + "TheilSenRegressor", + "enet_path", + "lars_path", + "lars_path_gram", + "lasso_path", + "orthogonal_mp", + "orthogonal_mp_gram", + "ridge_regression", + "RANSACRegressor", + "PoissonRegressor", + "GammaRegressor", + "TweedieRegressor", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..f07e974542a5bd61eb88e62602a18695d7ad089c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_base.py @@ -0,0 +1,814 @@ +""" +Generalized Linear Models. 
+""" + +# Author: Alexandre Gramfort +# Fabian Pedregosa +# Olivier Grisel +# Vincent Michel +# Peter Prettenhofer +# Mathieu Blondel +# Lars Buitinck +# Maryan Morel +# Giorgio Patrini +# Maria Telenczuk +# License: BSD 3 clause + +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral + +import numpy as np +import scipy.sparse as sp +from scipy import linalg, optimize, sparse +from scipy.sparse.linalg import lsqr +from scipy.special import expit + +from ..base import ( + BaseEstimator, + ClassifierMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, +) +from ..utils import check_array, check_random_state +from ..utils._array_api import get_namespace +from ..utils._seq_dataset import ( + ArrayDataset32, + ArrayDataset64, + CSRDataset32, + CSRDataset64, +) +from ..utils.extmath import safe_sparse_dot +from ..utils.parallel import Parallel, delayed +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.validation import FLOAT_DTYPES, _check_sample_weight, check_is_fitted + +# TODO: bayesian_ridge_regression and bayesian_regression_ard +# should be squashed into its respective objects. + +SPARSE_INTERCEPT_DECAY = 0.01 +# For sparse data intercept updates are scaled by this decay factor to avoid +# intercept oscillation. + + +def make_dataset(X, y, sample_weight, random_state=None): + """Create ``Dataset`` abstraction for sparse and dense inputs. + + This also returns the ``intercept_decay`` which is different + for sparse datasets. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training data + + y : array-like, shape (n_samples, ) + Target values. + + sample_weight : numpy array of shape (n_samples,) + The weight of each sample + + random_state : int, RandomState instance or None (default) + Determines random number generation for dataset random sampling. It is not + used for dataset shuffling. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + dataset + The ``Dataset`` abstraction + intercept_decay + The intercept decay + """ + + rng = check_random_state(random_state) + # seed should never be 0 in SequentialDataset64 + seed = rng.randint(1, np.iinfo(np.int32).max) + + if X.dtype == np.float32: + CSRData = CSRDataset32 + ArrayData = ArrayDataset32 + else: + CSRData = CSRDataset64 + ArrayData = ArrayDataset64 + + if sp.issparse(X): + dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed) + intercept_decay = SPARSE_INTERCEPT_DECAY + else: + X = np.ascontiguousarray(X) + dataset = ArrayData(X, y, sample_weight, seed=seed) + intercept_decay = 1.0 + + return dataset, intercept_decay + + +def _preprocess_data( + X, + y, + *, + fit_intercept, + copy=True, + copy_y=True, + sample_weight=None, + check_input=True, +): + """Common data preprocessing for fitting linear models. + + This helper is in charge of the following steps: + + - Ensure that `sample_weight` is an array or `None`. + - If `check_input=True`, perform standard input validation of `X`, `y`. + - Perform copies if requested to avoid side-effects in case of inplace + modifications of the input. + + Then, if `fit_intercept=True` this preprocessing centers both `X` and `y` as + follows: + - if `X` is dense, center the data and + store the mean vector in `X_offset`. + - if `X` is sparse, store the mean in `X_offset` + without centering `X`. The centering is expected to be handled by the + linear solver where appropriate. 
+ - in either case, always center `y` and store the mean in `y_offset`. + - both `X_offset` and `y_offset` are always weighted by `sample_weight` + if not set to `None`. + + If `fit_intercept=False`, no centering is performed and `X_offset`, `y_offset` + are set to zero. + + Returns + ------- + X_out : {ndarray, sparse matrix} of shape (n_samples, n_features) + If copy=True a copy of the input X is triggered, otherwise operations are + inplace. + If input X is dense, then X_out is centered. + y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets) + Centered version of y. Possibly performed inplace on input y depending + on the copy_y parameter. + X_offset : ndarray of shape (n_features,) + The mean per column of input X. + y_offset : float or ndarray of shape (n_features,) + X_scale : ndarray of shape (n_features,) + Always an array of ones. TODO: refactor the code base to make it + possible to remove this unused variable. + """ + if isinstance(sample_weight, numbers.Number): + sample_weight = None + if sample_weight is not None: + sample_weight = np.asarray(sample_weight) + + if check_input: + X = check_array(X, copy=copy, accept_sparse=["csr", "csc"], dtype=FLOAT_DTYPES) + y = check_array(y, dtype=X.dtype, copy=copy_y, ensure_2d=False) + else: + y = y.astype(X.dtype, copy=copy_y) + if copy: + if sp.issparse(X): + X = X.copy() + else: + X = X.copy(order="K") + + if fit_intercept: + if sp.issparse(X): + X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight) + else: + X_offset = np.average(X, axis=0, weights=sample_weight) + + X_offset = X_offset.astype(X.dtype, copy=False) + X -= X_offset + + y_offset = np.average(y, axis=0, weights=sample_weight) + y -= y_offset + else: + X_offset = np.zeros(X.shape[1], dtype=X.dtype) + if y.ndim == 1: + y_offset = X.dtype.type(0) + else: + y_offset = np.zeros(y.shape[1], dtype=X.dtype) + + # XXX: X_scale is no longer needed. It is an historic artifact from the + # time where linear model exposed the normalize parameter. + X_scale = np.ones(X.shape[1], dtype=X.dtype) + return X, y, X_offset, y_offset, X_scale + + +# TODO: _rescale_data should be factored into _preprocess_data. +# Currently, the fact that sag implements its own way to deal with +# sample_weight makes the refactoring tricky. + + +def _rescale_data(X, y, sample_weight, inplace=False): + """Rescale data sample-wise by square root of sample_weight. + + For many linear models, this enables easy support for sample_weight because + + (y - X w)' S (y - X w) + + with S = diag(sample_weight) becomes + + ||y_rescaled - X_rescaled w||_2^2 + + when setting + + y_rescaled = sqrt(S) y + X_rescaled = sqrt(S) X + + Returns + ------- + X_rescaled : {array-like, sparse matrix} + + y_rescaled : {array-like, sparse matrix} + """ + # Assume that _validate_data and _check_sample_weight have been called by + # the caller. 
+ n_samples = X.shape[0] + sample_weight_sqrt = np.sqrt(sample_weight) + + if sp.issparse(X) or sp.issparse(y): + sw_matrix = sparse.dia_matrix( + (sample_weight_sqrt, 0), shape=(n_samples, n_samples) + ) + + if sp.issparse(X): + X = safe_sparse_dot(sw_matrix, X) + else: + if inplace: + X *= sample_weight_sqrt[:, np.newaxis] + else: + X = X * sample_weight_sqrt[:, np.newaxis] + + if sp.issparse(y): + y = safe_sparse_dot(sw_matrix, y) + else: + if inplace: + if y.ndim == 1: + y *= sample_weight_sqrt + else: + y *= sample_weight_sqrt[:, np.newaxis] + else: + if y.ndim == 1: + y = y * sample_weight_sqrt + else: + y = y * sample_weight_sqrt[:, np.newaxis] + return X, y, sample_weight_sqrt + + +class LinearModel(BaseEstimator, metaclass=ABCMeta): + """Base class for Linear Models""" + + @abstractmethod + def fit(self, X, y): + """Fit model.""" + + def _decision_function(self, X): + check_is_fitted(self) + + X = self._validate_data(X, accept_sparse=["csr", "csc", "coo"], reset=False) + return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + + def predict(self, X): + """ + Predict using the linear model. + + Parameters + ---------- + X : array-like or sparse matrix, shape (n_samples, n_features) + Samples. + + Returns + ------- + C : array, shape (n_samples,) + Returns predicted values. + """ + return self._decision_function(X) + + def _set_intercept(self, X_offset, y_offset, X_scale): + """Set the intercept_""" + if self.fit_intercept: + # We always want coef_.dtype=X.dtype. For instance, X.dtype can differ from + # coef_.dtype if warm_start=True. + self.coef_ = np.divide(self.coef_, X_scale, dtype=X_scale.dtype) + self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T) + else: + self.intercept_ = 0.0 + + def _more_tags(self): + return {"requires_y": True} + + +# XXX Should this derive from LinearModel? It should be a mixin, not an ABC. +# Maybe the n_features checking can be moved to LinearModel. +class LinearClassifierMixin(ClassifierMixin): + """Mixin for linear classifiers. + + Handles prediction for sparse and dense X. + """ + + def decision_function(self, X): + """ + Predict confidence scores for samples. + + The confidence score for a sample is proportional to the signed + distance of that sample to the hyperplane. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data matrix for which we want to get the confidence scores. + + Returns + ------- + scores : ndarray of shape (n_samples,) or (n_samples, n_classes) + Confidence scores per `(n_samples, n_classes)` combination. In the + binary case, confidence score for `self.classes_[1]` where >0 means + this class would be predicted. + """ + check_is_fitted(self) + xp, _ = get_namespace(X) + + X = self._validate_data(X, accept_sparse="csr", reset=False) + scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + return xp.reshape(scores, (-1,)) if scores.shape[1] == 1 else scores + + def predict(self, X): + """ + Predict class labels for samples in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data matrix for which we want to get the predictions. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Vector containing the class labels for each sample. 
+ """ + xp, _ = get_namespace(X) + scores = self.decision_function(X) + if len(scores.shape) == 1: + indices = xp.astype(scores > 0, int) + else: + indices = xp.argmax(scores, axis=1) + + return xp.take(self.classes_, indices, axis=0) + + def _predict_proba_lr(self, X): + """Probability estimation for OvR logistic regression. + + Positive class probabilities are computed as + 1. / (1. + np.exp(-self.decision_function(X))); + multiclass is handled by normalizing that over all classes. + """ + prob = self.decision_function(X) + expit(prob, out=prob) + if prob.ndim == 1: + return np.vstack([1 - prob, prob]).T + else: + # OvR normalization, like LibLinear's predict_probability + prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) + return prob + + +class SparseCoefMixin: + """Mixin for converting coef_ to and from CSR format. + + L1-regularizing estimators should inherit this. + """ + + def densify(self): + """ + Convert coefficient matrix to dense array format. + + Converts the ``coef_`` member (back) to a numpy.ndarray. This is the + default format of ``coef_`` and is required for fitting, so calling + this method is only required on models that have previously been + sparsified; otherwise, it is a no-op. + + Returns + ------- + self + Fitted estimator. + """ + msg = "Estimator, %(name)s, must be fitted before densifying." + check_is_fitted(self, msg=msg) + if sp.issparse(self.coef_): + self.coef_ = self.coef_.toarray() + return self + + def sparsify(self): + """ + Convert coefficient matrix to sparse format. + + Converts the ``coef_`` member to a scipy.sparse matrix, which for + L1-regularized models can be much more memory- and storage-efficient + than the usual numpy.ndarray representation. + + The ``intercept_`` member is not converted. + + Returns + ------- + self + Fitted estimator. + + Notes + ----- + For non-sparse models, i.e. when there are not many zeros in ``coef_``, + this may actually *increase* memory usage, so use this method with + care. A rule of thumb is that the number of zero elements, which can + be computed with ``(coef_ == 0).sum()``, must be more than 50% for this + to provide significant benefits. + + After calling this method, further fitting with the partial_fit + method (if any) will not work until you call densify. + """ + msg = "Estimator, %(name)s, must be fitted before sparsifying." + check_is_fitted(self, msg=msg) + self.coef_ = sp.csr_matrix(self.coef_) + return self + + +class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel): + """ + Ordinary least squares Linear Regression. + + LinearRegression fits a linear model with coefficients w = (w1, ..., wp) + to minimize the residual sum of squares between the observed targets in + the dataset, and the targets predicted by the linear approximation. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to False, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + n_jobs : int, default=None + The number of jobs to use for the computation. This will only provide + speedup in case of sufficiently large problems, that is if firstly + `n_targets > 1` and secondly `X` is sparse or if `positive` is set + to `True`. ``None`` means 1 unless in a + :obj:`joblib.parallel_backend` context. ``-1`` means using all + processors. See :term:`Glossary ` for more details. 
+ + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. This + option is only supported for dense arrays. + + .. versionadded:: 0.24 + + Attributes + ---------- + coef_ : array of shape (n_features, ) or (n_targets, n_features) + Estimated coefficients for the linear regression problem. + If multiple targets are passed during the fit (y 2D), this + is a 2D array of shape (n_targets, n_features), while if only + one target is passed, this is a 1D array of length n_features. + + rank_ : int + Rank of matrix `X`. Only available when `X` is dense. + + singular_ : array of shape (min(X, y),) + Singular values of `X`. Only available when `X` is dense. + + intercept_ : float or array of shape (n_targets,) + Independent term in the linear model. Set to 0.0 if + `fit_intercept = False`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression addresses some of the + problems of Ordinary Least Squares by imposing a penalty on the + size of the coefficients with l2 regularization. + Lasso : The Lasso is a linear model that estimates + sparse coefficients with l1 regularization. + ElasticNet : Elastic-Net is a linear regression + model trained with both l1 and l2 -norm regularization of the + coefficients. + + Notes + ----- + From the implementation point of view, this is just plain Ordinary + Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares + (scipy.optimize.nnls) wrapped as a predictor object. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import LinearRegression + >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) + >>> # y = 1 * x_0 + 2 * x_1 + 3 + >>> y = np.dot(X, np.array([1, 2])) + 3 + >>> reg = LinearRegression().fit(X, y) + >>> reg.score(X, y) + 1.0 + >>> reg.coef_ + array([1., 2.]) + >>> reg.intercept_ + 3.0... + >>> reg.predict(np.array([[3, 5]])) + array([16.]) + """ + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "n_jobs": [None, Integral], + "positive": ["boolean"], + } + + def __init__( + self, + *, + fit_intercept=True, + copy_X=True, + n_jobs=None, + positive=False, + ): + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.n_jobs = n_jobs + self.positive = positive + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """ + Fit linear model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : array-like of shape (n_samples,), default=None + Individual weights for each sample. + + .. versionadded:: 0.17 + parameter *sample_weight* support to LinearRegression. + + Returns + ------- + self : object + Fitted Estimator. 
+ """ + n_jobs_ = self.n_jobs + + accept_sparse = False if self.positive else ["csr", "csc", "coo"] + + X, y = self._validate_data( + X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True + ) + + has_sw = sample_weight is not None + if has_sw: + sample_weight = _check_sample_weight( + sample_weight, X, dtype=X.dtype, only_non_negative=True + ) + + # Note that neither _rescale_data nor the rest of the fit method of + # LinearRegression can benefit from in-place operations when X is a + # sparse matrix. Therefore, let's not copy X when it is sparse. + copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X) + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=copy_X_in_preprocess_data, + sample_weight=sample_weight, + ) + + if has_sw: + # Sample weight can be implemented via a simple rescaling. Note + # that we safely do inplace rescaling when _preprocess_data has + # already made a copy if requested. + X, y, sample_weight_sqrt = _rescale_data( + X, y, sample_weight, inplace=copy_X_in_preprocess_data + ) + + if self.positive: + if y.ndim < 2: + self.coef_ = optimize.nnls(X, y)[0] + else: + # scipy.optimize.nnls cannot handle y with shape (M, K) + outs = Parallel(n_jobs=n_jobs_)( + delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1]) + ) + self.coef_ = np.vstack([out[0] for out in outs]) + elif sp.issparse(X): + X_offset_scale = X_offset / X_scale + + if has_sw: + + def matvec(b): + return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale) + + def rmatvec(b): + return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt) + + else: + + def matvec(b): + return X.dot(b) - b.dot(X_offset_scale) + + def rmatvec(b): + return X.T.dot(b) - X_offset_scale * b.sum() + + X_centered = sparse.linalg.LinearOperator( + shape=X.shape, matvec=matvec, rmatvec=rmatvec + ) + + if y.ndim < 2: + self.coef_ = lsqr(X_centered, y)[0] + else: + # sparse_lstsq cannot handle y with shape (M, K) + outs = Parallel(n_jobs=n_jobs_)( + delayed(lsqr)(X_centered, y[:, j].ravel()) + for j in range(y.shape[1]) + ) + self.coef_ = np.vstack([out[0] for out in outs]) + else: + self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y) + self.coef_ = self.coef_.T + + if y.ndim == 1: + self.coef_ = np.ravel(self.coef_) + self._set_intercept(X_offset, y_offset, X_scale) + return self + + +def _check_precomputed_gram_matrix( + X, precompute, X_offset, X_scale, rtol=None, atol=1e-5 +): + """Computes a single element of the gram matrix and compares it to + the corresponding element of the user supplied gram matrix. + + If the values do not match a ValueError will be thrown. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data array. + + precompute : array-like of shape (n_features, n_features) + User-supplied gram matrix. + + X_offset : ndarray of shape (n_features,) + Array of feature means used to center design matrix. + + X_scale : ndarray of shape (n_features,) + Array of feature scale factors used to normalize design matrix. + + rtol : float, default=None + Relative tolerance; see numpy.allclose + If None, it is set to 1e-4 for arrays of dtype numpy.float32 and 1e-7 + otherwise. + + atol : float, default=1e-5 + absolute tolerance; see :func`numpy.allclose`. Note that the default + here is more tolerant than the default for + :func:`numpy.testing.assert_allclose`, where `atol=0`. + + Raises + ------ + ValueError + Raised when the provided Gram matrix is not consistent. 
+ """ + + n_features = X.shape[1] + f1 = n_features // 2 + f2 = min(f1 + 1, n_features - 1) + + v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1] + v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2] + + expected = np.dot(v1, v2) + actual = precompute[f1, f2] + + dtypes = [precompute.dtype, expected.dtype] + if rtol is None: + rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes] + rtol = max(rtols) + + if not np.isclose(expected, actual, rtol=rtol, atol=atol): + raise ValueError( + "Gram matrix passed in via 'precompute' parameter " + "did not pass validation when a single element was " + "checked - please check that it was computed " + f"properly. For element ({f1},{f2}) we computed " + f"{expected} but the user-supplied value was " + f"{actual}." + ) + + +def _pre_fit( + X, + y, + Xy, + precompute, + fit_intercept, + copy, + check_input=True, + sample_weight=None, +): + """Function used at beginning of fit in linear models with L1 or L0 penalty. + + This function applies _preprocess_data and additionally computes the gram matrix + `precompute` as needed as well as `Xy`. + """ + n_samples, n_features = X.shape + + if sparse.issparse(X): + # copy is not needed here as X is not modified inplace when X is sparse + precompute = False + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=fit_intercept, + copy=False, + check_input=check_input, + sample_weight=sample_weight, + ) + else: + # copy was done in fit if necessary + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=fit_intercept, + copy=copy, + check_input=check_input, + sample_weight=sample_weight, + ) + # Rescale only in dense case. Sparse cd solver directly deals with + # sample_weight. + if sample_weight is not None: + # This triggers copies anyway. + X, y, _ = _rescale_data(X, y, sample_weight=sample_weight) + + if hasattr(precompute, "__array__"): + if fit_intercept and not np.allclose(X_offset, np.zeros(n_features)): + warnings.warn( + ( + "Gram matrix was provided but X was centered to fit " + "intercept: recomputing Gram matrix." + ), + UserWarning, + ) + # TODO: instead of warning and recomputing, we could just center + # the user provided Gram matrix a-posteriori (after making a copy + # when `copy=True`). + # recompute Gram + precompute = "auto" + Xy = None + elif check_input: + # If we're going to use the user's precomputed gram matrix, we + # do a quick check to make sure its not totally bogus. + _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale) + + # precompute if n_samples > n_features + if isinstance(precompute, str) and precompute == "auto": + precompute = n_samples > n_features + + if precompute is True: + # make sure that the 'precompute' array is contiguous. + precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order="C") + np.dot(X.T, X, out=precompute) + + if not hasattr(precompute, "__array__"): + Xy = None # cannot use Xy if precompute is not Gram + + if hasattr(precompute, "__array__") and Xy is None: + common_dtype = np.result_type(X.dtype, y.dtype) + if y.ndim == 1: + # Xy is 1d, make sure it is contiguous. + Xy = np.empty(shape=n_features, dtype=common_dtype, order="C") + np.dot(X.T, y, out=Xy) + else: + # Make sure that Xy is always F contiguous even if X or y are not + # contiguous: the goal is to make it fast to extract the data for a + # specific target. 
+ n_targets = y.shape[1] + Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order="F") + np.dot(y.T, X, out=Xy.T) + + return X, y, X_offset, y_offset, X_scale, precompute, Xy diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6c4c90b72f3e0f150b45b1b45ba8c062520dce96 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_huber.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_huber.py new file mode 100644 index 0000000000000000000000000000000000000000..554f693061116187907250799023205e614bef4c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_huber.py @@ -0,0 +1,352 @@ +# Authors: Manoj Kumar mks542@nyu.edu +# License: BSD 3 clause + +from numbers import Integral, Real + +import numpy as np +from scipy import optimize + +from ..base import BaseEstimator, RegressorMixin, _fit_context +from ..utils import axis0_safe_slice +from ..utils._param_validation import Interval +from ..utils.extmath import safe_sparse_dot +from ..utils.optimize import _check_optimize_result +from ..utils.validation import _check_sample_weight +from ._base import LinearModel + + +def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None): + """Returns the Huber loss and the gradient. + + Parameters + ---------- + w : ndarray, shape (n_features + 1,) or (n_features + 2,) + Feature vector. + w[:n_features] gives the coefficients + w[-1] gives the scale factor and if the intercept is fit w[-2] + gives the intercept factor. + + X : ndarray of shape (n_samples, n_features) + Input data. + + y : ndarray of shape (n_samples,) + Target vector. + + epsilon : float + Robustness of the Huber estimator. + + alpha : float + Regularization parameter. + + sample_weight : ndarray of shape (n_samples,), default=None + Weight assigned to each sample. + + Returns + ------- + loss : float + Huber loss. + + gradient : ndarray, shape (len(w)) + Returns the derivative of the Huber loss with respect to each + coefficient, intercept and the scale as a vector. + """ + _, n_features = X.shape + fit_intercept = n_features + 2 == w.shape[0] + if fit_intercept: + intercept = w[-2] + sigma = w[-1] + w = w[:n_features] + n_samples = np.sum(sample_weight) + + # Calculate the values where |y - X'w -c / sigma| > epsilon + # The values above this threshold are outliers. + linear_loss = y - safe_sparse_dot(X, w) + if fit_intercept: + linear_loss -= intercept + abs_linear_loss = np.abs(linear_loss) + outliers_mask = abs_linear_loss > epsilon * sigma + + # Calculate the linear loss due to the outliers. + # This is equal to (2 * M * |y - X'w -c / sigma| - M**2) * sigma + outliers = abs_linear_loss[outliers_mask] + num_outliers = np.count_nonzero(outliers_mask) + n_non_outliers = X.shape[0] - num_outliers + + # n_sq_outliers includes the weight give to the outliers while + # num_outliers is just the number of outliers. 
+ outliers_sw = sample_weight[outliers_mask] + n_sw_outliers = np.sum(outliers_sw) + outlier_loss = ( + 2.0 * epsilon * np.sum(outliers_sw * outliers) + - sigma * n_sw_outliers * epsilon**2 + ) + + # Calculate the quadratic loss due to the non-outliers.- + # This is equal to |(y - X'w - c)**2 / sigma**2| * sigma + non_outliers = linear_loss[~outliers_mask] + weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers + weighted_loss = np.dot(weighted_non_outliers.T, non_outliers) + squared_loss = weighted_loss / sigma + + if fit_intercept: + grad = np.zeros(n_features + 2) + else: + grad = np.zeros(n_features + 1) + + # Gradient due to the squared loss. + X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers) + grad[:n_features] = ( + 2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers) + ) + + # Gradient due to the linear loss. + signed_outliers = np.ones_like(outliers) + signed_outliers_mask = linear_loss[outliers_mask] < 0 + signed_outliers[signed_outliers_mask] = -1.0 + X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers) + sw_outliers = sample_weight[outliers_mask] * signed_outliers + grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers)) + + # Gradient due to the penalty. + grad[:n_features] += alpha * 2.0 * w + + # Gradient due to sigma. + grad[-1] = n_samples + grad[-1] -= n_sw_outliers * epsilon**2 + grad[-1] -= squared_loss / sigma + + # Gradient due to the intercept. + if fit_intercept: + grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma + grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers) + + loss = n_samples * sigma + squared_loss + outlier_loss + loss += alpha * np.dot(w, w) + return loss, grad + + +class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator): + """L2-regularized linear regression model that is robust to outliers. + + The Huber Regressor optimizes the squared loss for the samples where + ``|(y - Xw - c) / sigma| < epsilon`` and the absolute loss for the samples + where ``|(y - Xw - c) / sigma| > epsilon``, where the model coefficients + ``w``, the intercept ``c`` and the scale ``sigma`` are parameters + to be optimized. The parameter sigma makes sure that if y is scaled up + or down by a certain factor, one does not need to rescale epsilon to + achieve the same robustness. Note that this does not take into account + the fact that the different features of X may be of different scales. + + The Huber loss function has the advantage of not being heavily influenced + by the outliers while not completely ignoring their effect. + + Read more in the :ref:`User Guide ` + + .. versionadded:: 0.18 + + Parameters + ---------- + epsilon : float, default=1.35 + The parameter epsilon controls the number of samples that should be + classified as outliers. The smaller the epsilon, the more robust it is + to outliers. Epsilon must be in the range `[1, inf)`. + + max_iter : int, default=100 + Maximum number of iterations that + ``scipy.optimize.minimize(method="L-BFGS-B")`` should run for. + + alpha : float, default=0.0001 + Strength of the squared L2 regularization. Note that the penalty is + equal to ``alpha * ||w||^2``. + Must be in the range `[0, inf)`. + + warm_start : bool, default=False + This is useful if the stored attributes of a previously used model + has to be reused. If set to False, then the coefficients will + be rewritten for every call to fit. + See :term:`the Glossary `. + + fit_intercept : bool, default=True + Whether or not to fit the intercept. 
This can be set to False + if the data is already centered around the origin. + + tol : float, default=1e-05 + The iteration will stop when + ``max{|proj g_i | i = 1, ..., n}`` <= ``tol`` + where pg_i is the i-th component of the projected gradient. + + Attributes + ---------- + coef_ : array, shape (n_features,) + Features got by optimizing the L2-regularized Huber loss. + + intercept_ : float + Bias. + + scale_ : float + The value by which ``|y - Xw - c|`` is scaled down. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations that + ``scipy.optimize.minimize(method="L-BFGS-B")`` has run for. + + .. versionchanged:: 0.20 + + In SciPy <= 1.0.0 the number of lbfgs iterations may exceed + ``max_iter``. ``n_iter_`` will now report at most ``max_iter``. + + outliers_ : array, shape (n_samples,) + A boolean mask which is set to True where the samples are identified + as outliers. + + See Also + -------- + RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm. + TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. + SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD. + + References + ---------- + .. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics + Concomitant scale estimates, pg 172 + .. [2] Art B. Owen (2006), A robust hybrid of lasso and ridge regression. + https://statweb.stanford.edu/~owen/reports/hhu.pdf + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import HuberRegressor, LinearRegression + >>> from sklearn.datasets import make_regression + >>> rng = np.random.RandomState(0) + >>> X, y, coef = make_regression( + ... n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0) + >>> X[:4] = rng.uniform(10, 20, (4, 2)) + >>> y[:4] = rng.uniform(10, 20, 4) + >>> huber = HuberRegressor().fit(X, y) + >>> huber.score(X, y) + -7.284... + >>> huber.predict(X[:1,]) + array([806.7200...]) + >>> linear = LinearRegression().fit(X, y) + >>> print("True coefficients:", coef) + True coefficients: [20.4923... 34.1698...] + >>> print("Huber coefficients:", huber.coef_) + Huber coefficients: [17.7906... 31.0106...] + >>> print("Linear Regression coefficients:", linear.coef_) + Linear Regression coefficients: [-1.9221... 7.0226...] + """ + + _parameter_constraints: dict = { + "epsilon": [Interval(Real, 1.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "alpha": [Interval(Real, 0, None, closed="left")], + "warm_start": ["boolean"], + "fit_intercept": ["boolean"], + "tol": [Interval(Real, 0.0, None, closed="left")], + } + + def __init__( + self, + *, + epsilon=1.35, + max_iter=100, + alpha=0.0001, + warm_start=False, + fit_intercept=True, + tol=1e-05, + ): + self.epsilon = epsilon + self.max_iter = max_iter + self.alpha = alpha + self.warm_start = warm_start + self.fit_intercept = fit_intercept + self.tol = tol + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model according to the given training data. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. 
+ + y : array-like, shape (n_samples,) + Target vector relative to X. + + sample_weight : array-like, shape (n_samples,) + Weight given to each sample. + + Returns + ------- + self : object + Fitted `HuberRegressor` estimator. + """ + X, y = self._validate_data( + X, + y, + copy=False, + accept_sparse=["csr"], + y_numeric=True, + dtype=[np.float64, np.float32], + ) + + sample_weight = _check_sample_weight(sample_weight, X) + + if self.warm_start and hasattr(self, "coef_"): + parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_])) + else: + if self.fit_intercept: + parameters = np.zeros(X.shape[1] + 2) + else: + parameters = np.zeros(X.shape[1] + 1) + # Make sure to initialize the scale parameter to a strictly + # positive value: + parameters[-1] = 1 + + # Sigma or the scale factor should be non-negative. + # Setting it to be zero might cause undefined bounds hence we set it + # to a value close to zero. + bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1)) + bounds[-1][0] = np.finfo(np.float64).eps * 10 + + opt_res = optimize.minimize( + _huber_loss_and_gradient, + parameters, + method="L-BFGS-B", + jac=True, + args=(X, y, self.epsilon, self.alpha, sample_weight), + options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1}, + bounds=bounds, + ) + + parameters = opt_res.x + + if opt_res.status == 2: + raise ValueError( + "HuberRegressor convergence failed: l-BFGS-b solver terminated with %s" + % opt_res.message + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter) + self.scale_ = parameters[-1] + if self.fit_intercept: + self.intercept_ = parameters[-2] + else: + self.intercept_ = 0.0 + self.coef_ = parameters[: X.shape[1]] + + residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_) + self.outliers_ = residual > self.scale_ * self.epsilon + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_omp.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_omp.py new file mode 100644 index 0000000000000000000000000000000000000000..a2ee0f7790535913bcd427f101ee61df2fad5eb0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_omp.py @@ -0,0 +1,1097 @@ +"""Orthogonal matching pursuit algorithms +""" + +# Author: Vlad Niculae +# +# License: BSD 3 clause + +import warnings +from math import sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.linalg.lapack import get_lapack_funcs + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context +from ..model_selection import check_cv +from ..utils import Bunch, as_float_array, check_array +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.parallel import Parallel, delayed +from ._base import LinearModel, _pre_fit + +premature = ( + "Orthogonal matching pursuit ended prematurely due to linear" + " dependence in the dictionary. The requested precision might" + " not have been met." +) + + +def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): + """Orthogonal Matching Pursuit step using the Cholesky decomposition. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Input dictionary. Columns are assumed to have unit norm. + + y : ndarray of shape (n_samples,) + Input targets. 
+ + n_nonzero_coefs : int + Targeted number of non-zero elements. + + tol : float, default=None + Targeted squared error, if not None overrides n_nonzero_coefs. + + copy_X : bool, default=True + Whether the design matrix X must be copied by the algorithm. A false + value is only helpful if X is already Fortran-ordered, otherwise a + copy is made anyway. + + return_path : bool, default=False + Whether to return every value of the nonzero coefficients along the + forward path. Useful for cross-validation. + + Returns + ------- + gamma : ndarray of shape (n_nonzero_coefs,) + Non-zero elements of the solution. + + idx : ndarray of shape (n_nonzero_coefs,) + Indices of the positions of the elements in gamma within the solution + vector. + + coef : ndarray of shape (n_features, n_nonzero_coefs) + The first k values of column k correspond to the coefficient value + for the active features at that step. The lower left triangle contains + garbage. Only returned if ``return_path=True``. + + n_active : int + Number of active features at convergence. + """ + if copy_X: + X = X.copy("F") + else: # even if we are allowed to overwrite, still copy it if bad order + X = np.asfortranarray(X) + + min_float = np.finfo(X.dtype).eps + nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X,)) + (potrs,) = get_lapack_funcs(("potrs",), (X,)) + + alpha = np.dot(X.T, y) + residual = y + gamma = np.empty(0) + n_active = 0 + indices = np.arange(X.shape[1]) # keeping track of swapping + + max_features = X.shape[1] if tol is not None else n_nonzero_coefs + + L = np.empty((max_features, max_features), dtype=X.dtype) + + if return_path: + coefs = np.empty_like(L) + + while True: + lam = np.argmax(np.abs(np.dot(X.T, residual))) + if lam < n_active or alpha[lam] ** 2 < min_float: + # atom already selected or inner product too small + warnings.warn(premature, RuntimeWarning, stacklevel=2) + break + + if n_active > 0: + # Updates the Cholesky decomposition of X' X + L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) + linalg.solve_triangular( + L[:n_active, :n_active], + L[n_active, :n_active], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) + v = nrm2(L[n_active, :n_active]) ** 2 + Lkk = linalg.norm(X[:, lam]) ** 2 - v + if Lkk <= min_float: # selected atoms are dependent + warnings.warn(premature, RuntimeWarning, stacklevel=2) + break + L[n_active, n_active] = sqrt(Lkk) + else: + L[0, 0] = linalg.norm(X[:, lam]) + + X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) + alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] + indices[n_active], indices[lam] = indices[lam], indices[n_active] + n_active += 1 + + # solves LL'x = X'y as a composition of two triangular systems + gamma, _ = potrs( + L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False + ) + + if return_path: + coefs[:n_active, n_active - 1] = gamma + residual = y - np.dot(X[:, :n_active], gamma) + if tol is not None and nrm2(residual) ** 2 <= tol: + break + elif n_active == max_features: + break + + if return_path: + return gamma, indices[:n_active], coefs[:, :n_active], n_active + else: + return gamma, indices[:n_active], n_active + + +def _gram_omp( + Gram, + Xy, + n_nonzero_coefs, + tol_0=None, + tol=None, + copy_Gram=True, + copy_Xy=True, + return_path=False, +): + """Orthogonal Matching Pursuit step on a precomputed Gram matrix. + + This function uses the Cholesky decomposition method. 
+ + Parameters + ---------- + Gram : ndarray of shape (n_features, n_features) + Gram matrix of the input data matrix. + + Xy : ndarray of shape (n_features,) + Input targets. + + n_nonzero_coefs : int + Targeted number of non-zero elements. + + tol_0 : float, default=None + Squared norm of y, required if tol is not None. + + tol : float, default=None + Targeted squared error, if not None overrides n_nonzero_coefs. + + copy_Gram : bool, default=True + Whether the gram matrix must be copied by the algorithm. A false + value is only helpful if it is already Fortran-ordered, otherwise a + copy is made anyway. + + copy_Xy : bool, default=True + Whether the covariance vector Xy must be copied by the algorithm. + If False, it may be overwritten. + + return_path : bool, default=False + Whether to return every value of the nonzero coefficients along the + forward path. Useful for cross-validation. + + Returns + ------- + gamma : ndarray of shape (n_nonzero_coefs,) + Non-zero elements of the solution. + + idx : ndarray of shape (n_nonzero_coefs,) + Indices of the positions of the elements in gamma within the solution + vector. + + coefs : ndarray of shape (n_features, n_nonzero_coefs) + The first k values of column k correspond to the coefficient value + for the active features at that step. The lower left triangle contains + garbage. Only returned if ``return_path=True``. + + n_active : int + Number of active features at convergence. + """ + Gram = Gram.copy("F") if copy_Gram else np.asfortranarray(Gram) + + if copy_Xy or not Xy.flags.writeable: + Xy = Xy.copy() + + min_float = np.finfo(Gram.dtype).eps + nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (Gram,)) + (potrs,) = get_lapack_funcs(("potrs",), (Gram,)) + + indices = np.arange(len(Gram)) # keeping track of swapping + alpha = Xy + tol_curr = tol_0 + delta = 0 + gamma = np.empty(0) + n_active = 0 + + max_features = len(Gram) if tol is not None else n_nonzero_coefs + + L = np.empty((max_features, max_features), dtype=Gram.dtype) + + L[0, 0] = 1.0 + if return_path: + coefs = np.empty_like(L) + + while True: + lam = np.argmax(np.abs(alpha)) + if lam < n_active or alpha[lam] ** 2 < min_float: + # selected same atom twice, or inner product too small + warnings.warn(premature, RuntimeWarning, stacklevel=3) + break + if n_active > 0: + L[n_active, :n_active] = Gram[lam, :n_active] + linalg.solve_triangular( + L[:n_active, :n_active], + L[n_active, :n_active], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) + v = nrm2(L[n_active, :n_active]) ** 2 + Lkk = Gram[lam, lam] - v + if Lkk <= min_float: # selected atoms are dependent + warnings.warn(premature, RuntimeWarning, stacklevel=3) + break + L[n_active, n_active] = sqrt(Lkk) + else: + L[0, 0] = sqrt(Gram[lam, lam]) + + Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) + Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) + indices[n_active], indices[lam] = indices[lam], indices[n_active] + Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] + n_active += 1 + # solves LL'x = X'y as a composition of two triangular systems + gamma, _ = potrs( + L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False + ) + if return_path: + coefs[:n_active, n_active - 1] = gamma + beta = np.dot(Gram[:, :n_active], gamma) + alpha = Xy - beta + if tol is not None: + tol_curr += delta + delta = np.inner(gamma, beta[:n_active]) + tol_curr -= delta + if abs(tol_curr) <= tol: + break + elif n_active == max_features: + break + + if return_path: + return gamma, 
indices[:n_active], coefs[:, :n_active], n_active + else: + return gamma, indices[:n_active], n_active + + +@validate_params( + { + "X": ["array-like"], + "y": [np.ndarray], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left"), None], + "precompute": ["boolean", StrOptions({"auto"})], + "copy_X": ["boolean"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def orthogonal_mp( + X, + y, + *, + n_nonzero_coefs=None, + tol=None, + precompute=False, + copy_X=True, + return_path=False, + return_n_iter=False, +): + r"""Orthogonal Matching Pursuit (OMP). + + Solves n_targets Orthogonal Matching Pursuit problems. + An instance of the problem has the form: + + When parametrized by the number of non-zero coefficients using + `n_nonzero_coefs`: + argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} + + When parametrized by error using the parameter `tol`: + argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. Columns are assumed to have unit norm. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Input targets. + + n_nonzero_coefs : int, default=None + Desired number of non-zero entries in the solution. If None (by + default) this value is set to 10% of n_features. + + tol : float, default=None + Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs. + + precompute : 'auto' or bool, default=False + Whether to perform precomputations. Improves performance when n_targets + or n_samples is very large. + + copy_X : bool, default=True + Whether the design matrix X must be copied by the algorithm. A false + value is only helpful if X is already Fortran-ordered, otherwise a + copy is made anyway. + + return_path : bool, default=False + Whether to return every value of the nonzero coefficients along the + forward path. Useful for cross-validation. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + coef : ndarray of shape (n_features,) or (n_features, n_targets) + Coefficients of the OMP solution. If `return_path=True`, this contains + the whole coefficient path. In this case its shape is + (n_features, n_features) or (n_features, n_targets, n_features) and + iterating over the last axis generates coefficients in increasing order + of active features. + + n_iters : array-like or int + Number of active features across every target. Returned only if + `return_n_iter` is set to True. + + See Also + -------- + OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model. + orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y. + lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang, + Matching pursuits with time-frequency dictionaries, IEEE Transactions on + Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. + (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) + + This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, + M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal + Matching Pursuit Technical Report - CS Technion, April 2008. 
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf + """ + X = check_array(X, order="F", copy=copy_X) + copy_X = False + if y.ndim == 1: + y = y.reshape(-1, 1) + y = check_array(y) + if y.shape[1] > 1: # subsequent targets will be affected + copy_X = True + if n_nonzero_coefs is None and tol is None: + # default for n_nonzero_coefs is 0.1 * n_features + # but at least one. + n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) + if tol is None and n_nonzero_coefs > X.shape[1]: + raise ValueError( + "The number of atoms cannot be more than the number of features" + ) + if precompute == "auto": + precompute = X.shape[0] > X.shape[1] + if precompute: + G = np.dot(X.T, X) + G = np.asfortranarray(G) + Xy = np.dot(X.T, y) + if tol is not None: + norms_squared = np.sum((y**2), axis=0) + else: + norms_squared = None + return orthogonal_mp_gram( + G, + Xy, + n_nonzero_coefs=n_nonzero_coefs, + tol=tol, + norms_squared=norms_squared, + copy_Gram=copy_X, + copy_Xy=False, + return_path=return_path, + ) + + if return_path: + coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) + else: + coef = np.zeros((X.shape[1], y.shape[1])) + n_iters = [] + + for k in range(y.shape[1]): + out = _cholesky_omp( + X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path + ) + if return_path: + _, idx, coefs, n_iter = out + coef = coef[:, :, : len(idx)] + for n_active, x in enumerate(coefs.T): + coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] + else: + x, idx, n_iter = out + coef[idx, k] = x + n_iters.append(n_iter) + + if y.shape[1] == 1: + n_iters = n_iters[0] + + if return_n_iter: + return np.squeeze(coef), n_iters + else: + return np.squeeze(coef) + + +@validate_params( + { + "Gram": ["array-like"], + "Xy": ["array-like"], + "n_nonzero_coefs": [Interval(Integral, 0, None, closed="neither"), None], + "tol": [Interval(Real, 0, None, closed="left"), None], + "norms_squared": ["array-like", None], + "copy_Gram": ["boolean"], + "copy_Xy": ["boolean"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def orthogonal_mp_gram( + Gram, + Xy, + *, + n_nonzero_coefs=None, + tol=None, + norms_squared=None, + copy_Gram=True, + copy_Xy=True, + return_path=False, + return_n_iter=False, +): + """Gram Orthogonal Matching Pursuit (OMP). + + Solves n_targets Orthogonal Matching Pursuit problems using only + the Gram matrix X.T * X and the product X.T * y. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + Gram : array-like of shape (n_features, n_features) + Gram matrix of the input data: `X.T * X`. + + Xy : array-like of shape (n_features,) or (n_features, n_targets) + Input targets multiplied by `X`: `X.T * y`. + + n_nonzero_coefs : int, default=None + Desired number of non-zero entries in the solution. If `None` (by + default) this value is set to 10% of n_features. + + tol : float, default=None + Maximum squared norm of the residual. If not `None`, + overrides `n_nonzero_coefs`. + + norms_squared : array-like of shape (n_targets,), default=None + Squared L2 norms of the lines of `y`. Required if `tol` is not None. + + copy_Gram : bool, default=True + Whether the gram matrix must be copied by the algorithm. A `False` + value is only helpful if it is already Fortran-ordered, otherwise a + copy is made anyway. + + copy_Xy : bool, default=True + Whether the covariance vector `Xy` must be copied by the algorithm. + If `False`, it may be overwritten. 
+ + return_path : bool, default=False + Whether to return every value of the nonzero coefficients along the + forward path. Useful for cross-validation. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + coef : ndarray of shape (n_features,) or (n_features, n_targets) + Coefficients of the OMP solution. If `return_path=True`, this contains + the whole coefficient path. In this case its shape is + `(n_features, n_features)` or `(n_features, n_targets, n_features)` and + iterating over the last axis yields coefficients in increasing order + of active features. + + n_iters : list or int + Number of active features across every target. Returned only if + `return_n_iter` is set to True. + + See Also + -------- + OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP). + orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. + lars_path : Compute Least Angle Regression or Lasso path using + LARS algorithm. + sklearn.decomposition.sparse_encode : Generic sparse coding. + Each column of the result is the solution to a Lasso problem. + + Notes + ----- + Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, + Matching pursuits with time-frequency dictionaries, IEEE Transactions on + Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. + (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) + + This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, + M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal + Matching Pursuit Technical Report - CS Technion, April 2008. + https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf + """ + Gram = check_array(Gram, order="F", copy=copy_Gram) + Xy = np.asarray(Xy) + if Xy.ndim > 1 and Xy.shape[1] > 1: + # or subsequent target will be affected + copy_Gram = True + if Xy.ndim == 1: + Xy = Xy[:, np.newaxis] + if tol is not None: + norms_squared = [norms_squared] + if copy_Xy or not Xy.flags.writeable: + # Make the copy once instead of many times in _gram_omp itself. + Xy = Xy.copy() + + if n_nonzero_coefs is None and tol is None: + n_nonzero_coefs = int(0.1 * len(Gram)) + if tol is not None and norms_squared is None: + raise ValueError( + "Gram OMP needs the precomputed norms in order " + "to evaluate the error sum of squares." 
+ ) + if tol is not None and tol < 0: + raise ValueError("Epsilon cannot be negative") + if tol is None and n_nonzero_coefs <= 0: + raise ValueError("The number of atoms must be positive") + if tol is None and n_nonzero_coefs > len(Gram): + raise ValueError( + "The number of atoms cannot be more than the number of features" + ) + + if return_path: + coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype) + else: + coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype) + + n_iters = [] + for k in range(Xy.shape[1]): + out = _gram_omp( + Gram, + Xy[:, k], + n_nonzero_coefs, + norms_squared[k] if tol is not None else None, + tol, + copy_Gram=copy_Gram, + copy_Xy=False, + return_path=return_path, + ) + if return_path: + _, idx, coefs, n_iter = out + coef = coef[:, :, : len(idx)] + for n_active, x in enumerate(coefs.T): + coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] + else: + x, idx, n_iter = out + coef[idx, k] = x + n_iters.append(n_iter) + + if Xy.shape[1] == 1: + n_iters = n_iters[0] + + if return_n_iter: + return np.squeeze(coef), n_iters + else: + return np.squeeze(coef) + + +class OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel): + """Orthogonal Matching Pursuit model (OMP). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_nonzero_coefs : int, default=None + Desired number of non-zero entries in the solution. If None (by + default) this value is set to 10% of n_features. + + tol : float, default=None + Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : 'auto' or bool, default='auto' + Whether to use a precomputed Gram and Xy matrix to speed up + calculations. Improves performance when :term:`n_targets` or + :term:`n_samples` is very large. Note that if you already have such + matrices, you can pass them directly to the fit method. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the formula). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : int or array-like + Number of active features across every target. + + n_nonzero_coefs_ : int + The number of non-zero coefficients in the solution. If + `n_nonzero_coefs` is None and `tol` is None this value is either set + to 10% of `n_features` or 1, whichever is greater. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. + orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit + problems using only the Gram matrix X.T * X and the product X.T * y. + lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + sklearn.decomposition.sparse_encode : Generic sparse coding. + Each column of the result is the solution to a Lasso problem. + OrthogonalMatchingPursuitCV : Cross-validated + Orthogonal Matching Pursuit model (OMP). 
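# Illustrative sketch (not part of the library source): the two function-level
# entry points defined above agree. `orthogonal_mp` works on (X, y) directly,
# while `orthogonal_mp_gram` consumes the precomputed Gram matrix X.T @ X and
# the product X.T @ y.
import numpy as np
from sklearn.linear_model import orthogonal_mp, orthogonal_mp_gram

rng = np.random.RandomState(0)
X = rng.randn(50, 20)
X /= np.linalg.norm(X, axis=0)            # columns are assumed to have unit norm
true_coef = np.zeros(20)
true_coef[[2, 7, 11]] = [1.5, -2.0, 3.0]  # a 3-sparse ground truth
y = X @ true_coef

coef_direct = orthogonal_mp(X, y, n_nonzero_coefs=3)
coef_gram = orthogonal_mp_gram(X.T @ X, X.T @ y, n_nonzero_coefs=3)
print(np.allclose(coef_direct, coef_gram))  # expected: True
print(np.flatnonzero(coef_direct))          # expected: [ 2  7 11]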
+ + Notes + ----- + Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, + Matching pursuits with time-frequency dictionaries, IEEE Transactions on + Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. + (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) + + This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, + M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal + Matching Pursuit Technical Report - CS Technion, April 2008. + https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf + + Examples + -------- + >>> from sklearn.linear_model import OrthogonalMatchingPursuit + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(noise=4, random_state=0) + >>> reg = OrthogonalMatchingPursuit().fit(X, y) + >>> reg.score(X, y) + 0.9991... + >>> reg.predict(X[:1,]) + array([-78.3854...]) + """ + + _parameter_constraints: dict = { + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left"), None], + "fit_intercept": ["boolean"], + "precompute": [StrOptions({"auto"}), "boolean"], + } + + def __init__( + self, + *, + n_nonzero_coefs=None, + tol=None, + fit_intercept=True, + precompute="auto", + ): + self.n_nonzero_coefs = n_nonzero_coefs + self.tol = tol + self.fit_intercept = fit_intercept + self.precompute = precompute + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + Returns + ------- + self : object + Returns an instance of self. + """ + X, y = self._validate_data(X, y, multi_output=True, y_numeric=True) + n_features = X.shape[1] + + X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit( + X, y, None, self.precompute, self.fit_intercept, copy=True + ) + + if y.ndim == 1: + y = y[:, np.newaxis] + + if self.n_nonzero_coefs is None and self.tol is None: + # default for n_nonzero_coefs is 0.1 * n_features + # but at least one. + self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1) + else: + self.n_nonzero_coefs_ = self.n_nonzero_coefs + + if Gram is False: + coef_, self.n_iter_ = orthogonal_mp( + X, + y, + n_nonzero_coefs=self.n_nonzero_coefs_, + tol=self.tol, + precompute=False, + copy_X=True, + return_n_iter=True, + ) + else: + norms_sq = np.sum(y**2, axis=0) if self.tol is not None else None + + coef_, self.n_iter_ = orthogonal_mp_gram( + Gram, + Xy=Xy, + n_nonzero_coefs=self.n_nonzero_coefs_, + tol=self.tol, + norms_squared=norms_sq, + copy_Gram=True, + copy_Xy=True, + return_n_iter=True, + ) + self.coef_ = coef_.T + self._set_intercept(X_offset, y_offset, X_scale) + return self + + +def _omp_path_residues( + X_train, + y_train, + X_test, + y_test, + copy=True, + fit_intercept=True, + max_iter=100, +): + """Compute the residues on left-out data for a full LARS path. + + Parameters + ---------- + X_train : ndarray of shape (n_samples, n_features) + The data to fit the LARS on. + + y_train : ndarray of shape (n_samples) + The target variable to fit LARS on. + + X_test : ndarray of shape (n_samples, n_features) + The data to compute the residues on. + + y_test : ndarray of shape (n_samples) + The target variable to compute the residues on. 
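# Illustrative aside (not part of the library source): the two branches of
# OrthogonalMatchingPursuit.fit above (direct solve vs. precomputed Gram)
# yield the same coefficients; `precompute` only changes how the problem is
# solved, not its solution.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import OrthogonalMatchingPursuit

X, y = make_regression(n_samples=100, n_features=20, n_informative=4,
                       random_state=0)
direct = OrthogonalMatchingPursuit(n_nonzero_coefs=4, precompute=False).fit(X, y)
gram = OrthogonalMatchingPursuit(n_nonzero_coefs=4, precompute=True).fit(X, y)
print(np.allclose(direct.coef_, gram.coef_))   # expected: True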
+ + copy : bool, default=True + Whether X_train, X_test, y_train and y_test should be copied. If + False, they may be overwritten. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=100 + Maximum numbers of iterations to perform, therefore maximum features + to include. 100 by default. + + Returns + ------- + residues : ndarray of shape (n_samples, max_features) + Residues of the prediction on the test data. + """ + + if copy: + X_train = X_train.copy() + y_train = y_train.copy() + X_test = X_test.copy() + y_test = y_test.copy() + + if fit_intercept: + X_mean = X_train.mean(axis=0) + X_train -= X_mean + X_test -= X_mean + y_mean = y_train.mean(axis=0) + y_train = as_float_array(y_train, copy=False) + y_train -= y_mean + y_test = as_float_array(y_test, copy=False) + y_test -= y_mean + + coefs = orthogonal_mp( + X_train, + y_train, + n_nonzero_coefs=max_iter, + tol=None, + precompute=False, + copy_X=False, + return_path=True, + ) + if coefs.ndim == 1: + coefs = coefs[:, np.newaxis] + + return np.dot(coefs.T, X_test.T) - y_test + + +class OrthogonalMatchingPursuitCV(RegressorMixin, LinearModel): + """Cross-validated Orthogonal Matching Pursuit model (OMP). + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + copy : bool, default=True + Whether the design matrix X must be copied by the algorithm. A false + value is only helpful if X is already Fortran-ordered, otherwise a + copy is made anyway. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=None + Maximum numbers of iterations to perform, therefore maximum features + to include. 10% of ``n_features`` but at least 5 if available. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool or int, default=False + Sets the verbosity amount. + + Attributes + ---------- + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the problem formulation). + + n_nonzero_coefs_ : int + Estimated number of non-zero coefficients giving the best mean squared + error over the cross-validation folds. + + n_iter_ : int or array-like + Number of active features across every target for the model refit with + the best hyperparameters got by cross-validating across all folds. 
+ + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. + orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit + problems using only the Gram matrix X.T * X and the product X.T * y. + lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP). + LarsCV : Cross-validated Least Angle Regression model. + LassoLarsCV : Cross-validated Lasso model fit with Least Angle Regression. + sklearn.decomposition.sparse_encode : Generic sparse coding. + Each column of the result is the solution to a Lasso problem. + + Notes + ----- + In `fit`, once the optimal number of non-zero coefficients is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_features=100, n_informative=10, + ... noise=4, random_state=0) + >>> reg = OrthogonalMatchingPursuitCV(cv=5).fit(X, y) + >>> reg.score(X, y) + 0.9991... + >>> reg.n_nonzero_coefs_ + 10 + >>> reg.predict(X[:1,]) + array([-78.3854...]) + """ + + _parameter_constraints: dict = { + "copy": ["boolean"], + "fit_intercept": ["boolean"], + "max_iter": [Interval(Integral, 0, None, closed="left"), None], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + copy=True, + fit_intercept=True, + max_iter=None, + cv=None, + n_jobs=None, + verbose=False, + ): + self.copy = copy + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.cv = cv + self.n_jobs = n_jobs + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, **fit_params): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + **fit_params : dict + Parameters to pass to the underlying splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of self. + """ + _raise_for_params(fit_params, self, "fit") + + X, y = self._validate_data(X, y, y_numeric=True, ensure_min_features=2) + X = as_float_array(X, copy=False, force_all_finite=False) + cv = check_cv(self.cv, classifier=False) + if _routing_enabled(): + routed_params = process_routing(self, "fit", **fit_params) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. 
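# (Illustrative aside, not part of this method:) the model-selection step that
# follows can be reproduced with the public API. Per fold, the squared
# residues along the OMP coefficient path are averaged; the path column with
# the smallest mean error (plus one) gives the selected number of atoms.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import orthogonal_mp
from sklearn.model_selection import KFold

X, y = make_regression(n_samples=120, n_features=30, n_informative=5,
                       noise=1.0, random_state=0)
fold_mse = []
for train, test in KFold(n_splits=5).split(X):
    path = orthogonal_mp(X[train], y[train], n_nonzero_coefs=10,
                         return_path=True)          # shape (n_features, n_steps)
    residues = X[test] @ path - y[test][:, None]    # shape (n_test, n_steps)
    fold_mse.append((residues ** 2).mean(axis=0))
n_steps = min(m.shape[0] for m in fold_mse)
best_n = np.argmin(np.mean([m[:n_steps] for m in fold_mse], axis=0)) + 1
print(best_n)   # analogous to OrthogonalMatchingPursuitCV.n_nonzero_coefs_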
+ routed_params = Bunch() + routed_params.splitter = Bunch(split={}) + max_iter = ( + min(max(int(0.1 * X.shape[1]), 5), X.shape[1]) + if not self.max_iter + else self.max_iter + ) + cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(_omp_path_residues)( + X[train], + y[train], + X[test], + y[test], + self.copy, + self.fit_intercept, + max_iter, + ) + for train, test in cv.split(X, **routed_params.splitter.split) + ) + + min_early_stop = min(fold.shape[0] for fold in cv_paths) + mse_folds = np.array( + [(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths] + ) + best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1 + self.n_nonzero_coefs_ = best_n_nonzero_coefs + omp = OrthogonalMatchingPursuit( + n_nonzero_coefs=best_n_nonzero_coefs, + fit_intercept=self.fit_intercept, + ).fit(X, y) + + self.coef_ = omp.coef_ + self.intercept_ = omp.intercept_ + self.n_iter_ = omp.n_iter_ + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + + router = MetadataRouter(owner=self.__class__.__name__).add( + splitter=self.cv, + method_mapping=MethodMapping().add(callee="split", caller="fit"), + ) + return router diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py new file mode 100644 index 0000000000000000000000000000000000000000..2de019b6d986c958499e7ac3705e86c0ecde9acb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py @@ -0,0 +1,575 @@ +# Authors: Rob Zinkov, Mathieu Blondel +# License: BSD 3 clause +from numbers import Real + +from ..base import _fit_context +from ..utils._param_validation import Interval, StrOptions +from ._stochastic_gradient import DEFAULT_EPSILON, BaseSGDClassifier, BaseSGDRegressor + + +class PassiveAggressiveClassifier(BaseSGDClassifier): + """Passive Aggressive Classifier. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + C : float, default=1.0 + Maximum step size (regularization). Defaults to 1.0. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`~sklearn.linear_model.PassiveAggressiveClassifier.partial_fit` method. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). + + .. versionadded:: 0.19 + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to True, it will automatically set aside + a stratified fraction of training data as validation and terminate + training when validation score is not improving by at least `tol` for + `n_iter_no_change` consecutive epochs. + + .. versionadded:: 0.20 + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. 
+ Only used if early_stopping is True. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before early stopping. + + .. versionadded:: 0.20 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + + loss : str, default="hinge" + The loss function to be used: + hinge: equivalent to PA-I in the reference paper. + squared_hinge: equivalent to PA-II in the reference paper. + + n_jobs : int or None, default=None + The number of CPUs to use to do the OVA (One Versus All, for + multi-class problems) computation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + Used to shuffle the training data, when ``shuffle`` is set to + ``True``. Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + + class_weight : dict, {class_label: weight} or "balanced" or None, \ + default=None + Preset for the class_weight fit parameter. + + Weights associated with classes. If not given, all classes + are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + .. versionadded:: 0.17 + parameter *class_weight* to automatically weight samples. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights and stores the + result in the ``coef_`` attribute. If set to an int greater than 1, + averaging will begin once the total number of samples seen reaches + average. So average=10 will begin averaging after seeing 10 samples. + + .. versionadded:: 0.19 + parameter *average* to use weights averaging in SGD. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ + (n_classes, n_features) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + For multiclass fits, it is the maximum over every binary fit. + + classes_ : ndarray of shape (n_classes,) + The unique classes labels. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + loss_function_ : callable + Loss function used by the algorithm. + + See Also + -------- + SGDClassifier : Incrementally trained logistic regression. + Perceptron : Linear perceptron classifier. + + References + ---------- + Online Passive-Aggressive Algorithms + + K. Crammer, O. Dekel, J. Keshat, S. 
Shalev-Shwartz, Y. Singer - JMLR (2006) + + Examples + -------- + >>> from sklearn.linear_model import PassiveAggressiveClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_features=4, random_state=0) + >>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0, + ... tol=1e-3) + >>> clf.fit(X, y) + PassiveAggressiveClassifier(random_state=0) + >>> print(clf.coef_) + [[0.26642044 0.45070924 0.67251877 0.64185414]] + >>> print(clf.intercept_) + [1.84127814] + >>> print(clf.predict([[0, 0, 0, 0]])) + [1] + """ + + _parameter_constraints: dict = { + **BaseSGDClassifier._parameter_constraints, + "loss": [StrOptions({"hinge", "squared_hinge"})], + "C": [Interval(Real, 0, None, closed="right")], + } + + def __init__( + self, + *, + C=1.0, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + shuffle=True, + verbose=0, + loss="hinge", + n_jobs=None, + random_state=None, + warm_start=False, + class_weight=None, + average=False, + ): + super().__init__( + penalty=None, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + shuffle=shuffle, + verbose=verbose, + random_state=random_state, + eta0=1.0, + warm_start=warm_start, + class_weight=class_weight, + average=average, + n_jobs=n_jobs, + ) + + self.C = C + self.loss = loss + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None): + """Fit linear model with Passive Aggressive algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Subset of the training data. + + y : array-like of shape (n_samples,) + Subset of the target values. + + classes : ndarray of shape (n_classes,) + Classes across all calls to partial_fit. + Can be obtained by via `np.unique(y_all)`, where y_all is the + target vector of the entire dataset. + This argument is required for the first call to partial_fit + and can be omitted in the subsequent calls. + Note that y doesn't need to contain all labels in `classes`. + + Returns + ------- + self : object + Fitted estimator. + """ + if not hasattr(self, "classes_"): + self._more_validate_params(for_partial_fit=True) + + if self.class_weight == "balanced": + raise ValueError( + "class_weight 'balanced' is not supported for " + "partial_fit. For 'balanced' weights, use " + "`sklearn.utils.compute_class_weight` with " + "`class_weight='balanced'`. In place of y you " + "can use a large enough subset of the full " + "training set target to properly estimate the " + "class frequency distributions. Pass the " + "resulting weights as the class_weight " + "parameter." + ) + + lr = "pa1" if self.loss == "hinge" else "pa2" + return self._partial_fit( + X, + y, + alpha=1.0, + C=self.C, + loss="hinge", + learning_rate=lr, + max_iter=1, + classes=classes, + sample_weight=None, + coef_init=None, + intercept_init=None, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None): + """Fit linear model with Passive Aggressive algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + coef_init : ndarray of shape (n_classes, n_features) + The initial coefficients to warm-start the optimization. 
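# Hedged usage aside (not part of the upstream docstring): with partial_fit the
# full label set must be declared on the first call through `classes`; later
# mini-batches may then contain any subset of those labels.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import PassiveAggressiveClassifier

X, y = make_classification(n_samples=1000, n_features=20, random_state=0)
clf = PassiveAggressiveClassifier(C=1.0, random_state=0)
for start in range(0, len(X), 100):                 # stream the data in chunks
    stop = start + 100
    clf.partial_fit(X[start:stop], y[start:stop], classes=np.unique(y))
print(clf.score(X, y))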
+ + intercept_init : ndarray of shape (n_classes,) + The initial intercept to warm-start the optimization. + + Returns + ------- + self : object + Fitted estimator. + """ + self._more_validate_params() + + lr = "pa1" if self.loss == "hinge" else "pa2" + return self._fit( + X, + y, + alpha=1.0, + C=self.C, + loss="hinge", + learning_rate=lr, + coef_init=coef_init, + intercept_init=intercept_init, + ) + + +class PassiveAggressiveRegressor(BaseSGDRegressor): + """Passive Aggressive Regressor. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + C : float, default=1.0 + Maximum step size (regularization). Defaults to 1.0. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. Defaults to True. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`~sklearn.linear_model.PassiveAggressiveRegressor.partial_fit` method. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). + + .. versionadded:: 0.19 + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation. + score is not improving. If set to True, it will automatically set aside + a fraction of training data as validation and terminate + training when validation score is not improving by at least tol for + n_iter_no_change consecutive epochs. + + .. versionadded:: 0.20 + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if early_stopping is True. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before early stopping. + + .. versionadded:: 0.20 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + + loss : str, default="epsilon_insensitive" + The loss function to be used: + epsilon_insensitive: equivalent to PA-I in the reference paper. + squared_epsilon_insensitive: equivalent to PA-II in the reference + paper. + + epsilon : float, default=0.1 + If the difference between the current prediction and the correct label + is below this threshold, the model is not updated. + + random_state : int, RandomState instance, default=None + Used to shuffle the training data, when ``shuffle`` is set to + ``True``. Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights and stores the + result in the ``coef_`` attribute. If set to an int greater than 1, + averaging will begin once the total number of samples seen reaches + average. So average=10 will begin averaging after seeing 10 samples. + + .. versionadded:: 0.19 + parameter *average* to use weights averaging in SGD. 
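# Hedged aside (not part of the upstream docstring): a single PA-I regression
# update as written in the Crammer et al. (2006) reference. The estimator's
# loss="epsilon_insensitive" corresponds to this rule, although the library
# routes the actual updates through its SGD engine with learning_rate="pa1".
import numpy as np

def pa1_regression_update(w, x, y, C=1.0, epsilon=0.1):
    """One passive-aggressive (PA-I) update for a single example (x, y)."""
    loss = max(0.0, abs(y - w @ x) - epsilon)   # epsilon-insensitive loss
    tau = min(C, loss / (x @ x))                # PA-I step size, capped at C
    return w + tau * np.sign(y - w @ x) * x

w = pa1_regression_update(np.zeros(3), np.array([1.0, 2.0, 0.5]), y=3.0)
print(w)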
+ + Attributes + ---------- + coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ + n_features] + Weights assigned to the features. + + intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + See Also + -------- + SGDRegressor : Linear model fitted by minimizing a regularized + empirical loss with SGD. + + References + ---------- + Online Passive-Aggressive Algorithms + + K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006). + + Examples + -------- + >>> from sklearn.linear_model import PassiveAggressiveRegressor + >>> from sklearn.datasets import make_regression + + >>> X, y = make_regression(n_features=4, random_state=0) + >>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0, + ... tol=1e-3) + >>> regr.fit(X, y) + PassiveAggressiveRegressor(max_iter=100, random_state=0) + >>> print(regr.coef_) + [20.48736655 34.18818427 67.59122734 87.94731329] + >>> print(regr.intercept_) + [-0.02306214] + >>> print(regr.predict([[0, 0, 0, 0]])) + [-0.02306214] + """ + + _parameter_constraints: dict = { + **BaseSGDRegressor._parameter_constraints, + "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})], + "C": [Interval(Real, 0, None, closed="right")], + "epsilon": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + *, + C=1.0, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + shuffle=True, + verbose=0, + loss="epsilon_insensitive", + epsilon=DEFAULT_EPSILON, + random_state=None, + warm_start=False, + average=False, + ): + super().__init__( + penalty=None, + l1_ratio=0, + epsilon=epsilon, + eta0=1.0, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + shuffle=shuffle, + verbose=verbose, + random_state=random_state, + warm_start=warm_start, + average=average, + ) + self.C = C + self.loss = loss + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y): + """Fit linear model with Passive Aggressive algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Subset of training data. + + y : numpy array of shape [n_samples] + Subset of target values. + + Returns + ------- + self : object + Fitted estimator. + """ + if not hasattr(self, "coef_"): + self._more_validate_params(for_partial_fit=True) + + lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" + return self._partial_fit( + X, + y, + alpha=1.0, + C=self.C, + loss="epsilon_insensitive", + learning_rate=lr, + max_iter=1, + sample_weight=None, + coef_init=None, + intercept_init=None, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None): + """Fit linear model with Passive Aggressive algorithm. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : numpy array of shape [n_samples] + Target values. + + coef_init : array, shape = [n_features] + The initial coefficients to warm-start the optimization. + + intercept_init : array, shape = [1] + The initial intercept to warm-start the optimization. + + Returns + ------- + self : object + Fitted estimator. + """ + self._more_validate_params() + + lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" + return self._fit( + X, + y, + alpha=1.0, + C=self.C, + loss="epsilon_insensitive", + learning_rate=lr, + coef_init=coef_init, + intercept_init=intercept_init, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..b97550fa52e8c7f4bce1b38be01827e8c605af96 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py @@ -0,0 +1,229 @@ +# Author: Mathieu Blondel +# License: BSD 3 clause +from numbers import Real + +from ..utils._param_validation import Interval, StrOptions +from ._stochastic_gradient import BaseSGDClassifier + + +class Perceptron(BaseSGDClassifier): + """Linear perceptron classifier. + + The implementation is a wrapper around :class:`~sklearn.linear_model.SGDClassifier` + by fixing the `loss` and `learning_rate` parameters as:: + + SGDClassifier(loss="perceptron", learning_rate="constant") + + Other available parameters are described below and are forwarded to + :class:`~sklearn.linear_model.SGDClassifier`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + penalty : {'l2','l1','elasticnet'}, default=None + The penalty (aka regularization term) to be used. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term if regularization is + used. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`. + `l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1. + Only used if `penalty='elasticnet'`. + + .. versionadded:: 0.24 + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + + eta0 : float, default=1 + Constant by which the updates are multiplied. + + n_jobs : int, default=None + The number of CPUs to use to do the OVA (One Versus All, for + multi-class problems) computation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance or None, default=0 + Used to shuffle the training data, when ``shuffle`` is set to + ``True``. Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. 
+ + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to True, it will automatically set aside + a stratified fraction of training data as validation and terminate + training when validation score is not improving by at least `tol` for + `n_iter_no_change` consecutive epochs. + + .. versionadded:: 0.20 + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if early_stopping is True. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before early stopping. + + .. versionadded:: 0.20 + + class_weight : dict, {class_label: weight} or "balanced", default=None + Preset for the class_weight fit parameter. + + Weights associated with classes. If not given, all classes + are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. See + :term:`the Glossary `. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The unique classes labels. + + coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ + (n_classes, n_features) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + loss_function_ : concrete LossFunction + The function that determines the loss, or difference between the + output of the algorithm and the target values. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + For multiclass fits, it is the maximum over every binary fit. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + See Also + -------- + sklearn.linear_model.SGDClassifier : Linear classifiers + (SVM, logistic regression, etc.) with SGD training. + + Notes + ----- + ``Perceptron`` is a classification algorithm which shares the same + underlying implementation with ``SGDClassifier``. In fact, + ``Perceptron()`` is equivalent to `SGDClassifier(loss="perceptron", + eta0=1, learning_rate="constant", penalty=None)`. + + References + ---------- + https://en.wikipedia.org/wiki/Perceptron and references therein. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.linear_model import Perceptron + >>> X, y = load_digits(return_X_y=True) + >>> clf = Perceptron(tol=1e-3, random_state=0) + >>> clf.fit(X, y) + Perceptron() + >>> clf.score(X, y) + 0.939... 
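    An illustrative aside (not part of the upstream docstring): the equivalence
    stated in the Notes can be checked directly. Continuing the example above,
    an SGDClassifier configured the same way and seeded identically should learn
    the same weights, since both go through the same underlying implementation.

    >>> import numpy as np
    >>> from sklearn.linear_model import SGDClassifier
    >>> sgd = SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
    ...                     penalty=None, tol=1e-3, random_state=0).fit(X, y)
    >>> bool(np.allclose(clf.coef_, sgd.coef_))
    True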
+ """ + + _parameter_constraints: dict = {**BaseSGDClassifier._parameter_constraints} + _parameter_constraints.pop("loss") + _parameter_constraints.pop("average") + _parameter_constraints.update( + { + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "eta0": [Interval(Real, 0, None, closed="left")], + } + ) + + def __init__( + self, + *, + penalty=None, + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + eta0=1.0, + n_jobs=None, + random_state=0, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + ): + super().__init__( + loss="perceptron", + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + random_state=random_state, + learning_rate="constant", + eta0=eta0, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + power_t=0.5, + warm_start=warm_start, + class_weight=class_weight, + n_jobs=n_jobs, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..33451d8640bffd2f32310b7d7e26b3eb7ae130f1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py @@ -0,0 +1,308 @@ +# Authors: David Dale +# Christian Lorentzen +# License: BSD 3 clause +import warnings +from numbers import Real + +import numpy as np +from scipy import sparse +from scipy.optimize import linprog + +from ..base import BaseEstimator, RegressorMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..utils import _safe_indexing +from ..utils._param_validation import Interval, StrOptions +from ..utils.fixes import parse_version, sp_version +from ..utils.validation import _check_sample_weight +from ._base import LinearModel + + +class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator): + """Linear regression model that predicts conditional quantiles. + + The linear :class:`QuantileRegressor` optimizes the pinball loss for a + desired `quantile` and is robust to outliers. + + This model uses an L1 regularization like + :class:`~sklearn.linear_model.Lasso`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + quantile : float, default=0.5 + The quantile that the model tries to predict. It must be strictly + between 0 and 1. If 0.5 (default), the model predicts the 50% + quantile, i.e. the median. + + alpha : float, default=1.0 + Regularization constant that multiplies the L1 penalty term. + + fit_intercept : bool, default=True + Whether or not to fit the intercept. + + solver : {'highs-ds', 'highs-ipm', 'highs', 'interior-point', \ + 'revised simplex'}, default='highs' + Method used by :func:`scipy.optimize.linprog` to solve the linear + programming formulation. + + From `scipy>=1.6.0`, it is recommended to use the highs methods because + they are the fastest ones. Solvers "highs-ds", "highs-ipm" and "highs" + support sparse input data and, in fact, always convert to sparse csc. + + From `scipy>=1.11.0`, "interior-point" is not available anymore. + + .. versionchanged:: 1.4 + The default of `solver` changed to `"highs"` in version 1.4. 
+ + solver_options : dict, default=None + Additional parameters passed to :func:`scipy.optimize.linprog` as + options. If `None` and if `solver='interior-point'`, then + `{"lstsq": True}` is passed to :func:`scipy.optimize.linprog` for the + sake of stability. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the features. + + intercept_ : float + The intercept of the model, aka bias term. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations performed by the solver. + + See Also + -------- + Lasso : The Lasso is a linear model that estimates sparse coefficients + with l1 regularization. + HuberRegressor : Linear regression model that is robust to outliers. + + Examples + -------- + >>> from sklearn.linear_model import QuantileRegressor + >>> import numpy as np + >>> n_samples, n_features = 10, 2 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> # the two following lines are optional in practice + >>> from sklearn.utils.fixes import sp_version, parse_version + >>> solver = "highs" if sp_version >= parse_version("1.6.0") else "interior-point" + >>> reg = QuantileRegressor(quantile=0.8, solver=solver).fit(X, y) + >>> np.mean(y <= reg.predict(X)) + 0.8 + """ + + _parameter_constraints: dict = { + "quantile": [Interval(Real, 0, 1, closed="neither")], + "alpha": [Interval(Real, 0, None, closed="left")], + "fit_intercept": ["boolean"], + "solver": [ + StrOptions( + { + "highs-ds", + "highs-ipm", + "highs", + "interior-point", + "revised simplex", + } + ), + ], + "solver_options": [dict, None], + } + + def __init__( + self, + *, + quantile=0.5, + alpha=1.0, + fit_intercept=True, + solver="highs", + solver_options=None, + ): + self.quantile = quantile + self.alpha = alpha + self.fit_intercept = fit_intercept + self.solver = solver + self.solver_options = solver_options + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Returns self. + """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csc", "csr", "coo"], + y_numeric=True, + multi_output=False, + ) + sample_weight = _check_sample_weight(sample_weight, X) + + n_features = X.shape[1] + n_params = n_features + + if self.fit_intercept: + n_params += 1 + # Note that centering y and X with _preprocess_data does not work + # for quantile regression. + + # The objective is defined as 1/n * sum(pinball loss) + alpha * L1. + # So we rescale the penalty term, which is equivalent. 
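        # (Added clarification, not upstream:) with sample weights the objective reads
        #     (1 / sum(w)) * sum_i w_i * pinball_q(y_i - X_i @ coef - intercept)
        #         + alpha * ||coef||_1
        # Multiplying through by sum(w) does not change the minimizer, so the
        # linear program below keeps the raw weighted pinball terms and the
        # penalty constant becomes sum(w) * alpha.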
+ alpha = np.sum(sample_weight) * self.alpha + + if self.solver in ( + "highs-ds", + "highs-ipm", + "highs", + ) and sp_version < parse_version("1.6.0"): + raise ValueError( + f"Solver {self.solver} is only available " + f"with scipy>=1.6.0, got {sp_version}" + ) + else: + solver = self.solver + + if solver == "interior-point" and sp_version >= parse_version("1.11.0"): + raise ValueError( + f"Solver {solver} is not anymore available in SciPy >= 1.11.0." + ) + + if sparse.issparse(X) and solver not in ["highs", "highs-ds", "highs-ipm"]: + raise ValueError( + f"Solver {self.solver} does not support sparse X. " + "Use solver 'highs' for example." + ) + # make default solver more stable + if self.solver_options is None and solver == "interior-point": + solver_options = {"lstsq": True} + else: + solver_options = self.solver_options + + # After rescaling alpha, the minimization problem is + # min sum(pinball loss) + alpha * L1 + # Use linear programming formulation of quantile regression + # min_x c x + # A_eq x = b_eq + # 0 <= x + # x = (s0, s, t0, t, u, v) = slack variables >= 0 + # intercept = s0 - t0 + # coef = s - t + # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n) + # residual = y - X@coef - intercept = u - v + # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n)) + # b_eq = y + # p = n_features + # n = n_samples + # 1_n = vector of length n with entries equal one + # see https://stats.stackexchange.com/questions/384909/ + # + # Filtering out zero sample weights from the beginning makes life + # easier for the linprog solver. + indices = np.nonzero(sample_weight)[0] + n_indices = len(indices) # use n_mask instead of n_samples + if n_indices < len(sample_weight): + sample_weight = sample_weight[indices] + X = _safe_indexing(X, indices) + y = _safe_indexing(y, indices) + c = np.concatenate( + [ + np.full(2 * n_params, fill_value=alpha), + sample_weight * self.quantile, + sample_weight * (1 - self.quantile), + ] + ) + if self.fit_intercept: + # do not penalize the intercept + c[0] = 0 + c[n_params] = 0 + + if solver in ["highs", "highs-ds", "highs-ipm"]: + # Note that highs methods always use a sparse CSC memory layout internally, + # even for optimization problems parametrized using dense numpy arrays. + # Therefore, we work with CSC matrices as early as possible to limit + # unnecessary repeated memory copies. 
+ eye = sparse.eye(n_indices, dtype=X.dtype, format="csc") + if self.fit_intercept: + ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype)) + A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc") + else: + A_eq = sparse.hstack([X, -X, eye, -eye], format="csc") + else: + eye = np.eye(n_indices) + if self.fit_intercept: + ones = np.ones((n_indices, 1)) + A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1) + else: + A_eq = np.concatenate([X, -X, eye, -eye], axis=1) + + b_eq = y + + result = linprog( + c=c, + A_eq=A_eq, + b_eq=b_eq, + method=solver, + options=solver_options, + ) + solution = result.x + if not result.success: + failure = { + 1: "Iteration limit reached.", + 2: "Problem appears to be infeasible.", + 3: "Problem appears to be unbounded.", + 4: "Numerical difficulties encountered.", + } + warnings.warn( + "Linear programming for QuantileRegressor did not succeed.\n" + f"Status is {result.status}: " + + failure.setdefault(result.status, "unknown reason") + + "\n" + + "Result message of linprog:\n" + + result.message, + ConvergenceWarning, + ) + + # positive slack - negative slack + # solution is an array with (params_pos, params_neg, u, v) + params = solution[:n_params] - solution[n_params : 2 * n_params] + + self.n_iter_ = result.nit + + if self.fit_intercept: + self.coef_ = params[1:] + self.intercept_ = params[0] + else: + self.coef_ = params + self.intercept_ = 0.0 + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag.py new file mode 100644 index 0000000000000000000000000000000000000000..2626955ec2a7fe2f2a451a6bd141340613d19800 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sag.py @@ -0,0 +1,372 @@ +"""Solvers for Ridge and LogisticRegression using SAG algorithm""" + +# Authors: Tom Dupre la Tour +# +# License: BSD 3 clause + +import warnings + +import numpy as np + +from ..exceptions import ConvergenceWarning +from ..utils import check_array +from ..utils.extmath import row_norms +from ..utils.validation import _check_sample_weight +from ._base import make_dataset +from ._sag_fast import sag32, sag64 + + +def get_auto_step_size( + max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False +): + """Compute automatic step size for SAG solver. + + The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is + the max sum of squares for over all samples. + + Parameters + ---------- + max_squared_sum : float + Maximum squared sum of X over samples. + + alpha_scaled : float + Constant that multiplies the regularization term, scaled by + 1. / n_samples, the number of samples. + + loss : {'log', 'squared', 'multinomial'} + The loss function used in SAG solver. + + fit_intercept : bool + Specifies if a constant (a.k.a. bias or intercept) will be + added to the decision function. + + n_samples : int, default=None + Number of rows in X. Useful if is_saga=True. + + is_saga : bool, default=False + Whether to return step size for the SAGA algorithm or the SAG + algorithm. + + Returns + ------- + step_size : float + Step size used in SAG solver. + + References + ---------- + Schmidt, M., Roux, N. L., & Bach, F. (2013). + Minimizing finite sums with the stochastic average gradient + https://hal.inria.fr/hal-00860051/document + + :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014). 
+ "SAGA: A Fast Incremental Gradient Method With Support + for Non-Strongly Convex Composite Objectives" <1407.0202>` + """ + if loss in ("log", "multinomial"): + L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled + elif loss == "squared": + # inverse Lipschitz constant for squared loss + L = max_squared_sum + int(fit_intercept) + alpha_scaled + else: + raise ValueError( + "Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'" + % loss + ) + if is_saga: + # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n)) + # See Defazio et al. 2014 + mun = min(2 * n_samples * alpha_scaled, L) + step = 1.0 / (2 * L + mun) + else: + # SAG theoretical step size is 1/16L but it is recommended to use 1 / L + # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf, + # slide 65 + step = 1.0 / L + return step + + +def sag_solver( + X, + y, + sample_weight=None, + loss="log", + alpha=1.0, + beta=0.0, + max_iter=1000, + tol=0.001, + verbose=0, + random_state=None, + check_input=True, + max_squared_sum=None, + warm_start_mem=None, + is_saga=False, +): + """SAG solver for Ridge and LogisticRegression. + + SAG stands for Stochastic Average Gradient: the gradient of the loss is + estimated each sample at a time and the model is updated along the way with + a constant learning rate. + + IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the + same scale. You can normalize the data by using + sklearn.preprocessing.StandardScaler on your data before passing it to the + fit method. + + This implementation works with data represented as dense numpy arrays or + sparse scipy arrays of floating point values for the features. It will + fit the data according to squared loss or log loss. + + The regularizer is a penalty added to the loss function that shrinks model + parameters towards the zero vector using the squared euclidean norm L2. + + .. versionadded:: 0.17 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. With loss='multinomial', y must be label encoded + (see preprocessing.LabelEncoder). + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + loss : {'log', 'squared', 'multinomial'}, default='log' + Loss function that will be optimized: + -'log' is the binary logistic loss, as used in LogisticRegression. + -'squared' is the squared loss, as used in Ridge. + -'multinomial' is the multinomial logistic loss, as used in + LogisticRegression. + + .. versionadded:: 0.18 + *loss='multinomial'* + + alpha : float, default=1. + L2 regularization term in the objective function + ``(0.5 * alpha * || W ||_F^2)``. + + beta : float, default=0. + L1 regularization term in the objective function + ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True. + + max_iter : int, default=1000 + The max number of passes over the training data if the stopping + criteria is not reached. + + tol : float, default=0.001 + The stopping criteria for the weights. The iterations will stop when + max(change in weights) / max(weights) < tol. + + verbose : int, default=0 + The verbosity level. + + random_state : int, RandomState instance or None, default=None + Used when shuffling the data. Pass an int for reproducible output + across multiple function calls. + See :term:`Glossary `. + + check_input : bool, default=True + If False, the input arrays X and y will not be checked. 
+ + max_squared_sum : float, default=None + Maximum squared sum of X over samples. If None, it will be computed, + going through all the samples. The value should be precomputed + to speed up cross validation. + + warm_start_mem : dict, default=None + The initialization parameters used for warm starting. Warm starting is + currently used in LogisticRegression but not in Ridge. + It contains: + - 'coef': the weight vector, with the intercept in last line + if the intercept is fitted. + - 'gradient_memory': the scalar gradient for all seen samples. + - 'sum_gradient': the sum of gradient over all seen samples, + for each feature. + - 'intercept_sum_gradient': the sum of gradient over all seen + samples, for the intercept. + - 'seen': array of boolean describing the seen samples. + - 'num_seen': the number of seen samples. + + is_saga : bool, default=False + Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves + better in the first epochs, and allow for l1 regularisation. + + Returns + ------- + coef_ : ndarray of shape (n_features,) + Weight vector. + + n_iter_ : int + The number of full pass on all samples. + + warm_start_mem : dict + Contains a 'coef' key with the fitted result, and possibly the + fitted intercept at the end of the array. Contains also other keys + used for warm starting. + + Examples + -------- + >>> import numpy as np + >>> from sklearn import linear_model + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> X = rng.randn(n_samples, n_features) + >>> y = rng.randn(n_samples) + >>> clf = linear_model.Ridge(solver='sag') + >>> clf.fit(X, y) + Ridge(solver='sag') + + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> y = np.array([1, 1, 2, 2]) + >>> clf = linear_model.LogisticRegression( + ... solver='sag', multi_class='multinomial') + >>> clf.fit(X, y) + LogisticRegression(multi_class='multinomial', solver='sag') + + References + ---------- + Schmidt, M., Roux, N. L., & Bach, F. (2013). + Minimizing finite sums with the stochastic average gradient + https://hal.inria.fr/hal-00860051/document + + :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014). + "SAGA: A Fast Incremental Gradient Method With Support + for Non-Strongly Convex Composite Objectives" <1407.0202>` + + See Also + -------- + Ridge, SGDRegressor, ElasticNet, Lasso, SVR, + LogisticRegression, SGDClassifier, LinearSVC, Perceptron + """ + if warm_start_mem is None: + warm_start_mem = {} + # Ridge default max_iter is None + if max_iter is None: + max_iter = 1000 + + if check_input: + _dtype = [np.float64, np.float32] + X = check_array(X, dtype=_dtype, accept_sparse="csr", order="C") + y = check_array(y, dtype=_dtype, ensure_2d=False, order="C") + + n_samples, n_features = X.shape[0], X.shape[1] + # As in SGD, the alpha is scaled by n_samples. + alpha_scaled = float(alpha) / n_samples + beta_scaled = float(beta) / n_samples + + # if loss == 'multinomial', y should be label encoded. + n_classes = int(y.max()) + 1 if loss == "multinomial" else 1 + + # initialization + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if "coef" in warm_start_mem.keys(): + coef_init = warm_start_mem["coef"] + else: + # assume fit_intercept is False + coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C") + + # coef_init contains possibly the intercept_init at the end. + # Note that Ridge centers the data before fitting, so fit_intercept=False. 
+ fit_intercept = coef_init.shape[0] == (n_features + 1) + if fit_intercept: + intercept_init = coef_init[-1, :] + coef_init = coef_init[:-1, :] + else: + intercept_init = np.zeros(n_classes, dtype=X.dtype) + + if "intercept_sum_gradient" in warm_start_mem.keys(): + intercept_sum_gradient = warm_start_mem["intercept_sum_gradient"] + else: + intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype) + + if "gradient_memory" in warm_start_mem.keys(): + gradient_memory_init = warm_start_mem["gradient_memory"] + else: + gradient_memory_init = np.zeros( + (n_samples, n_classes), dtype=X.dtype, order="C" + ) + if "sum_gradient" in warm_start_mem.keys(): + sum_gradient_init = warm_start_mem["sum_gradient"] + else: + sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C") + + if "seen" in warm_start_mem.keys(): + seen_init = warm_start_mem["seen"] + else: + seen_init = np.zeros(n_samples, dtype=np.int32, order="C") + + if "num_seen" in warm_start_mem.keys(): + num_seen_init = warm_start_mem["num_seen"] + else: + num_seen_init = 0 + + dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state) + + if max_squared_sum is None: + max_squared_sum = row_norms(X, squared=True).max() + step_size = get_auto_step_size( + max_squared_sum, + alpha_scaled, + loss, + fit_intercept, + n_samples=n_samples, + is_saga=is_saga, + ) + if step_size * alpha_scaled == 1: + raise ZeroDivisionError( + "Current sag implementation does not handle " + "the case step_size * alpha_scaled == 1" + ) + + sag = sag64 if X.dtype == np.float64 else sag32 + num_seen, n_iter_ = sag( + dataset, + coef_init, + intercept_init, + n_samples, + n_features, + n_classes, + tol, + max_iter, + loss, + step_size, + alpha_scaled, + beta_scaled, + sum_gradient_init, + gradient_memory_init, + seen_init, + num_seen_init, + fit_intercept, + intercept_sum_gradient, + intercept_decay, + is_saga, + verbose, + ) + + if n_iter_ == max_iter: + warnings.warn( + "The max_iter was reached which means the coef_ did not converge", + ConvergenceWarning, + ) + + if fit_intercept: + coef_init = np.vstack((coef_init, intercept_init)) + + warm_start_mem = { + "coef": coef_init, + "sum_gradient": sum_gradient_init, + "intercept_sum_gradient": intercept_sum_gradient, + "gradient_memory": gradient_memory_init, + "seen": seen_init, + "num_seen": num_seen, + } + + if loss == "multinomial": + coef_ = coef_init.T + else: + coef_ = coef_init[:, 0] + + return coef_, n_iter_, warm_start_mem diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ff254ef00ed47cbfe789952c9ac89ec9d1cd1c8c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pxd b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7ae704eee18db451a4159a8d4077dfbca8a5ebd1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pxd @@ -0,0 +1,26 @@ +# License: BSD 3 clause +"""Helper to load LossFunction from sgd_fast.pyx to sag_fast.pyx""" + +cdef class LossFunction: + cdef double loss(self, double p, double y) noexcept nogil + cdef double 
dloss(self, double p, double y) noexcept nogil + + +cdef class Regression(LossFunction): + cdef double loss(self, double p, double y) noexcept nogil + cdef double dloss(self, double p, double y) noexcept nogil + + +cdef class Classification(LossFunction): + cdef double loss(self, double p, double y) noexcept nogil + cdef double dloss(self, double p, double y) noexcept nogil + + +cdef class Log(Classification): + cdef double loss(self, double p, double y) noexcept nogil + cdef double dloss(self, double p, double y) noexcept nogil + + +cdef class SquaredLoss(Regression): + cdef double loss(self, double p, double y) noexcept nogil + cdef double dloss(self, double p, double y) noexcept nogil diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py new file mode 100644 index 0000000000000000000000000000000000000000..1826b0c83bb79ed324f326e014d216abfb8a817c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py @@ -0,0 +1,2605 @@ +# Authors: Peter Prettenhofer (main author) +# Mathieu Blondel (partial_fit support) +# +# License: BSD 3 clause +"""Classification, regression and One-Class SVM using Stochastic Gradient +Descent (SGD). +""" + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + BaseEstimator, + OutlierMixin, + RegressorMixin, + _fit_context, + clone, + is_classifier, +) +from ..exceptions import ConvergenceWarning +from ..model_selection import ShuffleSplit, StratifiedShuffleSplit +from ..utils import check_random_state, compute_class_weight, deprecated +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.extmath import safe_sparse_dot +from ..utils.metaestimators import available_if +from ..utils.multiclass import _check_partial_fit_first_call +from ..utils.parallel import Parallel, delayed +from ..utils.validation import _check_sample_weight, check_is_fitted +from ._base import LinearClassifierMixin, SparseCoefMixin, make_dataset +from ._sgd_fast import ( + EpsilonInsensitive, + Hinge, + Huber, + Log, + ModifiedHuber, + SquaredEpsilonInsensitive, + SquaredHinge, + SquaredLoss, + _plain_sgd32, + _plain_sgd64, +) + +LEARNING_RATE_TYPES = { + "constant": 1, + "optimal": 2, + "invscaling": 3, + "adaptive": 4, + "pa1": 5, + "pa2": 6, +} + +PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} + +DEFAULT_EPSILON = 0.1 +# Default value of ``epsilon`` parameter. 
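+# Schematic reminder of what the schedule codes above select (the formulas are
+# the ones documented on the estimators below; the integer codes themselves are
+# only consumed by the compiled _plain_sgd routines):
+#   'constant'   : eta = eta0
+#   'optimal'    : eta = 1.0 / (alpha * (t + t0)), t0 from Bottou's heuristic
+#   'invscaling' : eta = eta0 / pow(t, power_t)
+#   'adaptive'   : eta = eta0, divided by 5 each time n_iter_no_change epochs
+#                  bring no improvement
+#   'pa1', 'pa2' : passive-aggressive updates bounded by C (hidden options)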
+ +MAX_INT = np.iinfo(np.int32).max + + +class _ValidationScoreCallback: + """Callback for early stopping based on validation score""" + + def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None): + self.estimator = clone(estimator) + self.estimator.t_ = 1 # to pass check_is_fitted + if classes is not None: + self.estimator.classes_ = classes + self.X_val = X_val + self.y_val = y_val + self.sample_weight_val = sample_weight_val + + def __call__(self, coef, intercept): + est = self.estimator + est.coef_ = coef.reshape(1, -1) + est.intercept_ = np.atleast_1d(intercept) + return est.score(self.X_val, self.y_val, self.sample_weight_val) + + +class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for SGD classification and regression.""" + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left"), None], + "shuffle": ["boolean"], + "verbose": ["verbose"], + "random_state": ["random_state"], + "warm_start": ["boolean"], + "average": [Interval(Integral, 0, None, closed="left"), bool, np.bool_], + } + + def __init__( + self, + loss, + *, + penalty="l2", + alpha=0.0001, + C=1.0, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=0.1, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + self.loss = loss + self.penalty = penalty + self.learning_rate = learning_rate + self.epsilon = epsilon + self.alpha = alpha + self.C = C + self.l1_ratio = l1_ratio + self.fit_intercept = fit_intercept + self.shuffle = shuffle + self.random_state = random_state + self.verbose = verbose + self.eta0 = eta0 + self.power_t = power_t + self.early_stopping = early_stopping + self.validation_fraction = validation_fraction + self.n_iter_no_change = n_iter_no_change + self.warm_start = warm_start + self.average = average + self.max_iter = max_iter + self.tol = tol + + @abstractmethod + def fit(self, X, y): + """Fit model.""" + + def _more_validate_params(self, for_partial_fit=False): + """Validate input params.""" + if self.early_stopping and for_partial_fit: + raise ValueError("early_stopping should be False with partial_fit") + if ( + self.learning_rate in ("constant", "invscaling", "adaptive") + and self.eta0 <= 0.0 + ): + raise ValueError("eta0 must be > 0") + if self.learning_rate == "optimal" and self.alpha == 0: + raise ValueError( + "alpha must be > 0 since " + "learning_rate is 'optimal'. alpha is used " + "to compute the optimal learning rate." 
+ ) + + # raises ValueError if not registered + self._get_penalty_type(self.penalty) + self._get_learning_rate_type(self.learning_rate) + + def _get_loss_function(self, loss): + """Get concrete ``LossFunction`` object for str ``loss``.""" + loss_ = self.loss_functions[loss] + loss_class, args = loss_[0], loss_[1:] + if loss in ("huber", "epsilon_insensitive", "squared_epsilon_insensitive"): + args = (self.epsilon,) + return loss_class(*args) + + def _get_learning_rate_type(self, learning_rate): + return LEARNING_RATE_TYPES[learning_rate] + + def _get_penalty_type(self, penalty): + penalty = str(penalty).lower() + return PENALTY_TYPES[penalty] + + def _allocate_parameter_mem( + self, + n_classes, + n_features, + input_dtype, + coef_init=None, + intercept_init=None, + one_class=0, + ): + """Allocate mem for parameters; initialize if provided.""" + if n_classes > 2: + # allocate coef_ for multi-class + if coef_init is not None: + coef_init = np.asarray(coef_init, dtype=input_dtype, order="C") + if coef_init.shape != (n_classes, n_features): + raise ValueError("Provided ``coef_`` does not match dataset. ") + self.coef_ = coef_init + else: + self.coef_ = np.zeros( + (n_classes, n_features), dtype=input_dtype, order="C" + ) + + # allocate intercept_ for multi-class + if intercept_init is not None: + intercept_init = np.asarray( + intercept_init, order="C", dtype=input_dtype + ) + if intercept_init.shape != (n_classes,): + raise ValueError("Provided intercept_init does not match dataset.") + self.intercept_ = intercept_init + else: + self.intercept_ = np.zeros(n_classes, dtype=input_dtype, order="C") + else: + # allocate coef_ + if coef_init is not None: + coef_init = np.asarray(coef_init, dtype=input_dtype, order="C") + coef_init = coef_init.ravel() + if coef_init.shape != (n_features,): + raise ValueError("Provided coef_init does not match dataset.") + self.coef_ = coef_init + else: + self.coef_ = np.zeros(n_features, dtype=input_dtype, order="C") + + # allocate intercept_ + if intercept_init is not None: + intercept_init = np.asarray(intercept_init, dtype=input_dtype) + if intercept_init.shape != (1,) and intercept_init.shape != (): + raise ValueError("Provided intercept_init does not match dataset.") + if one_class: + self.offset_ = intercept_init.reshape( + 1, + ) + else: + self.intercept_ = intercept_init.reshape( + 1, + ) + else: + if one_class: + self.offset_ = np.zeros(1, dtype=input_dtype, order="C") + else: + self.intercept_ = np.zeros(1, dtype=input_dtype, order="C") + + # initialize average parameters + if self.average > 0: + self._standard_coef = self.coef_ + self._average_coef = np.zeros( + self.coef_.shape, dtype=input_dtype, order="C" + ) + if one_class: + self._standard_intercept = 1 - self.offset_ + else: + self._standard_intercept = self.intercept_ + + self._average_intercept = np.zeros( + self._standard_intercept.shape, dtype=input_dtype, order="C" + ) + + def _make_validation_split(self, y, sample_mask): + """Split the dataset between training set and validation set. + + Parameters + ---------- + y : ndarray of shape (n_samples, ) + Target values. + + sample_mask : ndarray of shape (n_samples, ) + A boolean array indicating whether each sample should be included + for validation set. + + Returns + ------- + validation_mask : ndarray of shape (n_samples, ) + Equal to True on the validation set, False on the training set. 
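+
+        Notes
+        -----
+        When ``early_stopping`` is False the returned mask is all False, so
+        every sample is kept for training and the validation set is empty.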
+ """ + n_samples = y.shape[0] + validation_mask = np.zeros(n_samples, dtype=np.bool_) + if not self.early_stopping: + # use the full set for training, with an empty validation set + return validation_mask + + if is_classifier(self): + splitter_type = StratifiedShuffleSplit + else: + splitter_type = ShuffleSplit + cv = splitter_type( + test_size=self.validation_fraction, random_state=self.random_state + ) + idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y)) + + if not np.any(sample_mask[idx_val]): + raise ValueError( + "The sample weights for validation set are all zero, consider using a" + " different random state." + ) + + if idx_train.shape[0] == 0 or idx_val.shape[0] == 0: + raise ValueError( + "Splitting %d samples into a train set and a validation set " + "with validation_fraction=%r led to an empty set (%d and %d " + "samples). Please either change validation_fraction, increase " + "number of samples, or disable early_stopping." + % ( + n_samples, + self.validation_fraction, + idx_train.shape[0], + idx_val.shape[0], + ) + ) + + validation_mask[idx_val] = True + return validation_mask + + def _make_validation_score_cb( + self, validation_mask, X, y, sample_weight, classes=None + ): + if not self.early_stopping: + return None + + return _ValidationScoreCallback( + self, + X[validation_mask], + y[validation_mask], + sample_weight[validation_mask], + classes=classes, + ) + + # TODO(1.6): Remove + # mypy error: Decorated property not supported + @deprecated( # type: ignore + "Attribute `loss_function_` was deprecated in version 1.4 and will be removed " + "in 1.6." + ) + @property + def loss_function_(self): + return self._loss_function_ + + +def _prepare_fit_binary(est, y, i, input_dtye): + """Initialization for fit_binary. + + Returns y, coef, intercept, average_coef, average_intercept. + """ + y_i = np.ones(y.shape, dtype=input_dtye, order="C") + y_i[y != est.classes_[i]] = -1.0 + average_intercept = 0 + average_coef = None + + if len(est.classes_) == 2: + if not est.average: + coef = est.coef_.ravel() + intercept = est.intercept_[0] + else: + coef = est._standard_coef.ravel() + intercept = est._standard_intercept[0] + average_coef = est._average_coef.ravel() + average_intercept = est._average_intercept[0] + else: + if not est.average: + coef = est.coef_[i] + intercept = est.intercept_[i] + else: + coef = est._standard_coef[i] + intercept = est._standard_intercept[i] + average_coef = est._average_coef[i] + average_intercept = est._average_intercept[i] + + return y_i, coef, intercept, average_coef, average_intercept + + +def fit_binary( + est, + i, + X, + y, + alpha, + C, + learning_rate, + max_iter, + pos_weight, + neg_weight, + sample_weight, + validation_mask=None, + random_state=None, +): + """Fit a single binary classifier. + + The i'th class is considered the "positive" class. + + Parameters + ---------- + est : Estimator object + The estimator to fit + + i : int + Index of the positive class + + X : numpy array or sparse matrix of shape [n_samples,n_features] + Training data + + y : numpy array of shape [n_samples, ] + Target values + + alpha : float + The regularization parameter + + C : float + Maximum step size for passive aggressive + + learning_rate : str + The learning rate. Accepted values are 'constant', 'optimal', + 'invscaling', 'pa1' and 'pa2'. 
+ + max_iter : int + The maximum number of iterations (epochs) + + pos_weight : float + The weight of the positive class + + neg_weight : float + The weight of the negative class + + sample_weight : numpy array of shape [n_samples, ] + The weight of each sample + + validation_mask : numpy array of shape [n_samples, ], default=None + Precomputed validation mask in case _fit_binary is called in the + context of a one-vs-rest reduction. + + random_state : int, RandomState instance, default=None + If int, random_state is the seed used by the random number generator; + If RandomState instance, random_state is the random number generator; + If None, the random number generator is the RandomState instance used + by `np.random`. + """ + # if average is not true, average_coef, and average_intercept will be + # unused + y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary( + est, y, i, input_dtye=X.dtype + ) + assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0] + + random_state = check_random_state(random_state) + dataset, intercept_decay = make_dataset( + X, y_i, sample_weight, random_state=random_state + ) + + penalty_type = est._get_penalty_type(est.penalty) + learning_rate_type = est._get_learning_rate_type(learning_rate) + + if validation_mask is None: + validation_mask = est._make_validation_split(y_i, sample_mask=sample_weight > 0) + classes = np.array([-1, 1], dtype=y_i.dtype) + validation_score_cb = est._make_validation_score_cb( + validation_mask, X, y_i, sample_weight, classes=classes + ) + + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(MAX_INT) + + tol = est.tol if est.tol is not None else -np.inf + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd( + coef, + intercept, + average_coef, + average_intercept, + est._loss_function_, + penalty_type, + alpha, + C, + est.l1_ratio, + dataset, + validation_mask, + est.early_stopping, + validation_score_cb, + int(est.n_iter_no_change), + max_iter, + tol, + int(est.fit_intercept), + int(est.verbose), + int(est.shuffle), + seed, + pos_weight, + neg_weight, + learning_rate_type, + est.eta0, + est.power_t, + 0, + est.t_, + intercept_decay, + est.average, + ) + + if est.average: + if len(est.classes_) == 2: + est._average_intercept[0] = average_intercept + else: + est._average_intercept[i] = average_intercept + + return coef, intercept, n_iter_ + + +def _get_plain_sgd_function(input_dtype): + return _plain_sgd32 if input_dtype == np.float32 else _plain_sgd64 + + +class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta): + loss_functions = { + "hinge": (Hinge, 1.0), + "squared_hinge": (SquaredHinge, 1.0), + "perceptron": (Hinge, 0.0), + "log_loss": (Log,), + "modified_huber": (ModifiedHuber,), + "squared_error": (SquaredLoss,), + "huber": (Huber, DEFAULT_EPSILON), + "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), + "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), + } + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "loss": [StrOptions(set(loss_functions))], + "early_stopping": ["boolean"], + "validation_fraction": [Interval(Real, 0, 1, closed="neither")], + "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "class_weight": [StrOptions({"balanced"}), dict, None], + } + + @abstractmethod + def __init__( + self, + loss="hinge", + *, + 
penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + n_jobs=None, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + self.class_weight = class_weight + self.n_jobs = n_jobs + + def _partial_fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + max_iter, + classes, + sample_weight, + coef_init, + intercept_init, + ): + first_call = not hasattr(self, "classes_") + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + reset=first_call, + ) + + n_samples, n_features = X.shape + + _check_partial_fit_first_call(self, classes) + + n_classes = self.classes_.shape[0] + + # Allocate datastructures from input arguments + self._expanded_class_weight = compute_class_weight( + self.class_weight, classes=self.classes_, y=y + ) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if getattr(self, "coef_", None) is None or coef_init is not None: + self._allocate_parameter_mem( + n_classes=n_classes, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=intercept_init, + ) + elif n_features != self.coef_.shape[-1]: + raise ValueError( + "Number of features %d does not match previous data %d." + % (n_features, self.coef_.shape[-1]) + ) + + self._loss_function_ = self._get_loss_function(loss) + if not hasattr(self, "t_"): + self.t_ = 1.0 + + # delegate to concrete training procedure + if n_classes > 2: + self._fit_multiclass( + X, + y, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + elif n_classes == 2: + self._fit_binary( + X, + y, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + else: + raise ValueError( + "The number of classes has to be greater than one; got %d class" + % n_classes + ) + + return self + + def _fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + coef_init=None, + intercept_init=None, + sample_weight=None, + ): + if hasattr(self, "classes_"): + # delete the attribute otherwise _partial_fit thinks it's not the first call + delattr(self, "classes_") + + # labels can be encoded as float, int, or string literals + # np.unique sorts in asc order; largest class id is positive class + y = self._validate_data(y=y) + classes = np.unique(y) + + if self.warm_start and hasattr(self, "coef_"): + if coef_init is None: + coef_init = self.coef_ + if intercept_init is None: + intercept_init = self.intercept_ + else: + self.coef_ = None + self.intercept_ = None + + if self.average > 0: + self._standard_coef = self.coef_ + self._standard_intercept = self.intercept_ + self._average_coef = None + self._average_intercept = None + + # Clear iteration count for multiple call to fit. 
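+        # t_ counts weight updates (offset by one): each epoch adds
+        # n_iter_ * n_samples, so after fit() t_ equals n_iter_ * n_samples + 1,
+        # matching the documented ``t_`` attribute.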
+ self.t_ = 1.0 + + self._partial_fit( + X, + y, + alpha, + C, + loss, + learning_rate, + self.max_iter, + classes, + sample_weight, + coef_init, + intercept_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + return self + + def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter): + """Fit a binary classifier on X and y.""" + coef, intercept, n_iter_ = fit_binary( + self, + 1, + X, + y, + alpha, + C, + learning_rate, + max_iter, + self._expanded_class_weight[1], + self._expanded_class_weight[0], + sample_weight, + random_state=self.random_state, + ) + + self.t_ += n_iter_ * X.shape[0] + self.n_iter_ = n_iter_ + + # need to be 2d + if self.average > 0: + if self.average <= self.t_ - 1: + self.coef_ = self._average_coef.reshape(1, -1) + self.intercept_ = self._average_intercept + else: + self.coef_ = self._standard_coef.reshape(1, -1) + self._standard_intercept = np.atleast_1d(intercept) + self.intercept_ = self._standard_intercept + else: + self.coef_ = coef.reshape(1, -1) + # intercept is a float, need to convert it to an array of length 1 + self.intercept_ = np.atleast_1d(intercept) + + def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter): + """Fit a multi-class classifier by combining binary classifiers + + Each binary classifier predicts one class versus all others. This + strategy is called OvA (One versus All) or OvR (One versus Rest). + """ + # Precompute the validation split using the multiclass labels + # to ensure proper balancing of the classes. + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + + # Use joblib to fit OvA in parallel. + # Pick the random seed for each job outside of fit_binary to avoid + # sharing the estimator random state between threads which could lead + # to non-deterministic behavior + random_state = check_random_state(self.random_state) + seeds = random_state.randint(MAX_INT, size=len(self.classes_)) + result = Parallel( + n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem" + )( + delayed(fit_binary)( + self, + i, + X, + y, + alpha, + C, + learning_rate, + max_iter, + self._expanded_class_weight[i], + 1.0, + sample_weight, + validation_mask=validation_mask, + random_state=seed, + ) + for i, seed in enumerate(seeds) + ) + + # take the maximum of n_iter_ over every binary fit + n_iter_ = 0.0 + for i, (_, intercept, n_iter_i) in enumerate(result): + self.intercept_[i] = intercept + n_iter_ = max(n_iter_, n_iter_i) + + self.t_ += n_iter_ * X.shape[0] + self.n_iter_ = n_iter_ + + if self.average > 0: + if self.average <= self.t_ - 1.0: + self.coef_ = self._average_coef + self.intercept_ = self._average_intercept + else: + self.coef_ = self._standard_coef + self._standard_intercept = np.atleast_1d(self.intercept_) + self.intercept_ = self._standard_intercept + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None, sample_weight=None): + """Perform one epoch of stochastic gradient descent on given samples. + + Internally, this method uses ``max_iter = 1``. Therefore, it is not + guaranteed that a minimum of the cost function is reached after calling + it once. Matters such as objective convergence, early stopping, and + learning rate adjustments should be handled by the user. 
+ + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of the training data. + + y : ndarray of shape (n_samples,) + Subset of the target values. + + classes : ndarray of shape (n_classes,), default=None + Classes across all calls to partial_fit. + Can be obtained by via `np.unique(y_all)`, where y_all is the + target vector of the entire dataset. + This argument is required for the first call to partial_fit + and can be omitted in the subsequent calls. + Note that y doesn't need to contain all labels in `classes`. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. + If not provided, uniform weights are assumed. + + Returns + ------- + self : object + Returns an instance of self. + """ + if not hasattr(self, "classes_"): + self._more_validate_params(for_partial_fit=True) + + if self.class_weight == "balanced": + raise ValueError( + "class_weight '{0}' is not supported for " + "partial_fit. In order to use 'balanced' weights," + " use compute_class_weight('{0}', " + "classes=classes, y=y). " + "In place of y you can use a large enough sample " + "of the full training set target to properly " + "estimate the class frequency distributions. " + "Pass the resulting weights as the class_weight " + "parameter.".format(self.class_weight) + ) + + return self._partial_fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + classes=classes, + sample_weight=sample_weight, + coef_init=None, + intercept_init=None, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): + """Fit linear model with Stochastic Gradient Descent. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + coef_init : ndarray of shape (n_classes, n_features), default=None + The initial coefficients to warm-start the optimization. + + intercept_init : ndarray of shape (n_classes,), default=None + The initial intercept to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. + If not provided, uniform weights are assumed. These weights will + be multiplied with class_weight (passed through the + constructor) if class_weight is specified. + + Returns + ------- + self : object + Returns an instance of self. + """ + self._more_validate_params() + + return self._fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + intercept_init=intercept_init, + sample_weight=sample_weight, + ) + + +class SGDClassifier(BaseSGDClassifier): + """Linear classifiers (SVM, logistic regression, etc.) with SGD training. + + This estimator implements regularized linear models with stochastic + gradient descent (SGD) learning: the gradient of the loss is estimated + each sample at a time and the model is updated along the way with a + decreasing strength schedule (aka learning rate). SGD allows minibatch + (online/out-of-core) learning via the `partial_fit` method. + For best results using the default learning rate schedule, the data should + have zero mean and unit variance. + + This implementation works with data represented as dense or sparse arrays + of floating point values for the features. 
The model it fits can be + controlled with the loss parameter; by default, it fits a linear support + vector machine (SVM). + + The regularizer is a penalty added to the loss function that shrinks model + parameters towards the zero vector using either the squared euclidean norm + L2 or the absolute norm L1 or a combination of both (Elastic Net). If the + parameter update crosses the 0.0 value because of the regularizer, the + update is truncated to 0.0 to allow for learning sparse models and achieve + online feature selection. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + loss : {'hinge', 'log_loss', 'modified_huber', 'squared_hinge',\ + 'perceptron', 'squared_error', 'huber', 'epsilon_insensitive',\ + 'squared_epsilon_insensitive'}, default='hinge' + The loss function to be used. + + - 'hinge' gives a linear SVM. + - 'log_loss' gives logistic regression, a probabilistic classifier. + - 'modified_huber' is another smooth loss that brings tolerance to + outliers as well as probability estimates. + - 'squared_hinge' is like hinge but is quadratically penalized. + - 'perceptron' is the linear loss used by the perceptron algorithm. + - The other losses, 'squared_error', 'huber', 'epsilon_insensitive' and + 'squared_epsilon_insensitive' are designed for regression but can be useful + in classification as well; see + :class:`~sklearn.linear_model.SGDRegressor` for a description. + + More details about the losses formulas can be found in the + :ref:`User Guide `. + + penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' + The penalty (aka regularization term) to be used. Defaults to 'l2' + which is the standard regularizer for linear SVM models. 'l1' and + 'elasticnet' might bring sparsity to the model (feature selection) + not achievable with 'l2'. No penalty is added when set to `None`. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term. The higher the + value, the stronger the regularization. Also used to compute the + learning rate when `learning_rate` is set to 'optimal'. + Values must be in the range `[0.0, inf)`. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. + l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. + Only used if `penalty` is 'elasticnet'. + Values must be in the range `[0.0, 1.0]`. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + Values must be in the range `[1, inf)`. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, training will stop + when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive + epochs. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Values must be in the range `[0.0, inf)`. + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + Values must be in the range `[0, inf)`. + + epsilon : float, default=0.1 + Epsilon in the epsilon-insensitive loss functions; only if `loss` is + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. 
+ For 'huber', determines the threshold at which it becomes less + important to get the prediction exactly right. + For epsilon-insensitive, any differences between the current prediction + and the correct label are ignored if they are less than this threshold. + Values must be in the range `[0.0, inf)`. + + n_jobs : int, default=None + The number of CPUs to use to do the OVA (One Versus All, for + multi-class problems) computation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + Used for shuffling the data, when ``shuffle`` is set to ``True``. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + Integer values must be in the range `[0, 2**32 - 1]`. + + learning_rate : str, default='optimal' + The learning rate schedule: + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where `t0` is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': `eta = eta0`, as long as the training keeps decreasing. + Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + `early_stopping` is `True`, the current learning rate is divided by 5. + + .. versionadded:: 0.20 + Added 'adaptive' option + + eta0 : float, default=0.0 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.0 as eta0 is not used by + the default schedule 'optimal'. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.5 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to `True`, it will automatically set aside + a stratified fraction of training data as validation and terminate + training when validation score returned by the `score` method is not + improving by at least tol for n_iter_no_change consecutive epochs. + + .. versionadded:: 0.20 + Added 'early_stopping' option + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if `early_stopping` is True. + Values must be in the range `(0.0, 1.0)`. + + .. versionadded:: 0.20 + Added 'validation_fraction' option + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before stopping + fitting. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Integer values must be in the range `[1, max_iter)`. + + .. versionadded:: 0.20 + Added 'n_iter_no_change' option + + class_weight : dict, {class_label: weight} or "balanced", default=None + Preset for the class_weight fit parameter. + + Weights associated with classes. If not given, all classes + are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. 
+ See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to `True`, computes the averaged SGD weights across all + updates and stores the result in the ``coef_`` attribute. If set to + an int greater than 1, averaging will begin once the total number of + samples seen reaches `average`. So ``average=10`` will begin + averaging after seeing 10 samples. + Integer values must be in the range `[1, n_samples]`. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ + (n_classes, n_features) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + n_iter_ : int + The actual number of iterations before reaching the stopping criterion. + For multiclass fits, it is the maximum over every binary fit. + + loss_function_ : concrete ``LossFunction`` + + .. deprecated:: 1.4 + Attribute `loss_function_` was deprecated in version 1.4 and will be + removed in 1.6. + + classes_ : array of shape (n_classes,) + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.svm.LinearSVC : Linear support vector classification. + LogisticRegression : Logistic regression. + Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to + ``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", + penalty=None)``. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import SGDClassifier + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.pipeline import make_pipeline + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> Y = np.array([1, 1, 2, 2]) + >>> # Always scale the input. The most convenient way is to use a pipeline. + >>> clf = make_pipeline(StandardScaler(), + ... 
SGDClassifier(max_iter=1000, tol=1e-3)) + >>> clf.fit(X, Y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('sgdclassifier', SGDClassifier())]) + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + **BaseSGDClassifier._parameter_constraints, + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "power_t": [Interval(Real, None, None, closed="neither")], + "epsilon": [Interval(Real, 0, None, closed="left")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "eta0": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + loss="hinge", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + n_jobs=None, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + n_jobs=n_jobs, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + class_weight=class_weight, + warm_start=warm_start, + average=average, + ) + + def _check_proba(self): + if self.loss not in ("log_loss", "modified_huber"): + raise AttributeError( + "probability estimates are not available for loss=%r" % self.loss + ) + return True + + @available_if(_check_proba) + def predict_proba(self, X): + """Probability estimates. + + This method is only available for log loss and modified Huber loss. + + Multiclass probability estimates are derived from binary (one-vs.-rest) + estimates by simple normalization, as recommended by Zadrozny and + Elkan. + + Binary probability estimates for loss="modified_huber" are given by + (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions + it is necessary to perform proper probability calibration by wrapping + the classifier with + :class:`~sklearn.calibration.CalibratedClassifierCV` instead. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data for prediction. + + Returns + ------- + ndarray of shape (n_samples, n_classes) + Returns the probability of the sample for each class in the model, + where classes are ordered as they are in `self.classes_`. 
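+
+        Notes
+        -----
+        As a small worked case of the binary ``modified_huber`` formula above,
+        a decision value of 0.3 gives ``(clip(0.3, -1, 1) + 1) / 2 == 0.65``
+        for the positive class and ``1 - 0.65 == 0.35`` for the negative one.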
+ + References + ---------- + Zadrozny and Elkan, "Transforming classifier scores into multiclass + probability estimates", SIGKDD'02, + https://dl.acm.org/doi/pdf/10.1145/775047.775151 + + The justification for the formula in the loss="modified_huber" + case is in the appendix B in: + http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf + """ + check_is_fitted(self) + + if self.loss == "log_loss": + return self._predict_proba_lr(X) + + elif self.loss == "modified_huber": + binary = len(self.classes_) == 2 + scores = self.decision_function(X) + + if binary: + prob2 = np.ones((scores.shape[0], 2)) + prob = prob2[:, 1] + else: + prob = scores + + np.clip(scores, -1, 1, prob) + prob += 1.0 + prob /= 2.0 + + if binary: + prob2[:, 0] -= prob + prob = prob2 + else: + # the above might assign zero to all classes, which doesn't + # normalize neatly; work around this to produce uniform + # probabilities + prob_sum = prob.sum(axis=1) + all_zero = prob_sum == 0 + if np.any(all_zero): + prob[all_zero, :] = 1 + prob_sum[all_zero] = len(self.classes_) + + # normalize + prob /= prob_sum.reshape((prob.shape[0], -1)) + + return prob + + else: + raise NotImplementedError( + "predict_(log_)proba only supported when" + " loss='log_loss' or loss='modified_huber' " + "(%r given)" + % self.loss + ) + + @available_if(_check_proba) + def predict_log_proba(self, X): + """Log of probability estimates. + + This method is only available for log loss and modified Huber loss. + + When loss="modified_huber", probability estimates may be hard zeros + and ones, so taking the logarithm is not possible. + + See ``predict_proba`` for details. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data for prediction. + + Returns + ------- + T : array-like, shape (n_samples, n_classes) + Returns the log-probability of the sample for each class in the + model, where classes are ordered as they are in + `self.classes_`. 
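+
+        Examples
+        --------
+        A minimal illustration (only the output shape is checked; the data
+        reuses the toy arrays from the class example above):
+
+        >>> import numpy as np
+        >>> from sklearn.linear_model import SGDClassifier
+        >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
+        >>> y = np.array([1, 1, 2, 2])
+        >>> clf = SGDClassifier(loss="log_loss", random_state=0).fit(X, y)
+        >>> clf.predict_log_proba(X).shape
+        (4, 2)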
+ """ + return np.log(self.predict_proba(X)) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + }, + "preserves_dtype": [np.float64, np.float32], + } + + +class BaseSGDRegressor(RegressorMixin, BaseSGD): + loss_functions = { + "squared_error": (SquaredLoss,), + "huber": (Huber, DEFAULT_EPSILON), + "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), + "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), + } + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "loss": [StrOptions(set(loss_functions))], + "early_stopping": ["boolean"], + "validation_fraction": [Interval(Real, 0, 1, closed="neither")], + "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], + } + + @abstractmethod + def __init__( + self, + loss="squared_error", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + random_state=None, + learning_rate="invscaling", + eta0=0.01, + power_t=0.25, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + + def _partial_fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + max_iter, + sample_weight, + coef_init, + intercept_init, + ): + first_call = getattr(self, "coef_", None) is None + X, y = self._validate_data( + X, + y, + accept_sparse="csr", + copy=False, + order="C", + dtype=[np.float64, np.float32], + accept_large_sparse=False, + reset=first_call, + ) + y = y.astype(X.dtype, copy=False) + + n_samples, n_features = X.shape + + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # Allocate datastructures from input arguments + if first_call: + self._allocate_parameter_mem( + n_classes=1, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=intercept_init, + ) + if self.average > 0 and getattr(self, "_average_coef", None) is None: + self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C") + self._average_intercept = np.zeros(1, dtype=X.dtype, order="C") + + self._fit_regressor( + X, y, alpha, C, loss, learning_rate, sample_weight, max_iter + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, sample_weight=None): + """Perform one epoch of stochastic gradient descent on given samples. + + Internally, this method uses ``max_iter = 1``. Therefore, it is not + guaranteed that a minimum of the cost function is reached after calling + it once. Matters such as objective convergence and early stopping + should be handled by the user. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of training data. + + y : numpy array of shape (n_samples,) + Subset of target values. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. 
+ If not provided, uniform weights are assumed. + + Returns + ------- + self : object + Returns an instance of self. + """ + if not hasattr(self, "coef_"): + self._more_validate_params(for_partial_fit=True) + + return self._partial_fit( + X, + y, + self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + sample_weight=sample_weight, + coef_init=None, + intercept_init=None, + ) + + def _fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + coef_init=None, + intercept_init=None, + sample_weight=None, + ): + if self.warm_start and getattr(self, "coef_", None) is not None: + if coef_init is None: + coef_init = self.coef_ + if intercept_init is None: + intercept_init = self.intercept_ + else: + self.coef_ = None + self.intercept_ = None + + # Clear iteration count for multiple call to fit. + self.t_ = 1.0 + + self._partial_fit( + X, + y, + alpha, + C, + loss, + learning_rate, + self.max_iter, + sample_weight, + coef_init, + intercept_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): + """Fit linear model with Stochastic Gradient Descent. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + coef_init : ndarray of shape (n_features,), default=None + The initial coefficients to warm-start the optimization. + + intercept_init : ndarray of shape (1,), default=None + The initial intercept to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + Fitted `SGDRegressor` estimator. + """ + self._more_validate_params() + + return self._fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + intercept_init=intercept_init, + sample_weight=sample_weight, + ) + + def _decision_function(self, X): + """Predict using the linear model + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + + Returns + ------- + ndarray of shape (n_samples,) + Predicted target values per element in X. + """ + check_is_fitted(self) + + X = self._validate_data(X, accept_sparse="csr", reset=False) + + scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + return scores.ravel() + + def predict(self, X): + """Predict using the linear model. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data. + + Returns + ------- + ndarray of shape (n_samples,) + Predicted target values per element in X. 
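+
+        Examples
+        --------
+        A minimal shape-level illustration with arbitrary data:
+
+        >>> import numpy as np
+        >>> from sklearn.linear_model import SGDRegressor
+        >>> rng = np.random.RandomState(0)
+        >>> X, y = rng.randn(8, 3), rng.randn(8)
+        >>> reg = SGDRegressor(max_iter=5, tol=None).fit(X, y)
+        >>> reg.predict(X).shape
+        (8,)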
+ """ + return self._decision_function(X) + + def _fit_regressor( + self, X, y, alpha, C, loss, learning_rate, sample_weight, max_iter + ): + loss_function = self._get_loss_function(loss) + penalty_type = self._get_penalty_type(self.penalty) + learning_rate_type = self._get_learning_rate_type(learning_rate) + + if not hasattr(self, "t_"): + self.t_ = 1.0 + + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + validation_score_cb = self._make_validation_score_cb( + validation_mask, X, y, sample_weight + ) + + random_state = check_random_state(self.random_state) + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(0, MAX_INT) + + dataset, intercept_decay = make_dataset( + X, y, sample_weight, random_state=random_state + ) + + tol = self.tol if self.tol is not None else -np.inf + + if self.average: + coef = self._standard_coef + intercept = self._standard_intercept + average_coef = self._average_coef + average_intercept = self._average_intercept + else: + coef = self.coef_ + intercept = self.intercept_ + average_coef = None # Not used + average_intercept = [0] # Not used + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd( + coef, + intercept[0], + average_coef, + average_intercept[0], + loss_function, + penalty_type, + alpha, + C, + self.l1_ratio, + dataset, + validation_mask, + self.early_stopping, + validation_score_cb, + int(self.n_iter_no_change), + max_iter, + tol, + int(self.fit_intercept), + int(self.verbose), + int(self.shuffle), + seed, + 1.0, + 1.0, + learning_rate_type, + self.eta0, + self.power_t, + 0, + self.t_, + intercept_decay, + self.average, + ) + + self.t_ += self.n_iter_ * X.shape[0] + + if self.average > 0: + self._average_intercept = np.atleast_1d(average_intercept) + self._standard_intercept = np.atleast_1d(intercept) + + if self.average <= self.t_ - 1.0: + # made enough updates for averaging to be taken into account + self.coef_ = average_coef + self.intercept_ = np.atleast_1d(average_intercept) + else: + self.coef_ = coef + self.intercept_ = np.atleast_1d(intercept) + + else: + self.intercept_ = np.atleast_1d(intercept) + + +class SGDRegressor(BaseSGDRegressor): + """Linear model fitted by minimizing a regularized empirical loss with SGD. + + SGD stands for Stochastic Gradient Descent: the gradient of the loss is + estimated each sample at a time and the model is updated along the way with + a decreasing strength schedule (aka learning rate). + + The regularizer is a penalty added to the loss function that shrinks model + parameters towards the zero vector using either the squared euclidean norm + L2 or the absolute norm L1 or a combination of both (Elastic Net). If the + parameter update crosses the 0.0 value because of the regularizer, the + update is truncated to 0.0 to allow for learning sparse models and achieve + online feature selection. + + This implementation works with data represented as dense numpy arrays of + floating point values for the features. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + loss : str, default='squared_error' + The loss function to be used. The possible values are 'squared_error', + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive' + + The 'squared_error' refers to the ordinary least squares fit. 
+ 'huber' modifies 'squared_error' to focus less on getting outliers + correct by switching from squared to linear loss past a distance of + epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is + linear past that; this is the loss function used in SVR. + 'squared_epsilon_insensitive' is the same but becomes squared loss past + a tolerance of epsilon. + + More details about the losses formulas can be found in the + :ref:`User Guide `. + + penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' + The penalty (aka regularization term) to be used. Defaults to 'l2' + which is the standard regularizer for linear SVM models. 'l1' and + 'elasticnet' might bring sparsity to the model (feature selection) + not achievable with 'l2'. No penalty is added when set to `None`. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term. The higher the + value, the stronger the regularization. Also used to compute the + learning rate when `learning_rate` is set to 'optimal'. + Values must be in the range `[0.0, inf)`. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. + l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. + Only used if `penalty` is 'elasticnet'. + Values must be in the range `[0.0, 1.0]`. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + Values must be in the range `[1, inf)`. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, training will stop + when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive + epochs. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Values must be in the range `[0.0, inf)`. + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + Values must be in the range `[0, inf)`. + + epsilon : float, default=0.1 + Epsilon in the epsilon-insensitive loss functions; only if `loss` is + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. + For 'huber', determines the threshold at which it becomes less + important to get the prediction exactly right. + For epsilon-insensitive, any differences between the current prediction + and the correct label are ignored if they are less than this threshold. + Values must be in the range `[0.0, inf)`. + + random_state : int, RandomState instance, default=None + Used for shuffling the data, when ``shuffle`` is set to ``True``. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + learning_rate : str, default='invscaling' + The learning rate schedule: + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where t0 is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': eta = eta0, as long as the training keeps decreasing. + Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + early_stopping is True, the current learning rate is divided by 5. + + .. 
versionadded:: 0.20 + Added 'adaptive' option + + eta0 : float, default=0.01 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.01. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.25 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to True, it will automatically set aside + a fraction of training data as validation and terminate + training when validation score returned by the `score` method is not + improving by at least `tol` for `n_iter_no_change` consecutive + epochs. + + .. versionadded:: 0.20 + Added 'early_stopping' option + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if `early_stopping` is True. + Values must be in the range `(0.0, 1.0)`. + + .. versionadded:: 0.20 + Added 'validation_fraction' option + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before stopping + fitting. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Integer values must be in the range `[1, max_iter)`. + + .. versionadded:: 0.20 + Added 'n_iter_no_change' option + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights across all + updates and stores the result in the ``coef_`` attribute. If set to + an int greater than 1, averaging will begin once the total number of + samples seen reaches `average`. So ``average=10`` will begin + averaging after seeing 10 samples. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) + The intercept term. + + n_iter_ : int + The actual number of iterations before reaching the stopping criterion. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + HuberRegressor : Linear regression model that is robust to outliers. + Lars : Least Angle Regression model. + Lasso : Linear Model trained with L1 prior as regularizer. + RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm. + Ridge : Linear least squares with l2 regularization. + sklearn.svm.SVR : Epsilon-Support Vector Regression. + TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. 
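Because the estimator also supports incremental learning through ``partial_fit`` (the update counter ``t_`` keeps growing across calls, as noted above), here is a minimal out-of-core training sketch. It is illustrative only and assumes synthetic NumPy data and a hypothetical true coefficient vector; the hyperparameters shown are simply the defaults.

    import numpy as np
    from sklearn.linear_model import SGDRegressor
    from sklearn.preprocessing import StandardScaler

    rng = np.random.RandomState(0)
    scaler = StandardScaler()
    reg = SGDRegressor(learning_rate="invscaling", eta0=0.01)

    # Feed the data in mini-batches; unlike fit, partial_fit does not reset
    # the update counter, so the learning rate keeps decaying across batches.
    true_coef = np.array([1.0, -2.0, 0.5, 0.0, 3.0])  # hypothetical ground truth
    for _ in range(5):
        X_batch = rng.randn(100, 5)
        y_batch = X_batch @ true_coef + 0.1 * rng.randn(100)
        X_batch = scaler.partial_fit(X_batch).transform(X_batch)
        reg.partial_fit(X_batch, y_batch)

    print(reg.coef_, reg.intercept_)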
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import SGDRegressor + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> # Always scale the input. The most convenient way is to use a pipeline. + >>> reg = make_pipeline(StandardScaler(), + ... SGDRegressor(max_iter=1000, tol=1e-3)) + >>> reg.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('sgdregressor', SGDRegressor())]) + """ + + _parameter_constraints: dict = { + **BaseSGDRegressor._parameter_constraints, + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "power_t": [Interval(Real, None, None, closed="neither")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "epsilon": [Interval(Real, 0, None, closed="left")], + "eta0": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + loss="squared_error", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + random_state=None, + learning_rate="invscaling", + eta0=0.01, + power_t=0.25, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + }, + "preserves_dtype": [np.float64, np.float32], + } + + +class SGDOneClassSVM(BaseSGD, OutlierMixin): + """Solves linear One-Class SVM using Stochastic Gradient Descent. + + This implementation is meant to be used with a kernel approximation + technique (e.g. `sklearn.kernel_approximation.Nystroem`) to obtain results + similar to `sklearn.svm.OneClassSVM` which uses a Gaussian kernel by + default. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + nu : float, default=0.5 + The nu parameter of the One Class SVM: an upper bound on the + fraction of training errors and a lower bound of the fraction of + support vectors. Should be in the interval (0, 1]. By default 0.5 + will be taken. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. Defaults to True. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + `partial_fit`. Defaults to 1000. + Values must be in the range `[1, inf)`. + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). Defaults to 1e-3. + Values must be in the range `[0.0, inf)`. 
+ + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + Defaults to True. + + verbose : int, default=0 + The verbosity level. + + random_state : int, RandomState instance or None, default=None + The seed of the pseudo random number generator to use when shuffling + the data. If int, random_state is the seed used by the random number + generator; If RandomState instance, random_state is the random number + generator; If None, the random number generator is the RandomState + instance used by `np.random`. + + learning_rate : {'constant', 'optimal', 'invscaling', 'adaptive'}, default='optimal' + The learning rate schedule to use with `fit`. (If using `partial_fit`, + learning rate must be controlled directly). + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where t0 is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': eta = eta0, as long as the training keeps decreasing. + Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + early_stopping is True, the current learning rate is divided by 5. + + eta0 : float, default=0.0 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.0 as eta0 is not used by + the default schedule 'optimal'. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.5 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights and stores the + result in the ``coef_`` attribute. If set to an int greater than 1, + averaging will begin once the total number of samples seen reaches + average. So ``average=10`` will begin averaging after seeing 10 + samples. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) + Weights assigned to the features. + + offset_ : ndarray of shape (1,) + Offset used to define the decision function from the raw scores. + We have the relation: decision_function = score_samples - offset. + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + loss_function_ : concrete ``LossFunction`` + + .. deprecated:: 1.4 + ``loss_function_`` was deprecated in version 1.4 and will be removed in + 1.6. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.svm.OneClassSVM : Unsupervised Outlier Detection. 
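Since the description above recommends pairing this estimator with a kernel approximation, the following is a minimal sketch (assuming a toy 2D dataset and arbitrarily chosen Nystroem settings) of approximating a Gaussian-kernel One-Class SVM:

    import numpy as np
    from sklearn.kernel_approximation import Nystroem
    from sklearn.linear_model import SGDOneClassSVM
    from sklearn.pipeline import make_pipeline

    rng = np.random.RandomState(42)
    X_train = 0.3 * rng.randn(500, 2)        # inliers clustered around the origin
    X_test = np.array([[0.1, 0.0], [4.0, 4.0]])

    # Nystroem approximates the RBF feature map; the linear One-Class SVM is
    # then fitted in that approximate feature space with SGD.
    clf = make_pipeline(
        Nystroem(gamma=0.5, n_components=100, random_state=42),
        SGDOneClassSVM(nu=0.1, random_state=42),
    )
    clf.fit(X_train)
    print(clf.predict(X_test))               # typically [ 1 -1]: inlier, outlier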
+ + Notes + ----- + This estimator has a linear complexity in the number of training samples + and is thus better suited than the `sklearn.svm.OneClassSVM` + implementation for datasets with a large number of training samples (say + > 10,000). + + Examples + -------- + >>> import numpy as np + >>> from sklearn import linear_model + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> clf = linear_model.SGDOneClassSVM(random_state=42) + >>> clf.fit(X) + SGDOneClassSVM(random_state=42) + + >>> print(clf.predict([[4, 4]])) + [1] + """ + + loss_functions = {"hinge": (Hinge, 1.0)} + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "nu": [Interval(Real, 0.0, 1.0, closed="right")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "eta0": [Interval(Real, 0, None, closed="left")], + "power_t": [Interval(Real, None, None, closed="neither")], + } + + def __init__( + self, + nu=0.5, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + warm_start=False, + average=False, + ): + self.nu = nu + super(SGDOneClassSVM, self).__init__( + loss="hinge", + penalty="l2", + C=1.0, + l1_ratio=0, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=DEFAULT_EPSILON, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=warm_start, + average=average, + ) + + def _fit_one_class(self, X, alpha, C, sample_weight, learning_rate, max_iter): + """Uses SGD implementation with X and y=np.ones(n_samples).""" + + # The One-Class SVM uses the SGD implementation with + # y=np.ones(n_samples). + n_samples = X.shape[0] + y = np.ones(n_samples, dtype=X.dtype, order="C") + + dataset, offset_decay = make_dataset(X, y, sample_weight) + + penalty_type = self._get_penalty_type(self.penalty) + learning_rate_type = self._get_learning_rate_type(learning_rate) + + # early stopping is set to False for the One-Class SVM. thus + # validation_mask and validation_score_cb will be set to values + # associated to early_stopping=False in _make_validation_split and + # _make_validation_score_cb respectively. + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + validation_score_cb = self._make_validation_score_cb( + validation_mask, X, y, sample_weight + ) + + random_state = check_random_state(self.random_state) + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(0, np.iinfo(np.int32).max) + + tol = self.tol if self.tol is not None else -np.inf + + one_class = 1 + # There are no class weights for the One-Class SVM and they are + # therefore set to 1. 
+ pos_weight = 1 + neg_weight = 1 + + if self.average: + coef = self._standard_coef + intercept = self._standard_intercept + average_coef = self._average_coef + average_intercept = self._average_intercept + else: + coef = self.coef_ + intercept = 1 - self.offset_ + average_coef = None # Not used + average_intercept = [0] # Not used + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd( + coef, + intercept[0], + average_coef, + average_intercept[0], + self._loss_function_, + penalty_type, + alpha, + C, + self.l1_ratio, + dataset, + validation_mask, + self.early_stopping, + validation_score_cb, + int(self.n_iter_no_change), + max_iter, + tol, + int(self.fit_intercept), + int(self.verbose), + int(self.shuffle), + seed, + neg_weight, + pos_weight, + learning_rate_type, + self.eta0, + self.power_t, + one_class, + self.t_, + offset_decay, + self.average, + ) + + self.t_ += self.n_iter_ * n_samples + + if self.average > 0: + self._average_intercept = np.atleast_1d(average_intercept) + self._standard_intercept = np.atleast_1d(intercept) + + if self.average <= self.t_ - 1.0: + # made enough updates for averaging to be taken into account + self.coef_ = average_coef + self.offset_ = 1 - np.atleast_1d(average_intercept) + else: + self.coef_ = coef + self.offset_ = 1 - np.atleast_1d(intercept) + + else: + self.offset_ = 1 - np.atleast_1d(intercept) + + def _partial_fit( + self, + X, + alpha, + C, + loss, + learning_rate, + max_iter, + sample_weight, + coef_init, + offset_init, + ): + first_call = getattr(self, "coef_", None) is None + X = self._validate_data( + X, + None, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + reset=first_call, + ) + + n_features = X.shape[1] + + # Allocate datastructures from input arguments + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # We use intercept = 1 - offset where intercept is the intercept of + # the SGD implementation and offset is the offset of the One-Class SVM + # optimization problem. + if getattr(self, "coef_", None) is None or coef_init is not None: + self._allocate_parameter_mem( + n_classes=1, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=offset_init, + one_class=1, + ) + elif n_features != self.coef_.shape[-1]: + raise ValueError( + "Number of features %d does not match previous data %d." + % (n_features, self.coef_.shape[-1]) + ) + + if self.average and getattr(self, "_average_coef", None) is None: + self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C") + self._average_intercept = np.zeros(1, dtype=X.dtype, order="C") + + self._loss_function_ = self._get_loss_function(loss) + if not hasattr(self, "t_"): + self.t_ = 1.0 + + # delegate to concrete training procedure + self._fit_one_class( + X, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, sample_weight=None): + """Fit linear One-Class SVM with Stochastic Gradient Descent. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of the training data. + y : Ignored + Not used, present for API consistency by convention. + + sample_weight : array-like, shape (n_samples,), optional + Weights applied to individual samples. + If not provided, uniform weights are assumed. 
+ + Returns + ------- + self : object + Returns a fitted instance of self. + """ + if not hasattr(self, "coef_"): + self._more_validate_params(for_partial_fit=True) + + alpha = self.nu / 2 + return self._partial_fit( + X, + alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + sample_weight=sample_weight, + coef_init=None, + offset_init=None, + ) + + def _fit( + self, + X, + alpha, + C, + loss, + learning_rate, + coef_init=None, + offset_init=None, + sample_weight=None, + ): + if self.warm_start and hasattr(self, "coef_"): + if coef_init is None: + coef_init = self.coef_ + if offset_init is None: + offset_init = self.offset_ + else: + self.coef_ = None + self.offset_ = None + + # Clear iteration count for multiple calls to fit. + self.t_ = 1.0 + + self._partial_fit( + X, + alpha, + C, + loss, + learning_rate, + self.max_iter, + sample_weight, + coef_init, + offset_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iterations reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None): + """Fit linear One-Class SVM with Stochastic Gradient Descent. + + This solves an equivalent optimization problem of the + One-Class SVM primal optimization problem and returns a weight vector + w and an offset rho such that the decision function is given by + <w, x> - rho. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + y : Ignored + Not used, present for API consistency by convention. + + coef_init : array, shape (n_classes, n_features) + The initial coefficients to warm-start the optimization. + + offset_init : array, shape (n_classes,) + The initial offset to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), optional + Weights applied to individual samples. + If not provided, uniform weights are assumed. These weights will + be multiplied with class_weight (passed through the + constructor) if class_weight is specified. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + self._more_validate_params() + + alpha = self.nu / 2 + self._fit( + X, + alpha=alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + offset_init=offset_init, + sample_weight=sample_weight, + ) + + return self + + def decision_function(self, X): + """Signed distance to the separating hyperplane. + + Signed distance is positive for an inlier and negative for an + outlier. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. + + Returns + ------- + dec : array-like, shape (n_samples,) + Decision function values of the samples. + """ + + check_is_fitted(self, "coef_") + + X = self._validate_data(X, accept_sparse="csr", reset=False) + decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_ + + return decisions.ravel() + + def score_samples(self, X): + """Raw scoring function of the samples. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. + + Returns + ------- + score_samples : array-like, shape (n_samples,) + Unshifted scoring function values of the samples.
+ """ + score_samples = self.decision_function(X) + self.offset_ + return score_samples + + def predict(self, X): + """Return labels (1 inlier, -1 outlier) of the samples. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. + + Returns + ------- + y : array, shape (n_samples,) + Labels of the samples. + """ + y = (self.decision_function(X) >= 0).astype(np.int32) + y[y == 0] = -1 # for consistency with outlier detectors + return y + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ) + }, + "preserves_dtype": [np.float64, np.float32], + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py new file mode 100644 index 0000000000000000000000000000000000000000..cc774e878376244249cc320c0201da11ea7e075f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py @@ -0,0 +1,456 @@ +""" +A Theil-Sen Estimator for Multiple Linear Regression Model +""" + +# Author: Florian Wilhelm +# +# License: BSD 3 clause + + +import warnings +from itertools import combinations +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy import linalg +from scipy.linalg.lapack import get_lapack_funcs +from scipy.special import binom + +from ..base import RegressorMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..utils import check_random_state +from ..utils._param_validation import Interval +from ..utils.parallel import Parallel, delayed +from ._base import LinearModel + +_EPSILON = np.finfo(np.double).eps + + +def _modified_weiszfeld_step(X, x_old): + """Modified Weiszfeld step. + + This function defines one iteration step in order to approximate the + spatial median (L1 median). It is a form of an iteratively re-weighted + least squares method. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + x_old : ndarray of shape = (n_features,) + Current start vector. + + Returns + ------- + x_new : ndarray of shape (n_features,) + New iteration step. + + References + ---------- + - On Computation of Spatial Median for Robust Data Mining, 2005 + T. Kärkkäinen and S. Äyrämö + http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf + """ + diff = X - x_old + diff_norm = np.sqrt(np.sum(diff**2, axis=1)) + mask = diff_norm >= _EPSILON + # x_old equals one of our samples + is_x_old_in_X = int(mask.sum() < X.shape[0]) + + diff = diff[mask] + diff_norm = diff_norm[mask][:, np.newaxis] + quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0)) + + if quotient_norm > _EPSILON: # to avoid division by zero + new_direction = np.sum(X[mask, :] / diff_norm, axis=0) / np.sum( + 1 / diff_norm, axis=0 + ) + else: + new_direction = 1.0 + quotient_norm = 1.0 + + return ( + max(0.0, 1.0 - is_x_old_in_X / quotient_norm) * new_direction + + min(1.0, is_x_old_in_X / quotient_norm) * x_old + ) + + +def _spatial_median(X, max_iter=300, tol=1.0e-3): + """Spatial median (L1 median). + + The spatial median is member of a class of so-called M-estimators which + are defined by an optimization problem. 
Given a number of p points in an + n-dimensional space, the point x minimizing the sum of all distances to the + p other points is called spatial median. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + max_iter : int, default=300 + Maximum number of iterations. + + tol : float, default=1.e-3 + Stop the algorithm if spatial_median has converged. + + Returns + ------- + spatial_median : ndarray of shape = (n_features,) + Spatial median. + + n_iter : int + Number of iterations needed. + + References + ---------- + - On Computation of Spatial Median for Robust Data Mining, 2005 + T. Kärkkäinen and S. Äyrämö + http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf + """ + if X.shape[1] == 1: + return 1, np.median(X.ravel(), keepdims=True) + + tol **= 2 # We are computing the tol on the squared norm + spatial_median_old = np.mean(X, axis=0) + + for n_iter in range(max_iter): + spatial_median = _modified_weiszfeld_step(X, spatial_median_old) + if np.sum((spatial_median_old - spatial_median) ** 2) < tol: + break + else: + spatial_median_old = spatial_median + else: + warnings.warn( + "Maximum number of iterations {max_iter} reached in " + "spatial median for TheilSen regressor." + "".format(max_iter=max_iter), + ConvergenceWarning, + ) + return n_iter, spatial_median + + +def _breakdown_point(n_samples, n_subsamples): + """Approximation of the breakdown point. + + Parameters + ---------- + n_samples : int + Number of samples. + + n_subsamples : int + Number of subsamples to consider. + + Returns + ------- + breakdown_point : float + Approximation of breakdown point. + """ + return ( + 1 + - ( + 0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) + + n_subsamples + - 1 + ) + / n_samples + ) + + +def _lstsq(X, y, indices, fit_intercept): + """Least Squares Estimator for TheilSenRegressor class. + + This function calculates the least squares method on a subset of rows of X + and y defined by the indices array. Optionally, an intercept column is + added if intercept is set to true. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Design matrix, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : ndarray of shape (n_samples,) + Target vector, where `n_samples` is the number of samples. + + indices : ndarray of shape (n_subpopulation, n_subsamples) + Indices of all subsamples with respect to the chosen subpopulation. + + fit_intercept : bool + Fit intercept or not. + + Returns + ------- + weights : ndarray of shape (n_subpopulation, n_features + intercept) + Solution matrix of n_subpopulation solved least square problems. 
+ """ + fit_intercept = int(fit_intercept) + n_features = X.shape[1] + fit_intercept + n_subsamples = indices.shape[1] + weights = np.empty((indices.shape[0], n_features)) + X_subpopulation = np.ones((n_subsamples, n_features)) + # gelss need to pad y_subpopulation to be of the max dim of X_subpopulation + y_subpopulation = np.zeros((max(n_subsamples, n_features))) + (lstsq,) = get_lapack_funcs(("gelss",), (X_subpopulation, y_subpopulation)) + + for index, subset in enumerate(indices): + X_subpopulation[:, fit_intercept:] = X[subset, :] + y_subpopulation[:n_subsamples] = y[subset] + weights[index] = lstsq(X_subpopulation, y_subpopulation)[1][:n_features] + + return weights + + +class TheilSenRegressor(RegressorMixin, LinearModel): + """Theil-Sen Estimator: robust multivariate regression model. + + The algorithm calculates least square solutions on subsets with size + n_subsamples of the samples in X. Any value of n_subsamples between the + number of features and samples leads to an estimator with a compromise + between robustness and efficiency. Since the number of least square + solutions is "n_samples choose n_subsamples", it can be extremely large + and can therefore be limited with max_subpopulation. If this limit is + reached, the subsets are chosen randomly. In a final step, the spatial + median (or L1 median) is calculated of all least square solutions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + max_subpopulation : int, default=1e4 + Instead of computing with a set of cardinality 'n choose k', where n is + the number of samples and k is the number of subsamples (at least + number of features), consider only a stochastic subpopulation of a + given maximal size if 'n choose k' is larger than max_subpopulation. + For other than small problem sizes this parameter will determine + memory usage and runtime if n_subsamples is not changed. Note that the + data type should be int but floats such as 1e4 can be accepted too. + + n_subsamples : int, default=None + Number of samples to calculate the parameters. This is at least the + number of features (plus 1 if fit_intercept=True) and the number of + samples as a maximum. A lower number leads to a higher breakdown + point and a low efficiency while a high number leads to a low + breakdown point and a high efficiency. If None, take the + minimum number of subsamples leading to maximal robustness. + If n_subsamples is set to n_samples, Theil-Sen is identical to least + squares. + + max_iter : int, default=300 + Maximum number of iterations for the calculation of spatial median. + + tol : float, default=1e-3 + Tolerance when calculating spatial median. + + random_state : int, RandomState instance or None, default=None + A random number generator instance to define the state of the random + permutations generator. Pass an int for reproducible output across + multiple function calls. + See :term:`Glossary `. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool, default=False + Verbose mode when fitting the model. 
+ + Attributes + ---------- + coef_ : ndarray of shape (n_features,) + Coefficients of the regression model (median of distribution). + + intercept_ : float + Estimated intercept of regression model. + + breakdown_ : float + Approximated breakdown point. + + n_iter_ : int + Number of iterations needed for the spatial median. + + n_subpopulation_ : int + Number of combinations taken into account from 'n choose k', where n is + the number of samples and k is the number of subsamples. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + HuberRegressor : Linear regression model that is robust to outliers. + RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm. + SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD. + + References + ---------- + - Theil-Sen Estimators in a Multiple Linear Regression Model, 2009 + Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang + http://home.olemiss.edu/~xdang/papers/MTSE.pdf + + Examples + -------- + >>> from sklearn.linear_model import TheilSenRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression( + ... n_samples=200, n_features=2, noise=4.0, random_state=0) + >>> reg = TheilSenRegressor(random_state=0).fit(X, y) + >>> reg.score(X, y) + 0.9884... + >>> reg.predict(X[:1,]) + array([-31.5871...]) + """ + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + # target_type should be Integral but can accept Real for backward compatibility + "max_subpopulation": [Interval(Real, 1, None, closed="left")], + "n_subsamples": [None, Integral], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "random_state": ["random_state"], + "n_jobs": [None, Integral], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + fit_intercept=True, + copy_X=True, + max_subpopulation=1e4, + n_subsamples=None, + max_iter=300, + tol=1.0e-3, + random_state=None, + n_jobs=None, + verbose=False, + ): + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.max_subpopulation = max_subpopulation + self.n_subsamples = n_subsamples + self.max_iter = max_iter + self.tol = tol + self.random_state = random_state + self.n_jobs = n_jobs + self.verbose = verbose + + def _check_subparams(self, n_samples, n_features): + n_subsamples = self.n_subsamples + + if self.fit_intercept: + n_dim = n_features + 1 + else: + n_dim = n_features + + if n_subsamples is not None: + if n_subsamples > n_samples: + raise ValueError( + "Invalid parameter since n_subsamples > " + "n_samples ({0} > {1}).".format(n_subsamples, n_samples) + ) + if n_samples >= n_features: + if n_dim > n_subsamples: + plus_1 = "+1" if self.fit_intercept else "" + raise ValueError( + "Invalid parameter since n_features{0} " + "> n_subsamples ({1} > {2})." 
+ "".format(plus_1, n_dim, n_subsamples) + ) + else: # if n_samples < n_features + if n_subsamples != n_samples: + raise ValueError( + "Invalid parameter since n_subsamples != " + "n_samples ({0} != {1}) while n_samples " + "< n_features.".format(n_subsamples, n_samples) + ) + else: + n_subsamples = min(n_dim, n_samples) + + all_combinations = max(1, np.rint(binom(n_samples, n_subsamples))) + n_subpopulation = int(min(self.max_subpopulation, all_combinations)) + + return n_subsamples, n_subpopulation + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit linear model. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. + y : ndarray of shape (n_samples,) + Target values. + + Returns + ------- + self : returns an instance of self. + Fitted `TheilSenRegressor` estimator. + """ + random_state = check_random_state(self.random_state) + X, y = self._validate_data(X, y, y_numeric=True) + n_samples, n_features = X.shape + n_subsamples, self.n_subpopulation_ = self._check_subparams( + n_samples, n_features + ) + self.breakdown_ = _breakdown_point(n_samples, n_subsamples) + + if self.verbose: + print("Breakdown point: {0}".format(self.breakdown_)) + print("Number of samples: {0}".format(n_samples)) + tol_outliers = int(self.breakdown_ * n_samples) + print("Tolerable outliers: {0}".format(tol_outliers)) + print("Number of subpopulations: {0}".format(self.n_subpopulation_)) + + # Determine indices of subpopulation + if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation: + indices = list(combinations(range(n_samples), n_subsamples)) + else: + indices = [ + random_state.choice(n_samples, size=n_subsamples, replace=False) + for _ in range(self.n_subpopulation_) + ] + + n_jobs = effective_n_jobs(self.n_jobs) + index_list = np.array_split(indices, n_jobs) + weights = Parallel(n_jobs=n_jobs, verbose=self.verbose)( + delayed(_lstsq)(X, y, index_list[job], self.fit_intercept) + for job in range(n_jobs) + ) + weights = np.vstack(weights) + self.n_iter_, coefs = _spatial_median( + weights, max_iter=self.max_iter, tol=self.tol + ) + + if self.fit_intercept: + self.intercept_ = coefs[0] + self.coef_ = coefs[1:] + else: + self.intercept_ = 0.0 + self.coef_ = coefs + + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/multiclass.py b/llmeval-env/lib/python3.10/site-packages/sklearn/multiclass.py new file mode 100644 index 0000000000000000000000000000000000000000..914aac99d82b56f6b621cd2306ae3a60c7fbae0f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/multiclass.py @@ -0,0 +1,1269 @@ +""" +Multiclass classification strategies +==================================== + +This module implements multiclass learning algorithms: + - one-vs-the-rest / one-vs-all + - one-vs-one + - error correcting output codes + +The estimators provided in this module are meta-estimators: they require a base +estimator to be provided in their constructor. For example, it is possible to +use these estimators to turn a binary classifier or a regressor into a +multiclass classifier. It is also possible to use these estimators with +multiclass estimators in the hope that their accuracy or runtime performance +improves. + +All classifiers in scikit-learn implement multiclass classification; you +only need to use this module if you want to experiment with custom multiclass +strategies. 
+ +The one-vs-the-rest meta-classifier also implements a `predict_proba` method, +so long as such a method is implemented by the base classifier. This method +returns probabilities of class membership in both the single label and +multilabel case. Note that in the multilabel case, probabilities are the +marginal probability that a given sample falls in the given class. As such, in +the multilabel case the sum of these probabilities over all possible labels +for a given sample *will not* sum to unity, as they do in the single label +case. +""" + +# Author: Mathieu Blondel +# Author: Hamzeh Alsalhi <93hamsal@gmail.com> +# +# License: BSD 3 clause + +import array +import itertools +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from .base import ( + BaseEstimator, + ClassifierMixin, + MetaEstimatorMixin, + MultiOutputMixin, + _fit_context, + clone, + is_classifier, + is_regressor, +) +from .metrics.pairwise import pairwise_distances_argmin +from .preprocessing import LabelBinarizer +from .utils import check_random_state +from .utils._param_validation import HasMethods, Interval +from .utils._tags import _safe_tags +from .utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + process_routing, +) +from .utils.metaestimators import _safe_split, available_if +from .utils.multiclass import ( + _check_partial_fit_first_call, + _ovr_decision_function, + check_classification_targets, +) +from .utils.parallel import Parallel, delayed +from .utils.validation import _check_method_params, _num_samples, check_is_fitted + +__all__ = [ + "OneVsRestClassifier", + "OneVsOneClassifier", + "OutputCodeClassifier", +] + + +def _fit_binary(estimator, X, y, fit_params, classes=None): + """Fit a single binary estimator.""" + unique_y = np.unique(y) + if len(unique_y) == 1: + if classes is not None: + if y[0] == -1: + c = 0 + else: + c = y[0] + warnings.warn( + "Label %s is present in all training examples." 
% str(classes[c]) + ) + estimator = _ConstantPredictor().fit(X, unique_y) + else: + estimator = clone(estimator) + estimator.fit(X, y, **fit_params) + return estimator + + +def _partial_fit_binary(estimator, X, y, partial_fit_params): + """Partially fit a single binary estimator.""" + estimator.partial_fit(X, y, classes=np.array((0, 1)), **partial_fit_params) + return estimator + + +def _predict_binary(estimator, X): + """Make predictions using a single binary estimator.""" + if is_regressor(estimator): + return estimator.predict(X) + try: + score = np.ravel(estimator.decision_function(X)) + except (AttributeError, NotImplementedError): + # probabilities of the positive class + score = estimator.predict_proba(X)[:, 1] + return score + + +def _threshold_for_binary_predict(estimator): + """Threshold for predictions from binary estimator.""" + if hasattr(estimator, "decision_function") and is_classifier(estimator): + return 0.0 + else: + # predict_proba threshold + return 0.5 + + +class _ConstantPredictor(BaseEstimator): + """Helper predictor to be used when only one class is present.""" + + def fit(self, X, y): + check_params = dict( + force_all_finite=False, dtype=None, ensure_2d=False, accept_sparse=True + ) + self._validate_data( + X, y, reset=True, validate_separately=(check_params, check_params) + ) + self.y_ = y + return self + + def predict(self, X): + check_is_fitted(self) + self._validate_data( + X, + force_all_finite=False, + dtype=None, + accept_sparse=True, + ensure_2d=False, + reset=False, + ) + + return np.repeat(self.y_, _num_samples(X)) + + def decision_function(self, X): + check_is_fitted(self) + self._validate_data( + X, + force_all_finite=False, + dtype=None, + accept_sparse=True, + ensure_2d=False, + reset=False, + ) + + return np.repeat(self.y_, _num_samples(X)) + + def predict_proba(self, X): + check_is_fitted(self) + self._validate_data( + X, + force_all_finite=False, + dtype=None, + accept_sparse=True, + ensure_2d=False, + reset=False, + ) + y_ = self.y_.astype(np.float64) + return np.repeat([np.hstack([1 - y_, y_])], _num_samples(X), axis=0) + + +def _estimators_has(attr): + """Check if self.estimator or self.estimators_[0] has attr. + + If `self.estimators_[0]` has the attr, then its safe to assume that other + estimators have it too. We raise the original `AttributeError` if `attr` + does not exist. This function is used together with `available_if`. + """ + + def check(self): + if hasattr(self, "estimators_"): + getattr(self.estimators_[0], attr) + else: + getattr(self.estimator, attr) + + return True + + return check + + +class OneVsRestClassifier( + MultiOutputMixin, + ClassifierMixin, + MetaEstimatorMixin, + BaseEstimator, +): + """One-vs-the-rest (OvR) multiclass strategy. + + Also known as one-vs-all, this strategy consists in fitting one classifier + per class. For each classifier, the class is fitted against all the other + classes. In addition to its computational efficiency (only `n_classes` + classifiers are needed), one advantage of this approach is its + interpretability. Since each class is represented by one and one classifier + only, it is possible to gain knowledge about the class by inspecting its + corresponding classifier. This is the most commonly used strategy for + multiclass classification and is a fair default choice. + + OneVsRestClassifier can also be used for multilabel classification. To use + this feature, provide an indicator matrix for the target `y` when calling + `.fit`. 
In other words, the target labels should be formatted as a 2D + binary (0/1) matrix, where [i, j] == 1 indicates the presence of label j + in sample i. This estimator uses the binary relevance method to perform + multilabel classification, which involves training one binary classifier + independently for each label. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + A regressor or a classifier that implements :term:`fit`. + When a classifier is passed, :term:`decision_function` will be used + in priority and it will fallback to :term:`predict_proba` if it is not + available. + When a regressor is passed, :term:`predict` is used. + + n_jobs : int, default=None + The number of jobs to use for the computation: the `n_classes` + one-vs-rest problems are computed in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: 0.20 + `n_jobs` default changed from 1 to None + + verbose : int, default=0 + The verbosity level, if non zero, progress messages are printed. + Below 50, the output is sent to stderr. Otherwise, the output is sent + to stdout. The frequency of the messages increases with the verbosity + level, reporting all iterations at 10. See :class:`joblib.Parallel` for + more details. + + .. versionadded:: 1.1 + + Attributes + ---------- + estimators_ : list of `n_classes` estimators + Estimators used for predictions. + + classes_ : array, shape = [`n_classes`] + Class labels. + + n_classes_ : int + Number of classes. + + label_binarizer_ : LabelBinarizer object + Object used to transform multiclass labels to binary labels and + vice-versa. + + multilabel_ : boolean + Whether a OneVsRestClassifier is a multilabel classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + OneVsOneClassifier : One-vs-one multiclass strategy. + OutputCodeClassifier : (Error-Correcting) Output-Code multiclass strategy. + sklearn.multioutput.MultiOutputClassifier : Alternate way of extending an + estimator for multilabel classification. + sklearn.preprocessing.MultiLabelBinarizer : Transform iterable of iterables + to binary indicator matrix. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.multiclass import OneVsRestClassifier + >>> from sklearn.svm import SVC + >>> X = np.array([ + ... [10, 10], + ... [8, 10], + ... [-5, 5.5], + ... [-5.4, 5.5], + ... [-20, -20], + ... [-15, -20] + ... ]) + >>> y = np.array([0, 0, 1, 1, 2, 2]) + >>> clf = OneVsRestClassifier(SVC()).fit(X, y) + >>> clf.predict([[-19, -20], [9, 9], [-5, 5]]) + array([2, 0, 1]) + """ + + _parameter_constraints = { + "estimator": [HasMethods(["fit"])], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + } + + def __init__(self, estimator, *, n_jobs=None, verbose=0): + self.estimator = estimator + self.n_jobs = n_jobs + self.verbose = verbose + + @_fit_context( + # OneVsRestClassifier.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit underlying estimators. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) + Multi-class targets. An indicator matrix turns on multilabel + classification. + + **fit_params : dict + Parameters passed to the ``estimator.fit`` method of each + sub-estimator. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + Instance of fitted estimator. + """ + _raise_for_params(fit_params, self, "fit") + + routed_params = process_routing( + self, + "fit", + **fit_params, + ) + # A sparse LabelBinarizer, with sparse_output=True, has been shown to + # outperform or match a dense label binarizer in all cases and has also + # resulted in less or equal memory consumption in the fit_ovr function + # overall. + self.label_binarizer_ = LabelBinarizer(sparse_output=True) + Y = self.label_binarizer_.fit_transform(y) + Y = Y.tocsc() + self.classes_ = self.label_binarizer_.classes_ + columns = (col.toarray().ravel() for col in Y.T) + # In cases where individual estimators are very fast to train setting + # n_jobs > 1 in can results in slower performance due to the overhead + # of spawning threads. See joblib issue #112. + self.estimators_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(_fit_binary)( + self.estimator, + X, + column, + fit_params=routed_params.estimator.fit, + classes=[ + "not %s" % self.label_binarizer_.classes_[i], + self.label_binarizer_.classes_[i], + ], + ) + for i, column in enumerate(columns) + ) + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + if hasattr(self.estimators_[0], "feature_names_in_"): + self.feature_names_in_ = self.estimators_[0].feature_names_in_ + + return self + + @available_if(_estimators_has("partial_fit")) + @_fit_context( + # OneVsRestClassifier.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def partial_fit(self, X, y, classes=None, **partial_fit_params): + """Partially fit underlying estimators. + + Should be used when memory is inefficient to train all data. + Chunks of data can be passed in several iterations. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) + Multi-class targets. An indicator matrix turns on multilabel + classification. + + classes : array, shape (n_classes, ) + Classes across all calls to partial_fit. + Can be obtained via `np.unique(y_all)`, where y_all is the + target vector of the entire dataset. + This argument is only required in the first call of partial_fit + and can be omitted in the subsequent calls. + + **partial_fit_params : dict + Parameters passed to the ``estimator.partial_fit`` method of each + sub-estimator. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + Instance of partially fitted estimator. 
+ """ + _raise_for_params(partial_fit_params, self, "partial_fit") + + routed_params = process_routing( + self, + "partial_fit", + **partial_fit_params, + ) + + if _check_partial_fit_first_call(self, classes): + self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)] + + # A sparse LabelBinarizer, with sparse_output=True, has been + # shown to outperform or match a dense label binarizer in all + # cases and has also resulted in less or equal memory consumption + # in the fit_ovr function overall. + self.label_binarizer_ = LabelBinarizer(sparse_output=True) + self.label_binarizer_.fit(self.classes_) + + if len(np.setdiff1d(y, self.classes_)): + raise ValueError( + ( + "Mini-batch contains {0} while classes " + "must be subset of {1}" + ).format(np.unique(y), self.classes_) + ) + + Y = self.label_binarizer_.transform(y) + Y = Y.tocsc() + columns = (col.toarray().ravel() for col in Y.T) + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_partial_fit_binary)( + estimator, + X, + column, + partial_fit_params=routed_params.estimator.partial_fit, + ) + for estimator, column in zip(self.estimators_, columns) + ) + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + + return self + + def predict(self, X): + """Predict multi-class targets using underlying estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + Returns + ------- + y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) + Predicted multi-class targets. + """ + check_is_fitted(self) + + n_samples = _num_samples(X) + if self.label_binarizer_.y_type_ == "multiclass": + maxima = np.empty(n_samples, dtype=float) + maxima.fill(-np.inf) + argmaxima = np.zeros(n_samples, dtype=int) + for i, e in enumerate(self.estimators_): + pred = _predict_binary(e, X) + np.maximum(maxima, pred, out=maxima) + argmaxima[maxima == pred] = i + return self.classes_[argmaxima] + else: + thresh = _threshold_for_binary_predict(self.estimators_[0]) + indices = array.array("i") + indptr = array.array("i", [0]) + for e in self.estimators_: + indices.extend(np.where(_predict_binary(e, X) > thresh)[0]) + indptr.append(len(indices)) + data = np.ones(len(indices), dtype=int) + indicator = sp.csc_matrix( + (data, indices, indptr), shape=(n_samples, len(self.estimators_)) + ) + return self.label_binarizer_.inverse_transform(indicator) + + @available_if(_estimators_has("predict_proba")) + def predict_proba(self, X): + """Probability estimates. + + The returned estimates for all classes are ordered by label of classes. + + Note that in the multilabel case, each sample can have any number of + labels. This returns the marginal probability that the given sample has + the label in question. For example, it is entirely consistent that two + labels both have a 90% probability of applying to a given sample. + + In the single label multiclass case, the rows of the returned matrix + sum to 1. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + T : array-like of shape (n_samples, n_classes) + Returns the probability of the sample for each class in the model, + where classes are ordered as they are in `self.classes_`. + """ + check_is_fitted(self) + # Y[i, j] gives the probability that sample i has the label j. + # In the multi-label case, these are not disjoint. 
+ Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T + + if len(self.estimators_) == 1: + # Only one estimator, but we still want to return probabilities + # for two classes. + Y = np.concatenate(((1 - Y), Y), axis=1) + + if not self.multilabel_: + # Then, probabilities should be normalized to 1. + Y /= np.sum(Y, axis=1)[:, np.newaxis] + return Y + + @available_if(_estimators_has("decision_function")) + def decision_function(self, X): + """Decision function for the OneVsRestClassifier. + + Return the distance of each sample from the decision boundary for each + class. This can only be used with estimators which implement the + `decision_function` method. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + T : array-like of shape (n_samples, n_classes) or (n_samples,) for \ + binary classification. + Result of calling `decision_function` on the final estimator. + + .. versionchanged:: 0.19 + output shape changed to ``(n_samples,)`` to conform to + scikit-learn conventions for binary classification. + """ + check_is_fitted(self) + if len(self.estimators_) == 1: + return self.estimators_[0].decision_function(X) + return np.array( + [est.decision_function(X).ravel() for est in self.estimators_] + ).T + + @property + def multilabel_(self): + """Whether this is a multilabel classifier.""" + return self.label_binarizer_.y_type_.startswith("multilabel") + + @property + def n_classes_(self): + """Number of classes.""" + return len(self.classes_) + + def _more_tags(self): + """Indicate if wrapped estimator is using a precomputed Gram matrix""" + return {"pairwise": _safe_tags(self.estimator, key="pairwise")} + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(callee="fit", caller="fit") + .add(callee="partial_fit", caller="partial_fit"), + ) + ) + return router + + +def _fit_ovo_binary(estimator, X, y, i, j, fit_params): + """Fit a single binary estimator (one-vs-one).""" + cond = np.logical_or(y == i, y == j) + y = y[cond] + y_binary = np.empty(y.shape, int) + y_binary[y == i] = 0 + y_binary[y == j] = 1 + indcond = np.arange(_num_samples(X))[cond] + + fit_params_subset = _check_method_params(X, params=fit_params, indices=indcond) + return ( + _fit_binary( + estimator, + _safe_split(estimator, X, None, indices=indcond)[0], + y_binary, + fit_params=fit_params_subset, + classes=[i, j], + ), + indcond, + ) + + +def _partial_fit_ovo_binary(estimator, X, y, i, j, partial_fit_params): + """Partially fit a single binary estimator(one-vs-one).""" + + cond = np.logical_or(y == i, y == j) + y = y[cond] + if len(y) != 0: + y_binary = np.zeros_like(y) + y_binary[y == j] = 1 + partial_fit_params_subset = _check_method_params( + X, params=partial_fit_params, indices=cond + ) + return _partial_fit_binary( + estimator, X[cond], y_binary, partial_fit_params=partial_fit_params_subset + ) + return estimator + + +class OneVsOneClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): + """One-vs-one multiclass strategy. + + This strategy consists in fitting one classifier per class pair. 
+ At prediction time, the class which received the most votes is selected. + Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers, + this method is usually slower than one-vs-the-rest, due to its + O(n_classes^2) complexity. However, this method may be advantageous for + algorithms such as kernel algorithms which don't scale well with + `n_samples`. This is because each individual learning problem only involves + a small subset of the data whereas, with one-vs-the-rest, the complete + dataset is used `n_classes` times. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + A regressor or a classifier that implements :term:`fit`. + When a classifier is passed, :term:`decision_function` will be used + in priority and it will fallback to :term:`predict_proba` if it is not + available. + When a regressor is passed, :term:`predict` is used. + + n_jobs : int, default=None + The number of jobs to use for the computation: the `n_classes * ( + n_classes - 1) / 2` OVO problems are computed in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + estimators_ : list of ``n_classes * (n_classes - 1) / 2`` estimators + Estimators used for predictions. + + classes_ : numpy array of shape [n_classes] + Array containing labels. + + n_classes_ : int + Number of classes. + + pairwise_indices_ : list, length = ``len(estimators_)``, or ``None`` + Indices of samples used when training the estimators. + ``None`` when ``estimator``'s `pairwise` tag is False. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + OneVsRestClassifier : One-vs-all multiclass strategy. + OutputCodeClassifier : (Error-Correcting) Output-Code multiclass strategy. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.multiclass import OneVsOneClassifier + >>> from sklearn.svm import LinearSVC + >>> X, y = load_iris(return_X_y=True) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, test_size=0.33, shuffle=True, random_state=0) + >>> clf = OneVsOneClassifier( + ... LinearSVC(dual="auto", random_state=0)).fit(X_train, y_train) + >>> clf.predict(X_test[:10]) + array([2, 1, 0, 2, 0, 2, 0, 1, 1, 1]) + """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "n_jobs": [Integral, None], + } + + def __init__(self, estimator, *, n_jobs=None): + self.estimator = estimator + self.n_jobs = n_jobs + + @_fit_context( + # OneVsOneClassifier.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit underlying estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + y : array-like of shape (n_samples,) + Multi-class targets. + + **fit_params : dict + Parameters passed to the ``estimator.fit`` method of each + sub-estimator. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + The fitted underlying estimator. 
+ """ + _raise_for_params(fit_params, self, "fit") + + routed_params = process_routing( + self, + "fit", + **fit_params, + ) + + # We need to validate the data because we do a safe_indexing later. + X, y = self._validate_data( + X, y, accept_sparse=["csr", "csc"], force_all_finite=False + ) + check_classification_targets(y) + + self.classes_ = np.unique(y) + if len(self.classes_) == 1: + raise ValueError( + "OneVsOneClassifier can not be fit when only one class is present." + ) + n_classes = self.classes_.shape[0] + estimators_indices = list( + zip( + *( + Parallel(n_jobs=self.n_jobs)( + delayed(_fit_ovo_binary)( + self.estimator, + X, + y, + self.classes_[i], + self.classes_[j], + fit_params=routed_params.estimator.fit, + ) + for i in range(n_classes) + for j in range(i + 1, n_classes) + ) + ) + ) + ) + + self.estimators_ = estimators_indices[0] + + pairwise = self._get_tags()["pairwise"] + self.pairwise_indices_ = estimators_indices[1] if pairwise else None + + return self + + @available_if(_estimators_has("partial_fit")) + @_fit_context( + # OneVsOneClassifier.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def partial_fit(self, X, y, classes=None, **partial_fit_params): + """Partially fit underlying estimators. + + Should be used when memory is inefficient to train all data. Chunks + of data can be passed in several iteration, where the first call + should have an array of all target variables. + + Parameters + ---------- + X : {array-like, sparse matrix) of shape (n_samples, n_features) + Data. + + y : array-like of shape (n_samples,) + Multi-class targets. + + classes : array, shape (n_classes, ) + Classes across all calls to partial_fit. + Can be obtained via `np.unique(y_all)`, where y_all is the + target vector of the entire dataset. + This argument is only required in the first call of partial_fit + and can be omitted in the subsequent calls. + + **partial_fit_params : dict + Parameters passed to the ``estimator.partial_fit`` method of each + sub-estimator. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + The partially fitted underlying estimator. + """ + _raise_for_params(partial_fit_params, self, "partial_fit") + + routed_params = process_routing( + self, + "partial_fit", + **partial_fit_params, + ) + + first_call = _check_partial_fit_first_call(self, classes) + if first_call: + self.estimators_ = [ + clone(self.estimator) + for _ in range(self.n_classes_ * (self.n_classes_ - 1) // 2) + ] + + if len(np.setdiff1d(y, self.classes_)): + raise ValueError( + "Mini-batch contains {0} while it must be subset of {1}".format( + np.unique(y), self.classes_ + ) + ) + + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc"], + force_all_finite=False, + reset=first_call, + ) + check_classification_targets(y) + combinations = itertools.combinations(range(self.n_classes_), 2) + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_partial_fit_ovo_binary)( + estimator, + X, + y, + self.classes_[i], + self.classes_[j], + partial_fit_params=routed_params.estimator.partial_fit, + ) + for estimator, (i, j) in zip(self.estimators_, (combinations)) + ) + + self.pairwise_indices_ = None + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + + return self + + def predict(self, X): + """Estimate the best class label for each sample in X. 
+ + This is implemented as ``argmax(decision_function(X), axis=1)`` which + will return the label of the class with most votes by estimators + predicting the outcome of a decision for each possible class pair. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + Returns + ------- + y : numpy array of shape [n_samples] + Predicted multi-class targets. + """ + Y = self.decision_function(X) + if self.n_classes_ == 2: + thresh = _threshold_for_binary_predict(self.estimators_[0]) + return self.classes_[(Y > thresh).astype(int)] + return self.classes_[Y.argmax(axis=1)] + + def decision_function(self, X): + """Decision function for the OneVsOneClassifier. + + The decision values for the samples are computed by adding the + normalized sum of pair-wise classification confidence levels to the + votes in order to disambiguate between the decision values when the + votes for all the classes are equal leading to a tie. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + Y : array-like of shape (n_samples, n_classes) or (n_samples,) + Result of calling `decision_function` on the final estimator. + + .. versionchanged:: 0.19 + output shape changed to ``(n_samples,)`` to conform to + scikit-learn conventions for binary classification. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + + indices = self.pairwise_indices_ + if indices is None: + Xs = [X] * len(self.estimators_) + else: + Xs = [X[:, idx] for idx in indices] + + predictions = np.vstack( + [est.predict(Xi) for est, Xi in zip(self.estimators_, Xs)] + ).T + confidences = np.vstack( + [_predict_binary(est, Xi) for est, Xi in zip(self.estimators_, Xs)] + ).T + Y = _ovr_decision_function(predictions, confidences, len(self.classes_)) + if self.n_classes_ == 2: + return Y[:, 1] + return Y + + @property + def n_classes_(self): + """Number of classes.""" + return len(self.classes_) + + def _more_tags(self): + """Indicate if wrapped estimator is using a precomputed Gram matrix""" + return {"pairwise": _safe_tags(self.estimator, key="pairwise")} + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(callee="fit", caller="fit") + .add(callee="partial_fit", caller="partial_fit"), + ) + ) + return router + + +class OutputCodeClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): + """(Error-Correcting) Output-Code multiclass strategy. + + Output-code based strategies consist in representing each class with a + binary code (an array of 0s and 1s). At fitting time, one binary + classifier per bit in the code book is fitted. At prediction time, the + classifiers are used to project new points in the class space and the class + closest to the points is chosen. The main advantage of these strategies is + that the number of classifiers used can be controlled by the user, either + for compressing the model (0 < `code_size` < 1) or for making the model more + robust to errors (`code_size` > 1). 
See the documentation for more details. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + An estimator object implementing :term:`fit` and one of + :term:`decision_function` or :term:`predict_proba`. + + code_size : float, default=1.5 + Percentage of the number of classes to be used to create the code book. + A number between 0 and 1 will require fewer classifiers than + one-vs-the-rest. A number greater than 1 will require more classifiers + than one-vs-the-rest. + + random_state : int, RandomState instance, default=None + The generator used to initialize the codebook. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_jobs : int, default=None + The number of jobs to use for the computation: the multiclass problems + are computed in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + estimators_ : list of `int(n_classes * code_size)` estimators + Estimators used for predictions. + + classes_ : ndarray of shape (n_classes,) + Array containing labels. + + code_book_ : ndarray of shape (n_classes, `len(estimators_)`) + Binary array containing the code of each class. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + OneVsRestClassifier : One-vs-all multiclass strategy. + OneVsOneClassifier : One-vs-one multiclass strategy. + + References + ---------- + + .. [1] "Solving multiclass learning problems via error-correcting output + codes", + Dietterich T., Bakiri G., + Journal of Artificial Intelligence Research 2, + 1995. + + .. [2] "The error coding method and PICTs", + James G., Hastie T., + Journal of Computational and Graphical statistics 7, + 1998. + + .. [3] "The Elements of Statistical Learning", + Hastie T., Tibshirani R., Friedman J., page 606 (second-edition) + 2008. + + Examples + -------- + >>> from sklearn.multiclass import OutputCodeClassifier + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_samples=100, n_features=4, + ... n_informative=2, n_redundant=0, + ... random_state=0, shuffle=False) + >>> clf = OutputCodeClassifier( + ... estimator=RandomForestClassifier(random_state=0), + ... random_state=0).fit(X, y) + >>> clf.predict([[0, 0, 0, 0]]) + array([1]) + """ + + _parameter_constraints: dict = { + "estimator": [ + HasMethods(["fit", "decision_function"]), + HasMethods(["fit", "predict_proba"]), + ], + "code_size": [Interval(Real, 0.0, None, closed="neither")], + "random_state": ["random_state"], + "n_jobs": [Integral, None], + } + + def __init__(self, estimator, *, code_size=1.5, random_state=None, n_jobs=None): + self.estimator = estimator + self.code_size = code_size + self.random_state = random_state + self.n_jobs = n_jobs + + @_fit_context( + # OutputCodeClassifier.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit underlying estimators. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + y : array-like of shape (n_samples,) + Multi-class targets. + + **fit_params : dict + Parameters passed to the ``estimator.fit`` method of each + sub-estimator. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + _raise_for_params(fit_params, self, "fit") + + routed_params = process_routing( + self, + "fit", + **fit_params, + ) + + y = self._validate_data(X="no_validation", y=y) + + random_state = check_random_state(self.random_state) + check_classification_targets(y) + + self.classes_ = np.unique(y) + n_classes = self.classes_.shape[0] + if n_classes == 0: + raise ValueError( + "OutputCodeClassifier can not be fit when no class is present." + ) + n_estimators = int(n_classes * self.code_size) + + # FIXME: there are more elaborate methods than generating the codebook + # randomly. + self.code_book_ = random_state.uniform(size=(n_classes, n_estimators)) + self.code_book_[self.code_book_ > 0.5] = 1.0 + + if hasattr(self.estimator, "decision_function"): + self.code_book_[self.code_book_ != 1] = -1.0 + else: + self.code_book_[self.code_book_ != 1] = 0.0 + + classes_index = {c: i for i, c in enumerate(self.classes_)} + + Y = np.array( + [self.code_book_[classes_index[y[i]]] for i in range(_num_samples(y))], + dtype=int, + ) + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_fit_binary)( + self.estimator, X, Y[:, i], fit_params=routed_params.estimator.fit + ) + for i in range(Y.shape[1]) + ) + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + if hasattr(self.estimators_[0], "feature_names_in_"): + self.feature_names_in_ = self.estimators_[0].feature_names_in_ + + return self + + def predict(self, X): + """Predict multi-class targets using underlying estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + Returns + ------- + y : ndarray of shape (n_samples,) + Predicted multi-class targets. + """ + check_is_fitted(self) + # ArgKmin only accepts C-contiguous array. The aggregated predictions need to be + # transposed. We therefore create a F-contiguous array to avoid a copy and have + # a C-contiguous array after the transpose operation. + Y = np.array( + [_predict_binary(e, X) for e in self.estimators_], + order="F", + dtype=np.float64, + ).T + pred = pairwise_distances_argmin(Y, self.code_book_, metric="euclidean") + return self.classes_[pred] + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, + method_mapping=MethodMapping().add(callee="fit", caller="fit"), + ) + return router diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/multioutput.py b/llmeval-env/lib/python3.10/site-packages/sklearn/multioutput.py new file mode 100644 index 0000000000000000000000000000000000000000..bfb83884399ef9be6c3385cbb6025c63c7042724 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/multioutput.py @@ -0,0 +1,1179 @@ +""" +This module implements multioutput regression and classification. + +The estimators provided in this module are meta-estimators: they require +a base estimator to be provided in their constructor. The meta-estimator +extends single output estimators to multioutput estimators. +""" + +# Author: Tim Head +# Author: Hugo Bowne-Anderson +# Author: Chris Rivera +# Author: Michael Williamson +# Author: James Ashton Nichols +# +# License: BSD 3 clause + + +from abc import ABCMeta, abstractmethod +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from .base import ( + BaseEstimator, + ClassifierMixin, + MetaEstimatorMixin, + RegressorMixin, + _fit_context, + clone, + is_classifier, +) +from .model_selection import cross_val_predict +from .utils import Bunch, _print_elapsed_time, check_random_state +from .utils._param_validation import HasMethods, StrOptions +from .utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from .utils.metaestimators import available_if +from .utils.multiclass import check_classification_targets +from .utils.parallel import Parallel, delayed +from .utils.validation import _check_method_params, check_is_fitted, has_fit_parameter + +__all__ = [ + "MultiOutputRegressor", + "MultiOutputClassifier", + "ClassifierChain", + "RegressorChain", +] + + +def _fit_estimator(estimator, X, y, sample_weight=None, **fit_params): + estimator = clone(estimator) + if sample_weight is not None: + estimator.fit(X, y, sample_weight=sample_weight, **fit_params) + else: + estimator.fit(X, y, **fit_params) + return estimator + + +def _partial_fit_estimator( + estimator, X, y, classes=None, partial_fit_params=None, first_time=True +): + partial_fit_params = {} if partial_fit_params is None else partial_fit_params + if first_time: + estimator = clone(estimator) + + if classes is not None: + estimator.partial_fit(X, y, classes=classes, **partial_fit_params) + else: + estimator.partial_fit(X, y, **partial_fit_params) + return estimator + + +def _available_if_estimator_has(attr): + """Return a function to check if the sub-estimator(s) has(have) `attr`. + + Helper for Chain implementations. 
+ """ + + def _check(self): + if hasattr(self, "estimators_"): + return all(hasattr(est, attr) for est in self.estimators_) + + if hasattr(self.estimator, attr): + return True + + return False + + return available_if(_check) + + +class _MultiOutputEstimator(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "predict"])], + "n_jobs": [Integral, None], + } + + @abstractmethod + def __init__(self, estimator, *, n_jobs=None): + self.estimator = estimator + self.n_jobs = n_jobs + + @_available_if_estimator_has("partial_fit") + @_fit_context( + # MultiOutput*.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def partial_fit(self, X, y, classes=None, sample_weight=None, **partial_fit_params): + """Incrementally fit a separate model for each class output. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : {array-like, sparse matrix} of shape (n_samples, n_outputs) + Multi-output targets. + + classes : list of ndarray of shape (n_outputs,), default=None + Each array is unique classes for one output in str/int. + Can be obtained via + ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where `y` + is the target matrix of the entire dataset. + This argument is required for the first call to partial_fit + and can be omitted in the subsequent calls. + Note that `y` doesn't need to contain all labels in `classes`. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, then samples are equally weighted. + Only supported if the underlying regressor supports sample + weights. + + **partial_fit_params : dict of str -> object + Parameters passed to the ``estimator.partial_fit`` method of each + sub-estimator. + + Only available if `enable_metadata_routing=True`. See the + :ref:`User Guide `. + + .. versionadded:: 1.3 + + Returns + ------- + self : object + Returns a fitted instance. + """ + _raise_for_params(partial_fit_params, self, "partial_fit") + + first_time = not hasattr(self, "estimators_") + + y = self._validate_data(X="no_validation", y=y, multi_output=True) + + if y.ndim == 1: + raise ValueError( + "y must have at least two dimensions for " + "multi-output regression but has only one." + ) + + if _routing_enabled(): + if sample_weight is not None: + partial_fit_params["sample_weight"] = sample_weight + routed_params = process_routing( + self, + "partial_fit", + **partial_fit_params, + ) + else: + if sample_weight is not None and not has_fit_parameter( + self.estimator, "sample_weight" + ): + raise ValueError( + "Underlying estimator does not support sample weights." 
+ ) + + if sample_weight is not None: + routed_params = Bunch( + estimator=Bunch(partial_fit=Bunch(sample_weight=sample_weight)) + ) + else: + routed_params = Bunch(estimator=Bunch(partial_fit=Bunch())) + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_partial_fit_estimator)( + self.estimators_[i] if not first_time else self.estimator, + X, + y[:, i], + classes[i] if classes is not None else None, + partial_fit_params=routed_params.estimator.partial_fit, + first_time=first_time, + ) + for i in range(y.shape[1]) + ) + + if first_time and hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + if first_time and hasattr(self.estimators_[0], "feature_names_in_"): + self.feature_names_in_ = self.estimators_[0].feature_names_in_ + + return self + + @_fit_context( + # MultiOutput*.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None, **fit_params): + """Fit the model to data, separately for each output variable. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : {array-like, sparse matrix} of shape (n_samples, n_outputs) + Multi-output targets. An indicator matrix turns on multilabel + estimation. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, then samples are equally weighted. + Only supported if the underlying regressor supports sample + weights. + + **fit_params : dict of string -> object + Parameters passed to the ``estimator.fit`` method of each step. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Returns a fitted instance. + """ + if not hasattr(self.estimator, "fit"): + raise ValueError("The base estimator should implement a fit method") + + y = self._validate_data(X="no_validation", y=y, multi_output=True) + + if is_classifier(self): + check_classification_targets(y) + + if y.ndim == 1: + raise ValueError( + "y must have at least two dimensions for " + "multi-output regression but has only one." + ) + + if _routing_enabled(): + if sample_weight is not None: + fit_params["sample_weight"] = sample_weight + routed_params = process_routing( + self, + "fit", + **fit_params, + ) + else: + if sample_weight is not None and not has_fit_parameter( + self.estimator, "sample_weight" + ): + raise ValueError( + "Underlying estimator does not support sample weights." + ) + + fit_params_validated = _check_method_params(X, params=fit_params) + routed_params = Bunch(estimator=Bunch(fit=fit_params_validated)) + if sample_weight is not None: + routed_params.estimator.fit["sample_weight"] = sample_weight + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_fit_estimator)( + self.estimator, X, y[:, i], **routed_params.estimator.fit + ) + for i in range(y.shape[1]) + ) + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + if hasattr(self.estimators_[0], "feature_names_in_"): + self.feature_names_in_ = self.estimators_[0].feature_names_in_ + + return self + + def predict(self, X): + """Predict multi-output variable using model for each target variable. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + y : {array-like, sparse matrix} of shape (n_samples, n_outputs) + Multi-output targets predicted across multiple predictors. + Note: Separate models are generated for each predictor. 
+ """ + check_is_fitted(self) + if not hasattr(self.estimators_[0], "predict"): + raise ValueError("The base estimator should implement a predict method") + + y = Parallel(n_jobs=self.n_jobs)( + delayed(e.predict)(X) for e in self.estimators_ + ) + + return np.asarray(y).T + + def _more_tags(self): + return {"multioutput_only": True} + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.3 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(callee="partial_fit", caller="partial_fit") + .add(callee="fit", caller="fit"), + ) + return router + + +class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator): + """Multi target regression. + + This strategy consists of fitting one regressor per target. This is a + simple strategy for extending regressors that do not natively support + multi-target regression. + + .. versionadded:: 0.18 + + Parameters + ---------- + estimator : estimator object + An estimator object implementing :term:`fit` and :term:`predict`. + + n_jobs : int or None, optional (default=None) + The number of jobs to run in parallel. + :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported + by the passed estimator) will be parallelized for each target. + + When individual estimators are fast to train or predict, + using ``n_jobs > 1`` can result in slower performance due + to the parallelism overhead. + + ``None`` means `1` unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all available processes / threads. + See :term:`Glossary ` for more details. + + .. versionchanged:: 0.20 + `n_jobs` default changed from `1` to `None`. + + Attributes + ---------- + estimators_ : list of ``n_output`` estimators + Estimators used for predictions. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying `estimator` exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimators expose such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + RegressorChain : A multi-label model that arranges regressions into a + chain. + MultiOutputClassifier : Classifies each output independently rather than + chaining. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import load_linnerud + >>> from sklearn.multioutput import MultiOutputRegressor + >>> from sklearn.linear_model import Ridge + >>> X, y = load_linnerud(return_X_y=True) + >>> regr = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y) + >>> regr.predict(X[[0]]) + array([[176..., 35..., 57...]]) + """ + + def __init__(self, estimator, *, n_jobs=None): + super().__init__(estimator, n_jobs=n_jobs) + + @_available_if_estimator_has("partial_fit") + def partial_fit(self, X, y, sample_weight=None, **partial_fit_params): + """Incrementally fit the model to data, for each output variable. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : {array-like, sparse matrix} of shape (n_samples, n_outputs) + Multi-output targets. 
+ + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, then samples are equally weighted. + Only supported if the underlying regressor supports sample + weights. + + **partial_fit_params : dict of str -> object + Parameters passed to the ``estimator.partial_fit`` method of each + sub-estimator. + + Only available if `enable_metadata_routing=True`. See the + :ref:`User Guide `. + + .. versionadded:: 1.3 + + Returns + ------- + self : object + Returns a fitted instance. + """ + super().partial_fit(X, y, sample_weight=sample_weight, **partial_fit_params) + + +class MultiOutputClassifier(ClassifierMixin, _MultiOutputEstimator): + """Multi target classification. + + This strategy consists of fitting one classifier per target. This is a + simple strategy for extending classifiers that do not natively support + multi-target classification. + + Parameters + ---------- + estimator : estimator object + An estimator object implementing :term:`fit` and :term:`predict`. + A :term:`predict_proba` method will be exposed only if `estimator` implements + it. + + n_jobs : int or None, optional (default=None) + The number of jobs to run in parallel. + :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported + by the passed estimator) will be parallelized for each target. + + When individual estimators are fast to train or predict, + using ``n_jobs > 1`` can result in slower performance due + to the parallelism overhead. + + ``None`` means `1` unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all available processes / threads. + See :term:`Glossary ` for more details. + + .. versionchanged:: 0.20 + `n_jobs` default changed from `1` to `None`. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + Class labels. + + estimators_ : list of ``n_output`` estimators + Estimators used for predictions. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying `estimator` exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimators expose such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + ClassifierChain : A multi-label model that arranges binary classifiers + into a chain. + MultiOutputRegressor : Fits one regressor per target variable. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_multilabel_classification + >>> from sklearn.multioutput import MultiOutputClassifier + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_multilabel_classification(n_classes=3, random_state=0) + >>> clf = MultiOutputClassifier(LogisticRegression()).fit(X, y) + >>> clf.predict(X[-2:]) + array([[1, 1, 1], + [1, 0, 1]]) + """ + + def __init__(self, estimator, *, n_jobs=None): + super().__init__(estimator, n_jobs=n_jobs) + + def fit(self, X, Y, sample_weight=None, **fit_params): + """Fit the model to data matrix X and targets Y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Y : array-like of shape (n_samples, n_classes) + The target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, then samples are equally weighted. + Only supported if the underlying classifier supports sample + weights. 
+ + **fit_params : dict of string -> object + Parameters passed to the ``estimator.fit`` method of each step. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Returns a fitted instance. + """ + super().fit(X, Y, sample_weight=sample_weight, **fit_params) + self.classes_ = [estimator.classes_ for estimator in self.estimators_] + return self + + def _check_predict_proba(self): + if hasattr(self, "estimators_"): + # raise an AttributeError if `predict_proba` does not exist for + # each estimator + [getattr(est, "predict_proba") for est in self.estimators_] + return True + # raise an AttributeError if `predict_proba` does not exist for the + # unfitted estimator + getattr(self.estimator, "predict_proba") + return True + + @available_if(_check_predict_proba) + def predict_proba(self, X): + """Return prediction probabilities for each class of each output. + + This method will raise a ``ValueError`` if any of the + estimators do not have ``predict_proba``. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data. + + Returns + ------- + p : array of shape (n_samples, n_classes), or a list of n_outputs \ + such arrays if n_outputs > 1. + The class probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + + .. versionchanged:: 0.19 + This function now returns a list of arrays where the length of + the list is ``n_outputs``, and each array is (``n_samples``, + ``n_classes``) for that particular output. + """ + check_is_fitted(self) + results = [estimator.predict_proba(X) for estimator in self.estimators_] + return results + + def score(self, X, y): + """Return the mean accuracy on the given test data and labels. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples, n_outputs) + True values for X. + + Returns + ------- + scores : float + Mean accuracy of predicted target versus true target. + """ + check_is_fitted(self) + n_outputs_ = len(self.estimators_) + if y.ndim == 1: + raise ValueError( + "y must have at least two dimensions for " + "multi target classification but has only one" + ) + if y.shape[1] != n_outputs_: + raise ValueError( + "The number of outputs of Y for fit {0} and" + " score {1} should be same".format(n_outputs_, y.shape[1]) + ) + y_pred = self.predict(X) + return np.mean(np.all(y == y_pred, axis=1)) + + def _more_tags(self): + # FIXME + return {"_skip_test": True} + + +def _available_if_base_estimator_has(attr): + """Return a function to check if `base_estimator` or `estimators_` has `attr`. + + Helper for Chain implementations. 
+ """ + + def _check(self): + return hasattr(self.base_estimator, attr) or all( + hasattr(est, attr) for est in self.estimators_ + ) + + return available_if(_check) + + +class _BaseChain(BaseEstimator, metaclass=ABCMeta): + _parameter_constraints: dict = { + "base_estimator": [HasMethods(["fit", "predict"])], + "order": ["array-like", StrOptions({"random"}), None], + "cv": ["cv_object", StrOptions({"prefit"})], + "random_state": ["random_state"], + "verbose": ["boolean"], + } + + def __init__( + self, base_estimator, *, order=None, cv=None, random_state=None, verbose=False + ): + self.base_estimator = base_estimator + self.order = order + self.cv = cv + self.random_state = random_state + self.verbose = verbose + + def _log_message(self, *, estimator_idx, n_estimators, processing_msg): + if not self.verbose: + return None + return f"({estimator_idx} of {n_estimators}) {processing_msg}" + + @abstractmethod + def fit(self, X, Y, **fit_params): + """Fit the model to data matrix X and targets Y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Y : array-like of shape (n_samples, n_classes) + The target values. + + **fit_params : dict of string -> object + Parameters passed to the `fit` method of each step. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Returns a fitted instance. + """ + X, Y = self._validate_data(X, Y, multi_output=True, accept_sparse=True) + + random_state = check_random_state(self.random_state) + self.order_ = self.order + if isinstance(self.order_, tuple): + self.order_ = np.array(self.order_) + + if self.order_ is None: + self.order_ = np.array(range(Y.shape[1])) + elif isinstance(self.order_, str): + if self.order_ == "random": + self.order_ = random_state.permutation(Y.shape[1]) + elif sorted(self.order_) != list(range(Y.shape[1])): + raise ValueError("invalid order") + + self.estimators_ = [clone(self.base_estimator) for _ in range(Y.shape[1])] + + if self.cv is None: + Y_pred_chain = Y[:, self.order_] + if sp.issparse(X): + X_aug = sp.hstack((X, Y_pred_chain), format="lil") + X_aug = X_aug.tocsr() + else: + X_aug = np.hstack((X, Y_pred_chain)) + + elif sp.issparse(X): + Y_pred_chain = sp.lil_matrix((X.shape[0], Y.shape[1])) + X_aug = sp.hstack((X, Y_pred_chain), format="lil") + + else: + Y_pred_chain = np.zeros((X.shape[0], Y.shape[1])) + X_aug = np.hstack((X, Y_pred_chain)) + + del Y_pred_chain + + if _routing_enabled(): + routed_params = process_routing(self, "fit", **fit_params) + else: + routed_params = Bunch(estimator=Bunch(fit=fit_params)) + + for chain_idx, estimator in enumerate(self.estimators_): + message = self._log_message( + estimator_idx=chain_idx + 1, + n_estimators=len(self.estimators_), + processing_msg=f"Processing order {self.order_[chain_idx]}", + ) + y = Y[:, self.order_[chain_idx]] + with _print_elapsed_time("Chain", message): + estimator.fit( + X_aug[:, : (X.shape[1] + chain_idx)], + y, + **routed_params.estimator.fit, + ) + + if self.cv is not None and chain_idx < len(self.estimators_) - 1: + col_idx = X.shape[1] + chain_idx + cv_result = cross_val_predict( + self.base_estimator, X_aug[:, :col_idx], y=y, cv=self.cv + ) + if sp.issparse(X_aug): + X_aug[:, col_idx] = np.expand_dims(cv_result, 1) + else: + X_aug[:, col_idx] = cv_result + + return self + + def predict(self, X): + """Predict on the data matrix X using the ClassifierChain model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. 
+ + Returns + ------- + Y_pred : array-like of shape (n_samples, n_classes) + The predicted values. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse=True, reset=False) + Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_))) + for chain_idx, estimator in enumerate(self.estimators_): + previous_predictions = Y_pred_chain[:, :chain_idx] + if sp.issparse(X): + if chain_idx == 0: + X_aug = X + else: + X_aug = sp.hstack((X, previous_predictions)) + else: + X_aug = np.hstack((X, previous_predictions)) + Y_pred_chain[:, chain_idx] = estimator.predict(X_aug) + + inv_order = np.empty_like(self.order_) + inv_order[self.order_] = np.arange(len(self.order_)) + Y_pred = Y_pred_chain[:, inv_order] + + return Y_pred + + +class ClassifierChain(MetaEstimatorMixin, ClassifierMixin, _BaseChain): + """A multi-label model that arranges binary classifiers into a chain. + + Each model makes a prediction in the order specified by the chain using + all of the available features provided to the model plus the predictions + of models that are earlier in the chain. + + For an example of how to use ``ClassifierChain`` and benefit from its + ensemble, see + :ref:`ClassifierChain on a yeast dataset + ` example. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.19 + + Parameters + ---------- + base_estimator : estimator + The base estimator from which the classifier chain is built. + + order : array-like of shape (n_outputs,) or 'random', default=None + If `None`, the order will be determined by the order of columns in + the label matrix Y.:: + + order = [0, 1, 2, ..., Y.shape[1] - 1] + + The order of the chain can be explicitly set by providing a list of + integers. For example, for a chain of length 5.:: + + order = [1, 3, 2, 4, 0] + + means that the first model in the chain will make predictions for + column 1 in the Y matrix, the second model will make predictions + for column 3, etc. + + If order is `random` a random ordering will be used. + + cv : int, cross-validation generator or an iterable, default=None + Determines whether to use cross validated predictions or true + labels for the results of previous estimators in the chain. + Possible inputs for cv are: + + - None, to use true labels when fitting, + - integer, to specify the number of folds in a (Stratified)KFold, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + random_state : int, RandomState instance or None, optional (default=None) + If ``order='random'``, determines random number generation for the + chain order. + In addition, it controls the random seed given at each `base_estimator` + at each chaining iteration. Thus, it is only used when `base_estimator` + exposes a `random_state`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + verbose : bool, default=False + If True, chain progress is output as each model is completed. + + .. versionadded:: 1.2 + + Attributes + ---------- + classes_ : list + A list of arrays of length ``len(estimators_)`` containing the + class labels for each estimator in the chain. + + estimators_ : list + A list of clones of base_estimator. + + order_ : list + The order of labels in the classifier chain. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying `base_estimator` exposes such an attribute when fit. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + RegressorChain : Equivalent for regression. + MultiOutputClassifier : Classifies each output independently rather than + chaining. + + References + ---------- + Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank, "Classifier + Chains for Multi-label Classification", 2009. + + Examples + -------- + >>> from sklearn.datasets import make_multilabel_classification + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.multioutput import ClassifierChain + >>> X, Y = make_multilabel_classification( + ... n_samples=12, n_classes=3, random_state=0 + ... ) + >>> X_train, X_test, Y_train, Y_test = train_test_split( + ... X, Y, random_state=0 + ... ) + >>> base_lr = LogisticRegression(solver='lbfgs', random_state=0) + >>> chain = ClassifierChain(base_lr, order='random', random_state=0) + >>> chain.fit(X_train, Y_train).predict(X_test) + array([[1., 1., 0.], + [1., 0., 0.], + [0., 1., 0.]]) + >>> chain.predict_proba(X_test) + array([[0.8387..., 0.9431..., 0.4576...], + [0.8878..., 0.3684..., 0.2640...], + [0.0321..., 0.9935..., 0.0626...]]) + """ + + @_fit_context( + # ClassifierChain.base_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, Y, **fit_params): + """Fit the model to data matrix X and targets Y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Y : array-like of shape (n_samples, n_classes) + The target values. + + **fit_params : dict of string -> object + Parameters passed to the `fit` method of each step. + + Only available if `enable_metadata_routing=True`. See the + :ref:`User Guide `. + + .. versionadded:: 1.3 + + Returns + ------- + self : object + Class instance. + """ + _raise_for_params(fit_params, self, "fit") + + super().fit(X, Y, **fit_params) + self.classes_ = [estimator.classes_ for estimator in self.estimators_] + return self + + @_available_if_base_estimator_has("predict_proba") + def predict_proba(self, X): + """Predict probability estimates. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + Y_prob : array-like of shape (n_samples, n_classes) + The predicted probabilities. + """ + X = self._validate_data(X, accept_sparse=True, reset=False) + Y_prob_chain = np.zeros((X.shape[0], len(self.estimators_))) + Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_))) + for chain_idx, estimator in enumerate(self.estimators_): + previous_predictions = Y_pred_chain[:, :chain_idx] + if sp.issparse(X): + X_aug = sp.hstack((X, previous_predictions)) + else: + X_aug = np.hstack((X, previous_predictions)) + Y_prob_chain[:, chain_idx] = estimator.predict_proba(X_aug)[:, 1] + Y_pred_chain[:, chain_idx] = estimator.predict(X_aug) + inv_order = np.empty_like(self.order_) + inv_order[self.order_] = np.arange(len(self.order_)) + Y_prob = Y_prob_chain[:, inv_order] + + return Y_prob + + def predict_log_proba(self, X): + """Predict logarithm of probability estimates. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. 
+ + Returns + ------- + Y_log_prob : array-like of shape (n_samples, n_classes) + The predicted logarithm of the probabilities. + """ + return np.log(self.predict_proba(X)) + + @_available_if_base_estimator_has("decision_function") + def decision_function(self, X): + """Evaluate the decision_function of the models in the chain. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data. + + Returns + ------- + Y_decision : array-like of shape (n_samples, n_classes) + Returns the decision function of the sample for each model + in the chain. + """ + X = self._validate_data(X, accept_sparse=True, reset=False) + Y_decision_chain = np.zeros((X.shape[0], len(self.estimators_))) + Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_))) + for chain_idx, estimator in enumerate(self.estimators_): + previous_predictions = Y_pred_chain[:, :chain_idx] + if sp.issparse(X): + X_aug = sp.hstack((X, previous_predictions)) + else: + X_aug = np.hstack((X, previous_predictions)) + Y_decision_chain[:, chain_idx] = estimator.decision_function(X_aug) + Y_pred_chain[:, chain_idx] = estimator.predict(X_aug) + + inv_order = np.empty_like(self.order_) + inv_order[self.order_] = np.arange(len(self.order_)) + Y_decision = Y_decision_chain[:, inv_order] + + return Y_decision + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.3 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.base_estimator, + method_mapping=MethodMapping().add(callee="fit", caller="fit"), + ) + return router + + def _more_tags(self): + return {"_skip_test": True, "multioutput_only": True} + + +class RegressorChain(MetaEstimatorMixin, RegressorMixin, _BaseChain): + """A multi-label model that arranges regressions into a chain. + + Each model makes a prediction in the order specified by the chain using + all of the available features provided to the model plus the predictions + of models that are earlier in the chain. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + base_estimator : estimator + The base estimator from which the regressor chain is built. + + order : array-like of shape (n_outputs,) or 'random', default=None + If `None`, the order will be determined by the order of columns in + the label matrix Y.:: + + order = [0, 1, 2, ..., Y.shape[1] - 1] + + The order of the chain can be explicitly set by providing a list of + integers. For example, for a chain of length 5.:: + + order = [1, 3, 2, 4, 0] + + means that the first model in the chain will make predictions for + column 1 in the Y matrix, the second model will make predictions + for column 3, etc. + + If order is 'random' a random ordering will be used. + + cv : int, cross-validation generator or an iterable, default=None + Determines whether to use cross validated predictions or true + labels for the results of previous estimators in the chain. + Possible inputs for cv are: + + - None, to use true labels when fitting, + - integer, to specify the number of folds in a (Stratified)KFold, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. 
+ + random_state : int, RandomState instance or None, optional (default=None) + If ``order='random'``, determines random number generation for the + chain order. + In addition, it controls the random seed given at each `base_estimator` + at each chaining iteration. Thus, it is only used when `base_estimator` + exposes a `random_state`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + verbose : bool, default=False + If True, chain progress is output as each model is completed. + + .. versionadded:: 1.2 + + Attributes + ---------- + estimators_ : list + A list of clones of base_estimator. + + order_ : list + The order of labels in the classifier chain. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying `base_estimator` exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + ClassifierChain : Equivalent for classification. + MultiOutputRegressor : Learns each output independently rather than + chaining. + + Examples + -------- + >>> from sklearn.multioutput import RegressorChain + >>> from sklearn.linear_model import LogisticRegression + >>> logreg = LogisticRegression(solver='lbfgs',multi_class='multinomial') + >>> X, Y = [[1, 0], [0, 1], [1, 1]], [[0, 2], [1, 1], [2, 0]] + >>> chain = RegressorChain(base_estimator=logreg, order=[0, 1]).fit(X, Y) + >>> chain.predict(X) + array([[0., 2.], + [1., 1.], + [2., 0.]]) + """ + + @_fit_context( + # RegressorChain.base_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, Y, **fit_params): + """Fit the model to data matrix X and targets Y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Y : array-like of shape (n_samples, n_classes) + The target values. + + **fit_params : dict of string -> object + Parameters passed to the `fit` method at each step + of the regressor chain. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Returns a fitted instance. + """ + super().fit(X, Y, **fit_params) + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.3 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.base_estimator, + method_mapping=MethodMapping().add(callee="fit", caller="fit"), + ) + return router + + def _more_tags(self): + return {"multioutput_only": True} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/naive_bayes.py b/llmeval-env/lib/python3.10/site-packages/sklearn/naive_bayes.py new file mode 100644 index 0000000000000000000000000000000000000000..c5a129779dd89f9517bb19982365c8314c3cfa8a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/naive_bayes.py @@ -0,0 +1,1515 @@ +""" +The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These +are supervised learning methods based on applying Bayes' theorem with strong +(naive) feature independence assumptions. 
+""" + +# Author: Vincent Michel +# Minor fixes by Fabian Pedregosa +# Amit Aides +# Yehuda Finkelstein +# Lars Buitinck +# Jan Hendrik Metzen +# (parts based on earlier work by Mathieu Blondel) +# +# License: BSD 3 clause +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +from scipy.special import logsumexp + +from .base import BaseEstimator, ClassifierMixin, _fit_context +from .preprocessing import LabelBinarizer, binarize, label_binarize +from .utils._param_validation import Interval +from .utils.extmath import safe_sparse_dot +from .utils.multiclass import _check_partial_fit_first_call +from .utils.validation import _check_sample_weight, check_is_fitted, check_non_negative + +__all__ = [ + "BernoulliNB", + "GaussianNB", + "MultinomialNB", + "ComplementNB", + "CategoricalNB", +] + + +class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta): + """Abstract base class for naive Bayes estimators""" + + @abstractmethod + def _joint_log_likelihood(self, X): + """Compute the unnormalized posterior log probability of X + + I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of + shape (n_samples, n_classes). + + Public methods predict, predict_proba, predict_log_proba, and + predict_joint_log_proba pass the input through _check_X before handing it + over to _joint_log_likelihood. The term "joint log likelihood" is used + interchangibly with "joint log probability". + """ + + @abstractmethod + def _check_X(self, X): + """To be overridden in subclasses with the actual checks. + + Only used in predict* methods. + """ + + def predict_joint_log_proba(self, X): + """Return joint log probability estimates for the test vector X. + + For each row x of X and class y, the joint log probability is given by + ``log P(x, y) = log P(y) + log P(x|y),`` + where ``log P(y)`` is the class prior probability and ``log P(x|y)`` is + the class-conditional probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Returns the joint log-probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + """ + check_is_fitted(self) + X = self._check_X(X) + return self._joint_log_likelihood(X) + + def predict(self, X): + """ + Perform classification on an array of test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + C : ndarray of shape (n_samples,) + Predicted target values for X. + """ + check_is_fitted(self) + X = self._check_X(X) + jll = self._joint_log_likelihood(X) + return self.classes_[np.argmax(jll, axis=1)] + + def predict_log_proba(self, X): + """ + Return log-probability estimates for the test vector X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + C : array-like of shape (n_samples, n_classes) + Returns the log-probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. 
+ """ + check_is_fitted(self) + X = self._check_X(X) + jll = self._joint_log_likelihood(X) + # normalize by P(x) = P(f_1, ..., f_n) + log_prob_x = logsumexp(jll, axis=1) + return jll - np.atleast_2d(log_prob_x).T + + def predict_proba(self, X): + """ + Return probability estimates for the test vector X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + C : array-like of shape (n_samples, n_classes) + Returns the probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + """ + return np.exp(self.predict_log_proba(X)) + + +class GaussianNB(_BaseNB): + """ + Gaussian Naive Bayes (GaussianNB). + + Can perform online updates to model parameters via :meth:`partial_fit`. + For details on algorithm used to update feature means and variance online, + see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: + + http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + priors : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. If specified, the priors are not + adjusted according to the data. + + var_smoothing : float, default=1e-9 + Portion of the largest variance of all features that is added to + variances for calculation stability. + + .. versionadded:: 0.20 + + Attributes + ---------- + class_count_ : ndarray of shape (n_classes,) + number of training samples observed in each class. + + class_prior_ : ndarray of shape (n_classes,) + probability of each class. + + classes_ : ndarray of shape (n_classes,) + class labels known to the classifier. + + epsilon_ : float + absolute additive value to variances. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + var_ : ndarray of shape (n_classes, n_features) + Variance of each feature per class. + + .. versionadded:: 1.0 + + theta_ : ndarray of shape (n_classes, n_features) + mean of each feature per class. + + See Also + -------- + BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. + CategoricalNB : Naive Bayes classifier for categorical features. + ComplementNB : Complement Naive Bayes classifier. + MultinomialNB : Naive Bayes classifier for multinomial models. + + Examples + -------- + >>> import numpy as np + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> Y = np.array([1, 1, 1, 2, 2, 2]) + >>> from sklearn.naive_bayes import GaussianNB + >>> clf = GaussianNB() + >>> clf.fit(X, Y) + GaussianNB() + >>> print(clf.predict([[-0.8, -1]])) + [1] + >>> clf_pf = GaussianNB() + >>> clf_pf.partial_fit(X, Y, np.unique(Y)) + GaussianNB() + >>> print(clf_pf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + "priors": ["array-like", None], + "var_smoothing": [Interval(Real, 0, None, closed="left")], + } + + def __init__(self, *, priors=None, var_smoothing=1e-9): + self.priors = priors + self.var_smoothing = var_smoothing + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Gaussian Naive Bayes according to X, y. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + .. versionadded:: 0.17 + Gaussian Naive Bayes supports fitting with *sample_weight*. + + Returns + ------- + self : object + Returns the instance itself. + """ + y = self._validate_data(y=y) + return self._partial_fit( + X, y, np.unique(y), _refit=True, sample_weight=sample_weight + ) + + def _check_X(self, X): + """Validate X, used only in predict* methods.""" + return self._validate_data(X, reset=False) + + @staticmethod + def _update_mean_variance(n_past, mu, var, X, sample_weight=None): + """Compute online update of Gaussian mean and variance. + + Given starting sample count, mean, and variance, a new set of + points X, and optionally sample weights, return the updated mean and + variance. (NB - each dimension (column) in X is treated as independent + -- you get variance, not covariance). + + Can take scalar mean and variance, or vector mean and variance to + simultaneously update a number of independent Gaussians. + + See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: + + http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf + + Parameters + ---------- + n_past : int + Number of samples represented in old mean and variance. If sample + weights were given, this should contain the sum of sample + weights represented in old mean and variance. + + mu : array-like of shape (number of Gaussians,) + Means for Gaussians in original set. + + var : array-like of shape (number of Gaussians,) + Variances for Gaussians in original set. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + total_mu : array-like of shape (number of Gaussians,) + Updated mean for each Gaussian over the combined set. + + total_var : array-like of shape (number of Gaussians,) + Updated variance for each Gaussian over the combined set. + """ + if X.shape[0] == 0: + return mu, var + + # Compute (potentially weighted) mean and variance of new datapoints + if sample_weight is not None: + n_new = float(sample_weight.sum()) + if np.isclose(n_new, 0.0): + return mu, var + new_mu = np.average(X, axis=0, weights=sample_weight) + new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight) + else: + n_new = X.shape[0] + new_var = np.var(X, axis=0) + new_mu = np.mean(X, axis=0) + + if n_past == 0: + return new_mu, new_var + + n_total = float(n_past + n_new) + + # Combine mean of old and new data, taking into consideration + # (weighted) number of observations + total_mu = (n_new * new_mu + n_past * mu) / n_total + + # Combine variance of old and new data, taking into consideration + # (weighted) number of observations. This is achieved by combining + # the sum-of-squared-differences (ssd) + old_ssd = n_past * var + new_ssd = n_new * new_var + total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2 + total_var = total_ssd / n_total + + return total_mu, total_var + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None, sample_weight=None): + """Incremental fit on a batch of samples. 
+ + This method is expected to be called several times consecutively + on different chunks of a dataset so as to implement out-of-core + or online learning. + + This is especially useful when the whole dataset is too big to fit in + memory at once. + + This method has some performance and numerical stability overhead, + hence it is better to call partial_fit on chunks of data that are + as large as possible (as long as fitting in the memory budget) to + hide the overhead. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + classes : array-like of shape (n_classes,), default=None + List of all the classes that can possibly appear in the y vector. + + Must be provided at the first call to partial_fit, can be omitted + in subsequent calls. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + .. versionadded:: 0.17 + + Returns + ------- + self : object + Returns the instance itself. + """ + return self._partial_fit( + X, y, classes, _refit=False, sample_weight=sample_weight + ) + + def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None): + """Actual implementation of Gaussian NB fitting. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + classes : array-like of shape (n_classes,), default=None + List of all the classes that can possibly appear in the y vector. + + Must be provided at the first call to partial_fit, can be omitted + in subsequent calls. + + _refit : bool, default=False + If true, act as though this were the first time we called + _partial_fit (ie, throw away any past fitting and start over). + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + """ + if _refit: + self.classes_ = None + + first_call = _check_partial_fit_first_call(self, classes) + X, y = self._validate_data(X, y, reset=first_call) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + # If the ratio of data variance between dimensions is too small, it + # will cause numerical errors. To address this, we artificially + # boost the variance by epsilon, a small fraction of the standard + # deviation of the largest dimension. 
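+        # For instance (illustrative numbers, not computed here): with the
+        # default var_smoothing=1e-9 and a largest per-feature variance of 4.0,
+        # epsilon_ = 1e-9 * 4.0 = 4e-9 is added to every per-class variance,
+        # keeping the log-variance term in _joint_log_likelihood finite even
+        # for features that are (nearly) constant within a class.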
+ self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max() + + if first_call: + # This is the first call to partial_fit: + # initialize various cumulative counters + n_features = X.shape[1] + n_classes = len(self.classes_) + self.theta_ = np.zeros((n_classes, n_features)) + self.var_ = np.zeros((n_classes, n_features)) + + self.class_count_ = np.zeros(n_classes, dtype=np.float64) + + # Initialise the class prior + # Take into account the priors + if self.priors is not None: + priors = np.asarray(self.priors) + # Check that the provided prior matches the number of classes + if len(priors) != n_classes: + raise ValueError("Number of priors must match number of classes.") + # Check that the sum is 1 + if not np.isclose(priors.sum(), 1.0): + raise ValueError("The sum of the priors should be 1.") + # Check that the priors are non-negative + if (priors < 0).any(): + raise ValueError("Priors must be non-negative.") + self.class_prior_ = priors + else: + # Initialize the priors to zeros for each class + self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64) + else: + if X.shape[1] != self.theta_.shape[1]: + msg = "Number of features %d does not match previous data %d." + raise ValueError(msg % (X.shape[1], self.theta_.shape[1])) + # Put epsilon back in each time + self.var_[:, :] -= self.epsilon_ + + classes = self.classes_ + + unique_y = np.unique(y) + unique_y_in_classes = np.isin(unique_y, classes) + + if not np.all(unique_y_in_classes): + raise ValueError( + "The target label(s) %s in y do not exist in the initial classes %s" + % (unique_y[~unique_y_in_classes], classes) + ) + + for y_i in unique_y: + i = classes.searchsorted(y_i) + X_i = X[y == y_i, :] + + if sample_weight is not None: + sw_i = sample_weight[y == y_i] + N_i = sw_i.sum() + else: + sw_i = None + N_i = X_i.shape[0] + + new_theta, new_sigma = self._update_mean_variance( + self.class_count_[i], self.theta_[i, :], self.var_[i, :], X_i, sw_i + ) + + self.theta_[i, :] = new_theta + self.var_[i, :] = new_sigma + self.class_count_[i] += N_i + + self.var_[:, :] += self.epsilon_ + + # Update if only no priors is provided + if self.priors is None: + # Empirical prior, with sample_weight taken into account + self.class_prior_ = self.class_count_ / self.class_count_.sum() + + return self + + def _joint_log_likelihood(self, X): + joint_log_likelihood = [] + for i in range(np.size(self.classes_)): + jointi = np.log(self.class_prior_[i]) + n_ij = -0.5 * np.sum(np.log(2.0 * np.pi * self.var_[i, :])) + n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.var_[i, :]), 1) + joint_log_likelihood.append(jointi + n_ij) + + joint_log_likelihood = np.array(joint_log_likelihood).T + return joint_log_likelihood + + +class _BaseDiscreteNB(_BaseNB): + """Abstract base class for naive Bayes on discrete/categorical data + + Any estimator based on this class should provide: + + __init__ + _joint_log_likelihood(X) as per _BaseNB + _update_feature_log_prob(alpha) + _count(X, Y) + """ + + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], + "fit_prior": ["boolean"], + "class_prior": ["array-like", None], + "force_alpha": ["boolean"], + } + + def __init__(self, alpha=1.0, fit_prior=True, class_prior=None, force_alpha=True): + self.alpha = alpha + self.fit_prior = fit_prior + self.class_prior = class_prior + self.force_alpha = force_alpha + + @abstractmethod + def _count(self, X, Y): + """Update counts that are used to calculate probabilities. 
+ + The counts make up a sufficient statistic extracted from the data. + Accordingly, this method is called each time `fit` or `partial_fit` + update the model. `class_count_` and `feature_count_` must be updated + here along with any model specific counts. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input samples. + Y : ndarray of shape (n_samples, n_classes) + Binarized class labels. + """ + + @abstractmethod + def _update_feature_log_prob(self, alpha): + """Update feature log probabilities based on counts. + + This method is called each time `fit` or `partial_fit` update the + model. + + Parameters + ---------- + alpha : float + smoothing parameter. See :meth:`_check_alpha`. + """ + + def _check_X(self, X): + """Validate X, used only in predict* methods.""" + return self._validate_data(X, accept_sparse="csr", reset=False) + + def _check_X_y(self, X, y, reset=True): + """Validate X and y in fit methods.""" + return self._validate_data(X, y, accept_sparse="csr", reset=reset) + + def _update_class_log_prior(self, class_prior=None): + """Update class log priors. + + The class log priors are based on `class_prior`, class count or the + number of classes. This method is called each time `fit` or + `partial_fit` update the model. + """ + n_classes = len(self.classes_) + if class_prior is not None: + if len(class_prior) != n_classes: + raise ValueError("Number of priors must match number of classes.") + self.class_log_prior_ = np.log(class_prior) + elif self.fit_prior: + with warnings.catch_warnings(): + # silence the warning when count is 0 because class was not yet + # observed + warnings.simplefilter("ignore", RuntimeWarning) + log_class_count = np.log(self.class_count_) + + # empirical prior, with sample_weight taken into account + self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum()) + else: + self.class_log_prior_ = np.full(n_classes, -np.log(n_classes)) + + def _check_alpha(self): + alpha = ( + np.asarray(self.alpha) if not isinstance(self.alpha, Real) else self.alpha + ) + alpha_min = np.min(alpha) + if isinstance(alpha, np.ndarray): + if not alpha.shape[0] == self.n_features_in_: + raise ValueError( + "When alpha is an array, it should contains `n_features`. " + f"Got {alpha.shape[0]} elements instead of {self.n_features_in_}." + ) + # check that all alpha are positive + if alpha_min < 0: + raise ValueError("All values in alpha must be greater than 0.") + alpha_lower_bound = 1e-10 + if alpha_min < alpha_lower_bound and not self.force_alpha: + warnings.warn( + "alpha too small will result in numeric errors, setting alpha =" + f" {alpha_lower_bound:.1e}. Use `force_alpha=True` to keep alpha" + " unchanged." + ) + return np.maximum(alpha, alpha_lower_bound) + return alpha + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None, sample_weight=None): + """Incremental fit on a batch of samples. + + This method is expected to be called several times consecutively + on different chunks of a dataset so as to implement out-of-core + or online learning. + + This is especially useful when the whole dataset is too big to fit in + memory at once. + + This method has some performance overhead hence it is better to call + partial_fit on chunks of data that are as large as possible + (as long as fitting in the memory budget) to hide the overhead. 
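+        For instance, a minimal out-of-core sketch, where ``clf``,
+        ``chunk_iterator`` and ``all_classes`` are assumed to be supplied by
+        the caller::
+
+            for i, (X_chunk, y_chunk) in enumerate(chunk_iterator):
+                clf.partial_fit(X_chunk, y_chunk,
+                                classes=all_classes if i == 0 else None)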
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + classes : array-like of shape (n_classes,), default=None + List of all the classes that can possibly appear in the y vector. + + Must be provided at the first call to partial_fit, can be omitted + in subsequent calls. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + Returns the instance itself. + """ + first_call = not hasattr(self, "classes_") + + X, y = self._check_X_y(X, y, reset=first_call) + _, n_features = X.shape + + if _check_partial_fit_first_call(self, classes): + # This is the first call to partial_fit: + # initialize various cumulative counters + n_classes = len(classes) + self._init_counters(n_classes, n_features) + + Y = label_binarize(y, classes=self.classes_) + if Y.shape[1] == 1: + if len(self.classes_) == 2: + Y = np.concatenate((1 - Y, Y), axis=1) + else: # degenerate case: just one class + Y = np.ones_like(Y) + + if X.shape[0] != Y.shape[0]: + msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible." + raise ValueError(msg % (X.shape[0], y.shape[0])) + + # label_binarize() returns arrays with dtype=np.int64. + # We convert it to np.float64 to support sample_weight consistently + Y = Y.astype(np.float64, copy=False) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + sample_weight = np.atleast_2d(sample_weight) + Y *= sample_weight.T + + class_prior = self.class_prior + + # Count raw events from data before updating the class log prior + # and feature log probas + self._count(X, Y) + + # XXX: OPTIM: we could introduce a public finalization method to + # be called by the user explicitly just once after several consecutive + # calls to partial_fit and prior any call to predict[_[log_]proba] + # to avoid computing the smooth log probas at each call to partial fit + alpha = self._check_alpha() + self._update_feature_log_prob(alpha) + self._update_class_log_prior(class_prior=class_prior) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Naive Bayes classifier according to X, y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + Returns the instance itself. + """ + X, y = self._check_X_y(X, y) + _, n_features = X.shape + + labelbin = LabelBinarizer() + Y = labelbin.fit_transform(y) + self.classes_ = labelbin.classes_ + if Y.shape[1] == 1: + if len(self.classes_) == 2: + Y = np.concatenate((1 - Y, Y), axis=1) + else: # degenerate case: just one class + Y = np.ones_like(Y) + + # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. 
+ # We convert it to np.float64 to support sample_weight consistently; + # this means we also don't have to cast X to floating point + if sample_weight is not None: + Y = Y.astype(np.float64, copy=False) + sample_weight = _check_sample_weight(sample_weight, X) + sample_weight = np.atleast_2d(sample_weight) + Y *= sample_weight.T + + class_prior = self.class_prior + + # Count raw events from data before updating the class log prior + # and feature log probas + n_classes = Y.shape[1] + self._init_counters(n_classes, n_features) + self._count(X, Y) + alpha = self._check_alpha() + self._update_feature_log_prob(alpha) + self._update_class_log_prior(class_prior=class_prior) + return self + + def _init_counters(self, n_classes, n_features): + self.class_count_ = np.zeros(n_classes, dtype=np.float64) + self.feature_count_ = np.zeros((n_classes, n_features), dtype=np.float64) + + def _more_tags(self): + return {"poor_score": True} + + +class MultinomialNB(_BaseDiscreteNB): + """ + Naive Bayes classifier for multinomial models. + + The multinomial Naive Bayes classifier is suitable for classification with + discrete features (e.g., word counts for text classification). The + multinomial distribution normally requires integer feature counts. However, + in practice, fractional counts such as tf-idf may also work. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float or array-like of shape (n_features,), default=1.0 + Additive (Laplace/Lidstone) smoothing parameter + (set alpha=0 and force_alpha=True, for no smoothing). + + force_alpha : bool, default=True + If False and alpha is less than 1e-10, it will set alpha to + 1e-10. If True, alpha will remain unchanged. This may cause + numerical errors if alpha is too close to 0. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `force_alpha` changed to `True`. + + fit_prior : bool, default=True + Whether to learn class prior probabilities or not. + If false, a uniform prior will be used. + + class_prior : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. If specified, the priors are not + adjusted according to the data. + + Attributes + ---------- + class_count_ : ndarray of shape (n_classes,) + Number of samples encountered for each class during fitting. This + value is weighted by the sample weight when provided. + + class_log_prior_ : ndarray of shape (n_classes,) + Smoothed empirical log probability for each class. + + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier + + feature_count_ : ndarray of shape (n_classes, n_features) + Number of samples encountered for each (class, feature) + during fitting. This value is weighted by the sample weight when + provided. + + feature_log_prob_ : ndarray of shape (n_classes, n_features) + Empirical log probability of features + given a class, ``P(x_i|y)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. + CategoricalNB : Naive Bayes classifier for categorical features. + ComplementNB : Complement Naive Bayes classifier. + GaussianNB : Gaussian Naive Bayes. + + References + ---------- + C.D. Manning, P. Raghavan and H. Schuetze (2008). 
Introduction to + Information Retrieval. Cambridge University Press, pp. 234-265. + https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.RandomState(1) + >>> X = rng.randint(5, size=(6, 100)) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> from sklearn.naive_bayes import MultinomialNB + >>> clf = MultinomialNB() + >>> clf.fit(X, y) + MultinomialNB() + >>> print(clf.predict(X[2:3])) + [3] + """ + + def __init__( + self, *, alpha=1.0, force_alpha=True, fit_prior=True, class_prior=None + ): + super().__init__( + alpha=alpha, + fit_prior=fit_prior, + class_prior=class_prior, + force_alpha=force_alpha, + ) + + def _more_tags(self): + return {"requires_positive_X": True} + + def _count(self, X, Y): + """Count and smooth feature occurrences.""" + check_non_negative(X, "MultinomialNB (input X)") + self.feature_count_ += safe_sparse_dot(Y.T, X) + self.class_count_ += Y.sum(axis=0) + + def _update_feature_log_prob(self, alpha): + """Apply smoothing to raw counts and recompute log probabilities""" + smoothed_fc = self.feature_count_ + alpha + smoothed_cc = smoothed_fc.sum(axis=1) + + self.feature_log_prob_ = np.log(smoothed_fc) - np.log( + smoothed_cc.reshape(-1, 1) + ) + + def _joint_log_likelihood(self, X): + """Calculate the posterior log probability of the samples X""" + return safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_ + + +class ComplementNB(_BaseDiscreteNB): + """The Complement Naive Bayes classifier described in Rennie et al. (2003). + + The Complement Naive Bayes classifier was designed to correct the "severe + assumptions" made by the standard Multinomial Naive Bayes classifier. It is + particularly suited for imbalanced data sets. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + alpha : float or array-like of shape (n_features,), default=1.0 + Additive (Laplace/Lidstone) smoothing parameter + (set alpha=0 and force_alpha=True, for no smoothing). + + force_alpha : bool, default=True + If False and alpha is less than 1e-10, it will set alpha to + 1e-10. If True, alpha will remain unchanged. This may cause + numerical errors if alpha is too close to 0. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `force_alpha` changed to `True`. + + fit_prior : bool, default=True + Only used in edge case with a single class in the training set. + + class_prior : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. Not used. + + norm : bool, default=False + Whether or not a second normalization of the weights is performed. The + default behavior mirrors the implementations found in Mahout and Weka, + which do not follow the full algorithm described in Table 9 of the + paper. + + Attributes + ---------- + class_count_ : ndarray of shape (n_classes,) + Number of samples encountered for each class during fitting. This + value is weighted by the sample weight when provided. + + class_log_prior_ : ndarray of shape (n_classes,) + Smoothed empirical log probability for each class. Only used in edge + case with a single class in the training set. + + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier + + feature_all_ : ndarray of shape (n_features,) + Number of samples encountered for each feature during fitting. This + value is weighted by the sample weight when provided. 
+ + feature_count_ : ndarray of shape (n_classes, n_features) + Number of samples encountered for each (class, feature) during fitting. + This value is weighted by the sample weight when provided. + + feature_log_prob_ : ndarray of shape (n_classes, n_features) + Empirical weights for class complements. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. + CategoricalNB : Naive Bayes classifier for categorical features. + GaussianNB : Gaussian Naive Bayes. + MultinomialNB : Naive Bayes classifier for multinomial models. + + References + ---------- + Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003). + Tackling the poor assumptions of naive bayes text classifiers. In ICML + (Vol. 3, pp. 616-623). + https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.RandomState(1) + >>> X = rng.randint(5, size=(6, 100)) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> from sklearn.naive_bayes import ComplementNB + >>> clf = ComplementNB() + >>> clf.fit(X, y) + ComplementNB() + >>> print(clf.predict(X[2:3])) + [3] + """ + + _parameter_constraints: dict = { + **_BaseDiscreteNB._parameter_constraints, + "norm": ["boolean"], + } + + def __init__( + self, + *, + alpha=1.0, + force_alpha=True, + fit_prior=True, + class_prior=None, + norm=False, + ): + super().__init__( + alpha=alpha, + force_alpha=force_alpha, + fit_prior=fit_prior, + class_prior=class_prior, + ) + self.norm = norm + + def _more_tags(self): + return {"requires_positive_X": True} + + def _count(self, X, Y): + """Count feature occurrences.""" + check_non_negative(X, "ComplementNB (input X)") + self.feature_count_ += safe_sparse_dot(Y.T, X) + self.class_count_ += Y.sum(axis=0) + self.feature_all_ = self.feature_count_.sum(axis=0) + + def _update_feature_log_prob(self, alpha): + """Apply smoothing to raw counts and compute the weights.""" + comp_count = self.feature_all_ + alpha - self.feature_count_ + logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True)) + # _BaseNB.predict uses argmax, but ComplementNB operates with argmin. + if self.norm: + summed = logged.sum(axis=1, keepdims=True) + feature_log_prob = logged / summed + else: + feature_log_prob = -logged + self.feature_log_prob_ = feature_log_prob + + def _joint_log_likelihood(self, X): + """Calculate the class scores for the samples in X.""" + jll = safe_sparse_dot(X, self.feature_log_prob_.T) + if len(self.classes_) == 1: + jll += self.class_log_prior_ + return jll + + +class BernoulliNB(_BaseDiscreteNB): + """Naive Bayes classifier for multivariate Bernoulli models. + + Like MultinomialNB, this classifier is suitable for discrete data. The + difference is that while MultinomialNB works with occurrence counts, + BernoulliNB is designed for binary/boolean features. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float or array-like of shape (n_features,), default=1.0 + Additive (Laplace/Lidstone) smoothing parameter + (set alpha=0 and force_alpha=True, for no smoothing). + + force_alpha : bool, default=True + If False and alpha is less than 1e-10, it will set alpha to + 1e-10. If True, alpha will remain unchanged. 
This may cause + numerical errors if alpha is too close to 0. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `force_alpha` changed to `True`. + + binarize : float or None, default=0.0 + Threshold for binarizing (mapping to booleans) of sample features. + If None, input is presumed to already consist of binary vectors. + + fit_prior : bool, default=True + Whether to learn class prior probabilities or not. + If false, a uniform prior will be used. + + class_prior : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. If specified, the priors are not + adjusted according to the data. + + Attributes + ---------- + class_count_ : ndarray of shape (n_classes,) + Number of samples encountered for each class during fitting. This + value is weighted by the sample weight when provided. + + class_log_prior_ : ndarray of shape (n_classes,) + Log probability of each class (smoothed). + + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier + + feature_count_ : ndarray of shape (n_classes, n_features) + Number of samples encountered for each (class, feature) + during fitting. This value is weighted by the sample weight when + provided. + + feature_log_prob_ : ndarray of shape (n_classes, n_features) + Empirical log probability of features given a class, P(x_i|y). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + CategoricalNB : Naive Bayes classifier for categorical features. + ComplementNB : The Complement Naive Bayes classifier + described in Rennie et al. (2003). + GaussianNB : Gaussian Naive Bayes (GaussianNB). + MultinomialNB : Naive Bayes classifier for multinomial models. + + References + ---------- + C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to + Information Retrieval. Cambridge University Press, pp. 234-265. + https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html + + A. McCallum and K. Nigam (1998). A comparison of event models for naive + Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for + Text Categorization, pp. 41-48. + + V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with + naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS). 
+ + Examples + -------- + >>> import numpy as np + >>> rng = np.random.RandomState(1) + >>> X = rng.randint(5, size=(6, 100)) + >>> Y = np.array([1, 2, 3, 4, 4, 5]) + >>> from sklearn.naive_bayes import BernoulliNB + >>> clf = BernoulliNB() + >>> clf.fit(X, Y) + BernoulliNB() + >>> print(clf.predict(X[2:3])) + [3] + """ + + _parameter_constraints: dict = { + **_BaseDiscreteNB._parameter_constraints, + "binarize": [None, Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + *, + alpha=1.0, + force_alpha=True, + binarize=0.0, + fit_prior=True, + class_prior=None, + ): + super().__init__( + alpha=alpha, + fit_prior=fit_prior, + class_prior=class_prior, + force_alpha=force_alpha, + ) + self.binarize = binarize + + def _check_X(self, X): + """Validate X, used only in predict* methods.""" + X = super()._check_X(X) + if self.binarize is not None: + X = binarize(X, threshold=self.binarize) + return X + + def _check_X_y(self, X, y, reset=True): + X, y = super()._check_X_y(X, y, reset=reset) + if self.binarize is not None: + X = binarize(X, threshold=self.binarize) + return X, y + + def _count(self, X, Y): + """Count and smooth feature occurrences.""" + self.feature_count_ += safe_sparse_dot(Y.T, X) + self.class_count_ += Y.sum(axis=0) + + def _update_feature_log_prob(self, alpha): + """Apply smoothing to raw counts and recompute log probabilities""" + smoothed_fc = self.feature_count_ + alpha + smoothed_cc = self.class_count_ + alpha * 2 + + self.feature_log_prob_ = np.log(smoothed_fc) - np.log( + smoothed_cc.reshape(-1, 1) + ) + + def _joint_log_likelihood(self, X): + """Calculate the posterior log probability of the samples X""" + n_features = self.feature_log_prob_.shape[1] + n_features_X = X.shape[1] + + if n_features_X != n_features: + raise ValueError( + "Expected input with %d features, got %d instead" + % (n_features, n_features_X) + ) + + neg_prob = np.log(1 - np.exp(self.feature_log_prob_)) + # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob + jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T) + jll += self.class_log_prior_ + neg_prob.sum(axis=1) + + return jll + + +class CategoricalNB(_BaseDiscreteNB): + """Naive Bayes classifier for categorical features. + + The categorical Naive Bayes classifier is suitable for classification with + discrete features that are categorically distributed. The categories of + each feature are drawn from a categorical distribution. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Additive (Laplace/Lidstone) smoothing parameter + (set alpha=0 and force_alpha=True, for no smoothing). + + force_alpha : bool, default=True + If False and alpha is less than 1e-10, it will set alpha to + 1e-10. If True, alpha will remain unchanged. This may cause + numerical errors if alpha is too close to 0. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `force_alpha` changed to `True`. + + fit_prior : bool, default=True + Whether to learn class prior probabilities or not. + If false, a uniform prior will be used. + + class_prior : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. If specified, the priors are not + adjusted according to the data. + + min_categories : int or array-like of shape (n_features,), default=None + Minimum number of categories per feature. + + - integer: Sets the minimum number of categories per feature to + `n_categories` for each features. 
+ - array-like: shape (n_features,) where `n_categories[i]` holds the + minimum number of categories for the ith column of the input. + - None (default): Determines the number of categories automatically + from the training data. + + .. versionadded:: 0.24 + + Attributes + ---------- + category_count_ : list of arrays of shape (n_features,) + Holds arrays of shape (n_classes, n_categories of respective feature) + for each feature. Each array provides the number of samples + encountered for each class and category of the specific feature. + + class_count_ : ndarray of shape (n_classes,) + Number of samples encountered for each class during fitting. This + value is weighted by the sample weight when provided. + + class_log_prior_ : ndarray of shape (n_classes,) + Smoothed empirical log probability for each class. + + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier + + feature_log_prob_ : list of arrays of shape (n_features,) + Holds arrays of shape (n_classes, n_categories of respective feature) + for each feature. Each array provides the empirical log probability + of categories given the respective feature and class, ``P(x_i|y)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_categories_ : ndarray of shape (n_features,), dtype=np.int64 + Number of categories for each feature. This value is + inferred from the data or set by the minimum number of categories. + + .. versionadded:: 0.24 + + See Also + -------- + BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. + ComplementNB : Complement Naive Bayes classifier. + GaussianNB : Gaussian Naive Bayes. + MultinomialNB : Naive Bayes classifier for multinomial models. + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.RandomState(1) + >>> X = rng.randint(5, size=(6, 100)) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> from sklearn.naive_bayes import CategoricalNB + >>> clf = CategoricalNB() + >>> clf.fit(X, y) + CategoricalNB() + >>> print(clf.predict(X[2:3])) + [3] + """ + + _parameter_constraints: dict = { + **_BaseDiscreteNB._parameter_constraints, + "min_categories": [ + None, + "array-like", + Interval(Integral, 1, None, closed="left"), + ], + "alpha": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + *, + alpha=1.0, + force_alpha=True, + fit_prior=True, + class_prior=None, + min_categories=None, + ): + super().__init__( + alpha=alpha, + force_alpha=force_alpha, + fit_prior=fit_prior, + class_prior=class_prior, + ) + self.min_categories = min_categories + + def fit(self, X, y, sample_weight=None): + """Fit Naive Bayes classifier according to X, y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. Here, each feature of X is + assumed to be from a different categorical distribution. + It is further assumed that all categories of each feature are + represented by the numbers 0, ..., n - 1, where n refers to the + total number of categories for the given feature. This can, for + instance, be achieved with the help of OrdinalEncoder. + + y : array-like of shape (n_samples,) + Target values. 
+ + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + Returns the instance itself. + """ + return super().fit(X, y, sample_weight=sample_weight) + + def partial_fit(self, X, y, classes=None, sample_weight=None): + """Incremental fit on a batch of samples. + + This method is expected to be called several times consecutively + on different chunks of a dataset so as to implement out-of-core + or online learning. + + This is especially useful when the whole dataset is too big to fit in + memory at once. + + This method has some performance overhead hence it is better to call + partial_fit on chunks of data that are as large as possible + (as long as fitting in the memory budget) to hide the overhead. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. Here, each feature of X is + assumed to be from a different categorical distribution. + It is further assumed that all categories of each feature are + represented by the numbers 0, ..., n - 1, where n refers to the + total number of categories for the given feature. This can, for + instance, be achieved with the help of OrdinalEncoder. + + y : array-like of shape (n_samples,) + Target values. + + classes : array-like of shape (n_classes,), default=None + List of all the classes that can possibly appear in the y vector. + + Must be provided at the first call to partial_fit, can be omitted + in subsequent calls. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + Returns the instance itself. + """ + return super().partial_fit(X, y, classes, sample_weight=sample_weight) + + def _more_tags(self): + return {"requires_positive_X": True} + + def _check_X(self, X): + """Validate X, used only in predict* methods.""" + X = self._validate_data( + X, dtype="int", accept_sparse=False, force_all_finite=True, reset=False + ) + check_non_negative(X, "CategoricalNB (input X)") + return X + + def _check_X_y(self, X, y, reset=True): + X, y = self._validate_data( + X, y, dtype="int", accept_sparse=False, force_all_finite=True, reset=reset + ) + check_non_negative(X, "CategoricalNB (input X)") + return X, y + + def _init_counters(self, n_classes, n_features): + self.class_count_ = np.zeros(n_classes, dtype=np.float64) + self.category_count_ = [np.zeros((n_classes, 0)) for _ in range(n_features)] + + @staticmethod + def _validate_n_categories(X, min_categories): + # rely on max for n_categories categories are encoded between 0...n-1 + n_categories_X = X.max(axis=0) + 1 + min_categories_ = np.array(min_categories) + if min_categories is not None: + if not np.issubdtype(min_categories_.dtype, np.signedinteger): + raise ValueError( + "'min_categories' should have integral type. Got " + f"{min_categories_.dtype} instead." + ) + n_categories_ = np.maximum(n_categories_X, min_categories_, dtype=np.int64) + if n_categories_.shape != n_categories_X.shape: + raise ValueError( + f"'min_categories' should have shape ({X.shape[1]}," + ") when an array-like is provided. Got" + f" {min_categories_.shape} instead." 
+ ) + return n_categories_ + else: + return n_categories_X + + def _count(self, X, Y): + def _update_cat_count_dims(cat_count, highest_feature): + diff = highest_feature + 1 - cat_count.shape[1] + if diff > 0: + # we append a column full of zeros for each new category + return np.pad(cat_count, [(0, 0), (0, diff)], "constant") + return cat_count + + def _update_cat_count(X_feature, Y, cat_count, n_classes): + for j in range(n_classes): + mask = Y[:, j].astype(bool) + if Y.dtype.type == np.int64: + weights = None + else: + weights = Y[mask, j] + counts = np.bincount(X_feature[mask], weights=weights) + indices = np.nonzero(counts)[0] + cat_count[j, indices] += counts[indices] + + self.class_count_ += Y.sum(axis=0) + self.n_categories_ = self._validate_n_categories(X, self.min_categories) + for i in range(self.n_features_in_): + X_feature = X[:, i] + self.category_count_[i] = _update_cat_count_dims( + self.category_count_[i], self.n_categories_[i] - 1 + ) + _update_cat_count( + X_feature, Y, self.category_count_[i], self.class_count_.shape[0] + ) + + def _update_feature_log_prob(self, alpha): + feature_log_prob = [] + for i in range(self.n_features_in_): + smoothed_cat_count = self.category_count_[i] + alpha + smoothed_class_count = smoothed_cat_count.sum(axis=1) + feature_log_prob.append( + np.log(smoothed_cat_count) - np.log(smoothed_class_count.reshape(-1, 1)) + ) + self.feature_log_prob_ = feature_log_prob + + def _joint_log_likelihood(self, X): + self._check_n_features(X, reset=False) + jll = np.zeros((X.shape[0], self.class_count_.shape[0])) + for i in range(self.n_features_in_): + indices = X[:, i] + jll += self.feature_log_prob_[i][:, indices].T + total_ll = jll + self.class_log_prior_ + return total_ll diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/pipeline.py b/llmeval-env/lib/python3.10/site-packages/sklearn/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..4df21618be4eec1a1aa56e5785e14789d4e23448 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/pipeline.py @@ -0,0 +1,1776 @@ +""" +The :mod:`sklearn.pipeline` module implements utilities to build a composite +estimator, as a chain of transforms and estimators. +""" +# Author: Edouard Duchesnay +# Gael Varoquaux +# Virgile Fritsch +# Alexandre Gramfort +# Lars Buitinck +# License: BSD + +from collections import defaultdict +from itertools import islice + +import numpy as np +from scipy import sparse + +from .base import TransformerMixin, _fit_context, clone +from .exceptions import NotFittedError +from .preprocessing import FunctionTransformer +from .utils import Bunch, _print_elapsed_time +from .utils._estimator_html_repr import _VisualBlock +from .utils._metadata_requests import METHODS +from .utils._param_validation import HasMethods, Hidden +from .utils._set_output import ( + _get_container_adapter, + _safe_set_output, +) +from .utils._tags import _safe_tags +from .utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _raise_for_unsupported_routing, + _routing_enabled, + _RoutingNotSupportedMixin, + process_routing, +) +from .utils.metaestimators import _BaseComposition, available_if +from .utils.parallel import Parallel, delayed +from .utils.validation import check_is_fitted, check_memory + +__all__ = ["Pipeline", "FeatureUnion", "make_pipeline", "make_union"] + + +def _final_estimator_has(attr): + """Check that final_estimator has `attr`. 
+ + Used together with `available_if` in `Pipeline`.""" + + def check(self): + # raise original `AttributeError` if `attr` does not exist + getattr(self._final_estimator, attr) + return True + + return check + + +class Pipeline(_BaseComposition): + """ + A sequence of data transformers with an optional final predictor. + + `Pipeline` allows you to sequentially apply a list of transformers to + preprocess the data and, if desired, conclude the sequence with a final + :term:`predictor` for predictive modeling. + + Intermediate steps of the pipeline must be 'transforms', that is, they + must implement `fit` and `transform` methods. + The final :term:`estimator` only needs to implement `fit`. + The transformers in the pipeline can be cached using ``memory`` argument. + + The purpose of the pipeline is to assemble several steps that can be + cross-validated together while setting different parameters. For this, it + enables setting parameters of the various steps using their names and the + parameter name separated by a `'__'`, as in the example below. A step's + estimator may be replaced entirely by setting the parameter with its name + to another estimator, or a transformer removed by setting it to + `'passthrough'` or `None`. + + For an example use case of `Pipeline` combined with + :class:`~sklearn.model_selection.GridSearchCV`, refer to + :ref:`sphx_glr_auto_examples_compose_plot_compare_reduction.py`. The + example :ref:`sphx_glr_auto_examples_compose_plot_digits_pipe.py` shows how + to grid search on a pipeline using `'__'` as a separator in the parameter names. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.5 + + Parameters + ---------- + steps : list of tuples + List of (name of step, estimator) tuples that are to be chained in + sequential order. To be compatible with the scikit-learn API, all steps + must define `fit`. All non-last steps must also define `transform`. See + :ref:`Combining Estimators ` for more details. + + memory : str or object with the joblib.Memory interface, default=None + Used to cache the fitted transformers of the pipeline. The last step + will never be cached, even if it is a transformer. By default, no + caching is performed. If a string is given, it is the path to the + caching directory. Enabling caching triggers a clone of the transformers + before fitting. Therefore, the transformer instance given to the + pipeline cannot be inspected directly. Use the attribute ``named_steps`` + or ``steps`` to inspect estimators within the pipeline. Caching the + transformers is advantageous when fitting is time consuming. + + verbose : bool, default=False + If True, the time elapsed while fitting each step will be printed as it + is completed. + + Attributes + ---------- + named_steps : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + Read-only attribute to access any step parameter by user given name. + Keys are step names and values are steps parameters. + + classes_ : ndarray of shape (n_classes,) + The classes labels. Only exist if the last step of the pipeline is a + classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying first estimator in `steps` exposes such an attribute + when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. 
versionadded:: 1.0 + + See Also + -------- + make_pipeline : Convenience function for simplified pipeline construction. + + Examples + -------- + >>> from sklearn.svm import SVC + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.pipeline import Pipeline + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, + ... random_state=0) + >>> pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())]) + >>> # The pipeline can be used as any other estimator + >>> # and avoids leaking the test set into the train set + >>> pipe.fit(X_train, y_train).score(X_test, y_test) + 0.88 + >>> # An estimator's parameter can be set using '__' syntax + >>> pipe.set_params(svc__C=10).fit(X_train, y_train).score(X_test, y_test) + 0.76 + """ + + # BaseEstimator interface + _required_parameters = ["steps"] + + _parameter_constraints: dict = { + "steps": [list, Hidden(tuple)], + "memory": [None, str, HasMethods(["cache"])], + "verbose": ["boolean"], + } + + def __init__(self, steps, *, memory=None, verbose=False): + self.steps = steps + self.memory = memory + self.verbose = verbose + + def set_output(self, *, transform=None): + """Set the output container when `"transform"` and `"fit_transform"` are called. + + Calling `set_output` will set the output of all estimators in `steps`. + + Parameters + ---------- + transform : {"default", "pandas"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.4 + `"polars"` option was added. + + Returns + ------- + self : estimator instance + Estimator instance. + """ + for _, _, step in self._iter(): + _safe_set_output(step, transform=transform) + return self + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Returns the parameters given in the constructor as well as the + estimators contained within the `steps` of the `Pipeline`. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : mapping of string to any + Parameter names mapped to their values. + """ + return self._get_params("steps", deep=deep) + + def set_params(self, **kwargs): + """Set the parameters of this estimator. + + Valid parameter keys can be listed with ``get_params()``. Note that + you can directly set the parameters of the estimators contained in + `steps`. + + Parameters + ---------- + **kwargs : dict + Parameters of this estimator or parameters of estimators contained + in `steps`. Parameters of the steps may be set using its name and + the parameter name separated by a '__'. + + Returns + ------- + self : object + Pipeline class instance. 
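+
+        Examples
+        --------
+        A minimal sketch; the step names and estimators are illustrative:
+
+        >>> from sklearn.pipeline import Pipeline
+        >>> from sklearn.preprocessing import StandardScaler
+        >>> from sklearn.svm import SVC
+        >>> pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())])
+        >>> pipe.set_params(svc__C=10) is pipe  # returns the pipeline itself
+        True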
+ """ + self._set_params("steps", **kwargs) + return self + + def _validate_steps(self): + names, estimators = zip(*self.steps) + + # validate names + self._validate_names(names) + + # validate estimators + transformers = estimators[:-1] + estimator = estimators[-1] + + for t in transformers: + if t is None or t == "passthrough": + continue + if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr( + t, "transform" + ): + raise TypeError( + "All intermediate steps should be " + "transformers and implement fit and transform " + "or be the string 'passthrough' " + "'%s' (type %s) doesn't" % (t, type(t)) + ) + + # We allow last estimator to be None as an identity transformation + if ( + estimator is not None + and estimator != "passthrough" + and not hasattr(estimator, "fit") + ): + raise TypeError( + "Last step of Pipeline should implement fit " + "or be the string 'passthrough'. " + "'%s' (type %s) doesn't" % (estimator, type(estimator)) + ) + + def _iter(self, with_final=True, filter_passthrough=True): + """ + Generate (idx, (name, trans)) tuples from self.steps + + When filter_passthrough is True, 'passthrough' and None transformers + are filtered out. + """ + stop = len(self.steps) + if not with_final: + stop -= 1 + + for idx, (name, trans) in enumerate(islice(self.steps, 0, stop)): + if not filter_passthrough: + yield idx, name, trans + elif trans is not None and trans != "passthrough": + yield idx, name, trans + + def __len__(self): + """ + Returns the length of the Pipeline + """ + return len(self.steps) + + def __getitem__(self, ind): + """Returns a sub-pipeline or a single estimator in the pipeline + + Indexing with an integer will return an estimator; using a slice + returns another Pipeline instance which copies a slice of this + Pipeline. This copy is shallow: modifying (or fitting) estimators in + the sub-pipeline will affect the larger pipeline and vice-versa. + However, replacing a value in `step` will not affect a copy. + """ + if isinstance(ind, slice): + if ind.step not in (1, None): + raise ValueError("Pipeline slicing only supports a step of 1") + return self.__class__( + self.steps[ind], memory=self.memory, verbose=self.verbose + ) + try: + name, est = self.steps[ind] + except TypeError: + # Not an int, try get step by name + return self.named_steps[ind] + return est + + @property + def _estimator_type(self): + return self.steps[-1][1]._estimator_type + + @property + def named_steps(self): + """Access the steps by name. + + Read-only attribute to access any step by given name. + Keys are steps names and values are the steps objects.""" + # Use Bunch object to improve autocomplete + return Bunch(**dict(self.steps)) + + @property + def _final_estimator(self): + try: + estimator = self.steps[-1][1] + return "passthrough" if estimator is None else estimator + except (ValueError, AttributeError, TypeError): + # This condition happens when a call to a method is first calling + # `_available_if` and `fit` did not validate `steps` yet. We + # return `None` and an `InvalidParameterError` will be raised + # right after. 
+ return None + + def _log_message(self, step_idx): + if not self.verbose: + return None + name, _ = self.steps[step_idx] + + return "(step %d of %d) Processing %s" % (step_idx + 1, len(self.steps), name) + + def _check_method_params(self, method, props, **kwargs): + if _routing_enabled(): + routed_params = process_routing(self, method, **props, **kwargs) + return routed_params + else: + fit_params_steps = Bunch( + **{ + name: Bunch(**{method: {} for method in METHODS}) + for name, step in self.steps + if step is not None + } + ) + for pname, pval in props.items(): + if "__" not in pname: + raise ValueError( + "Pipeline.fit does not accept the {} parameter. " + "You can pass parameters to specific steps of your " + "pipeline using the stepname__parameter format, e.g. " + "`Pipeline.fit(X, y, logisticregression__sample_weight" + "=sample_weight)`.".format(pname) + ) + step, param = pname.split("__", 1) + fit_params_steps[step]["fit"][param] = pval + # without metadata routing, fit_transform and fit_predict + # get all the same params and pass it to the last fit. + fit_params_steps[step]["fit_transform"][param] = pval + fit_params_steps[step]["fit_predict"][param] = pval + return fit_params_steps + + # Estimator interface + + def _fit(self, X, y=None, routed_params=None): + # shallow copy of steps - this should really be steps_ + self.steps = list(self.steps) + self._validate_steps() + # Setup the memory + memory = check_memory(self.memory) + + fit_transform_one_cached = memory.cache(_fit_transform_one) + + for step_idx, name, transformer in self._iter( + with_final=False, filter_passthrough=False + ): + if transformer is None or transformer == "passthrough": + with _print_elapsed_time("Pipeline", self._log_message(step_idx)): + continue + + if hasattr(memory, "location") and memory.location is None: + # we do not clone when caching is disabled to + # preserve backward compatibility + cloned_transformer = transformer + else: + cloned_transformer = clone(transformer) + # Fit or load from cache the current transformer + X, fitted_transformer = fit_transform_one_cached( + cloned_transformer, + X, + y, + None, + message_clsname="Pipeline", + message=self._log_message(step_idx), + params=routed_params[name], + ) + # Replace the transformer of the step with the fitted + # transformer. This is necessary when loading the transformer + # from the cache. + self.steps[step_idx] = (name, fitted_transformer) + return X + + @_fit_context( + # estimators in Pipeline.steps are not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **params): + """Fit the model. + + Fit all the transformers one after the other and sequentially transform the + data. Finally, fit the transformed data using the final estimator. + + Parameters + ---------- + X : iterable + Training data. Must fulfill input requirements of first step of the + pipeline. + + y : iterable, default=None + Training targets. Must fulfill label requirements for all steps of + the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): + + Parameters passed to the ``fit`` method of each step, where + each parameter name is prefixed such that parameter ``p`` for step + ``s`` has key ``s__p``. + + - If `enable_metadata_routing=True`: + + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. 
versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True` is set via + :func:`~sklearn.set_config`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + Pipeline with fitted steps. + """ + routed_params = self._check_method_params(method="fit", props=params) + Xt = self._fit(X, y, routed_params) + with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): + if self._final_estimator != "passthrough": + last_step_params = routed_params[self.steps[-1][0]] + self._final_estimator.fit(Xt, y, **last_step_params["fit"]) + + return self + + def _can_fit_transform(self): + return ( + self._final_estimator == "passthrough" + or hasattr(self._final_estimator, "transform") + or hasattr(self._final_estimator, "fit_transform") + ) + + @available_if(_can_fit_transform) + @_fit_context( + # estimators in Pipeline.steps are not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None, **params): + """Fit the model and transform with the final estimator. + + Fit all the transformers one after the other and sequentially transform + the data. Only valid if the final estimator either implements + `fit_transform` or `fit` and `transform`. + + Parameters + ---------- + X : iterable + Training data. Must fulfill input requirements of first step of the + pipeline. + + y : iterable, default=None + Training targets. Must fulfill label requirements for all steps of + the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): + + Parameters passed to the ``fit`` method of each step, where + each parameter name is prefixed such that parameter ``p`` for step + ``s`` has key ``s__p``. + + - If `enable_metadata_routing=True`: + + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_transformed_features) + Transformed samples. + """ + routed_params = self._check_method_params(method="fit_transform", props=params) + Xt = self._fit(X, y, routed_params) + + last_step = self._final_estimator + with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): + if last_step == "passthrough": + return Xt + last_step_params = routed_params[self.steps[-1][0]] + if hasattr(last_step, "fit_transform"): + return last_step.fit_transform( + Xt, y, **last_step_params["fit_transform"] + ) + else: + return last_step.fit(Xt, y, **last_step_params["fit"]).transform( + Xt, **last_step_params["transform"] + ) + + @available_if(_final_estimator_has("predict")) + def predict(self, X, **params): + """Transform the data, and apply `predict` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls `predict` + method. Only valid if the final estimator implements `predict`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. 
+ + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): + + Parameters to the ``predict`` called at the end of all + transformations in the pipeline. + + - If `enable_metadata_routing=True`: + + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 0.20 + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True` is set via + :func:`~sklearn.set_config`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Note that while this may be used to return uncertainties from some + models with ``return_std`` or ``return_cov``, uncertainties that are + generated by the transformations in the pipeline are not propagated + to the final estimator. + + Returns + ------- + y_pred : ndarray + Result of calling `predict` on the final estimator. + """ + Xt = X + + if not _routing_enabled(): + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt) + return self.steps[-1][1].predict(Xt, **params) + + # metadata routing enabled + routed_params = process_routing(self, "predict", **params) + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt, **routed_params[name].transform) + return self.steps[-1][1].predict(Xt, **routed_params[self.steps[-1][0]].predict) + + @available_if(_final_estimator_has("fit_predict")) + @_fit_context( + # estimators in Pipeline.steps are not validated yet + prefer_skip_nested_validation=False + ) + def fit_predict(self, X, y=None, **params): + """Transform the data, and apply `fit_predict` with the final estimator. + + Call `fit_transform` of each transformer in the pipeline. The + transformed data are finally passed to the final estimator that calls + `fit_predict` method. Only valid if the final estimator implements + `fit_predict`. + + Parameters + ---------- + X : iterable + Training data. Must fulfill input requirements of first step of + the pipeline. + + y : iterable, default=None + Training targets. Must fulfill label requirements for all steps + of the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): + + Parameters to the ``predict`` called at the end of all + transformations in the pipeline. + + - If `enable_metadata_routing=True`: + + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 0.20 + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Note that while this may be used to return uncertainties from some + models with ``return_std`` or ``return_cov``, uncertainties that are + generated by the transformations in the pipeline are not propagated + to the final estimator. + + Returns + ------- + y_pred : ndarray + Result of calling `fit_predict` on the final estimator. 
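+
+        Examples
+        --------
+        A minimal sketch using clustering as the final step; the estimator
+        choices are illustrative:
+
+        >>> from sklearn.datasets import make_blobs
+        >>> from sklearn.preprocessing import StandardScaler
+        >>> from sklearn.cluster import KMeans
+        >>> from sklearn.pipeline import Pipeline
+        >>> X, _ = make_blobs(n_samples=6, centers=2, random_state=0)
+        >>> pipe = Pipeline([('scale', StandardScaler()),
+        ...                  ('km', KMeans(n_clusters=2, n_init=1, random_state=0))])
+        >>> pipe.fit_predict(X).shape
+        (6,)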
+ """ + routed_params = self._check_method_params(method="fit_predict", props=params) + Xt = self._fit(X, y, routed_params) + + params_last_step = routed_params[self.steps[-1][0]] + with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): + y_pred = self.steps[-1][1].fit_predict( + Xt, y, **params_last_step.get("fit_predict", {}) + ) + return y_pred + + @available_if(_final_estimator_has("predict_proba")) + def predict_proba(self, X, **params): + """Transform the data, and apply `predict_proba` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `predict_proba` method. Only valid if the final estimator implements + `predict_proba`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): + + Parameters to the `predict_proba` called at the end of all + transformations in the pipeline. + + - If `enable_metadata_routing=True`: + + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 0.20 + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + y_proba : ndarray of shape (n_samples, n_classes) + Result of calling `predict_proba` on the final estimator. + """ + Xt = X + + if not _routing_enabled(): + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt) + return self.steps[-1][1].predict_proba(Xt, **params) + + # metadata routing enabled + routed_params = process_routing(self, "predict_proba", **params) + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt, **routed_params[name].transform) + return self.steps[-1][1].predict_proba( + Xt, **routed_params[self.steps[-1][0]].predict_proba + ) + + @available_if(_final_estimator_has("decision_function")) + def decision_function(self, X, **params): + """Transform the data, and apply `decision_function` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `decision_function` method. Only valid if the final estimator + implements `decision_function`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of string -> object + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + y_score : ndarray of shape (n_samples, n_classes) + Result of calling `decision_function` on the final estimator. 
+ """ + _raise_for_params(params, self, "decision_function") + + # not branching here since params is only available if + # enable_metadata_routing=True + routed_params = process_routing(self, "decision_function", **params) + + Xt = X + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform( + Xt, **routed_params.get(name, {}).get("transform", {}) + ) + return self.steps[-1][1].decision_function( + Xt, **routed_params.get(self.steps[-1][0], {}).get("decision_function", {}) + ) + + @available_if(_final_estimator_has("score_samples")) + def score_samples(self, X): + """Transform the data, and apply `score_samples` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `score_samples` method. Only valid if the final estimator implements + `score_samples`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + Returns + ------- + y_score : ndarray of shape (n_samples,) + Result of calling `score_samples` on the final estimator. + """ + Xt = X + for _, _, transformer in self._iter(with_final=False): + Xt = transformer.transform(Xt) + return self.steps[-1][1].score_samples(Xt) + + @available_if(_final_estimator_has("predict_log_proba")) + def predict_log_proba(self, X, **params): + """Transform the data, and apply `predict_log_proba` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `predict_log_proba` method. Only valid if the final estimator + implements `predict_log_proba`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): + + Parameters to the `predict_log_proba` called at the end of all + transformations in the pipeline. + + - If `enable_metadata_routing=True`: + + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 0.20 + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + y_log_proba : ndarray of shape (n_samples, n_classes) + Result of calling `predict_log_proba` on the final estimator. + """ + Xt = X + + if not _routing_enabled(): + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt) + return self.steps[-1][1].predict_log_proba(Xt, **params) + + # metadata routing enabled + routed_params = process_routing(self, "predict_log_proba", **params) + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt, **routed_params[name].transform) + return self.steps[-1][1].predict_log_proba( + Xt, **routed_params[self.steps[-1][0]].predict_log_proba + ) + + def _can_transform(self): + return self._final_estimator == "passthrough" or hasattr( + self._final_estimator, "transform" + ) + + @available_if(_can_transform) + def transform(self, X, **params): + """Transform the data, and apply `transform` with the final estimator. + + Call `transform` of each transformer in the pipeline. 
The transformed + data are finally passed to the final estimator that calls + `transform` method. Only valid if the final estimator + implements `transform`. + + This also works where final estimator is `None` in which case all prior + transformations are applied. + + Parameters + ---------- + X : iterable + Data to transform. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of str -> object + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_transformed_features) + Transformed data. + """ + _raise_for_params(params, self, "transform") + + # not branching here since params is only available if + # enable_metadata_routing=True + routed_params = process_routing(self, "transform", **params) + Xt = X + for _, name, transform in self._iter(): + Xt = transform.transform(Xt, **routed_params[name].transform) + return Xt + + def _can_inverse_transform(self): + return all(hasattr(t, "inverse_transform") for _, _, t in self._iter()) + + @available_if(_can_inverse_transform) + def inverse_transform(self, Xt, **params): + """Apply `inverse_transform` for each step in a reverse order. + + All estimators in the pipeline must support `inverse_transform`. + + Parameters + ---------- + Xt : array-like of shape (n_samples, n_transformed_features) + Data samples, where ``n_samples`` is the number of samples and + ``n_features`` is the number of features. Must fulfill + input requirements of last step of pipeline's + ``inverse_transform`` method. + + **params : dict of str -> object + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_features) + Inverse transformed data, that is, data in the original feature + space. + """ + _raise_for_params(params, self, "inverse_transform") + + # we don't have to branch here, since params is only non-empty if + # enable_metadata_routing=True. + routed_params = process_routing(self, "inverse_transform", **params) + reverse_iter = reversed(list(self._iter())) + for _, name, transform in reverse_iter: + Xt = transform.inverse_transform( + Xt, **routed_params[name].inverse_transform + ) + return Xt + + @available_if(_final_estimator_has("score")) + def score(self, X, y=None, sample_weight=None, **params): + """Transform the data, and apply `score` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `score` method. Only valid if the final estimator implements `score`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + y : iterable, default=None + Targets used for scoring. Must fulfill label requirements for all + steps of the pipeline. + + sample_weight : array-like, default=None + If not None, this argument is passed as ``sample_weight`` keyword + argument to the ``score`` method of the final estimator. 
+ + **params : dict of str -> object + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + score : float + Result of calling `score` on the final estimator. + """ + Xt = X + if not _routing_enabled(): + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt) + score_params = {} + if sample_weight is not None: + score_params["sample_weight"] = sample_weight + return self.steps[-1][1].score(Xt, y, **score_params) + + # metadata routing is enabled. + routed_params = process_routing( + self, "score", sample_weight=sample_weight, **params + ) + + Xt = X + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt, **routed_params[name].transform) + return self.steps[-1][1].score(Xt, y, **routed_params[self.steps[-1][0]].score) + + @property + def classes_(self): + """The classes labels. Only exist if the last step is a classifier.""" + return self.steps[-1][1].classes_ + + def _more_tags(self): + tags = { + "_xfail_checks": { + "check_dont_overwrite_parameters": ( + "Pipeline changes the `steps` parameter, which it shouldn't." + "Therefore this test is x-fail until we fix this." + ), + "check_estimators_overwrite_params": ( + "Pipeline changes the `steps` parameter, which it shouldn't." + "Therefore this test is x-fail until we fix this." + ), + } + } + + try: + tags["pairwise"] = _safe_tags(self.steps[0][1], "pairwise") + except (ValueError, AttributeError, TypeError): + # This happens when the `steps` is not a list of (name, estimator) + # tuples and `fit` is not called yet to validate the steps. + pass + + try: + tags["multioutput"] = _safe_tags(self.steps[-1][1], "multioutput") + except (ValueError, AttributeError, TypeError): + # This happens when the `steps` is not a list of (name, estimator) + # tuples and `fit` is not called yet to validate the steps. + pass + + return tags + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Transform input features using the pipeline. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + feature_names_out = input_features + for _, name, transform in self._iter(): + if not hasattr(transform, "get_feature_names_out"): + raise AttributeError( + "Estimator {} does not provide get_feature_names_out. 
" + "Did you mean to call pipeline[:-1].get_feature_names_out" + "()?".format(name) + ) + feature_names_out = transform.get_feature_names_out(feature_names_out) + return feature_names_out + + @property + def n_features_in_(self): + """Number of features seen during first step `fit` method.""" + # delegate to first step (which will call _check_is_fitted) + return self.steps[0][1].n_features_in_ + + @property + def feature_names_in_(self): + """Names of features seen during first step `fit` method.""" + # delegate to first step (which will call _check_is_fitted) + return self.steps[0][1].feature_names_in_ + + def __sklearn_is_fitted__(self): + """Indicate whether pipeline has been fit.""" + try: + # check if the last step of the pipeline is fitted + # we only check the last step since if the last step is fit, it + # means the previous steps should also be fit. This is faster than + # checking if every step of the pipeline is fit. + check_is_fitted(self.steps[-1][1]) + return True + except NotFittedError: + return False + + def _sk_visual_block_(self): + _, estimators = zip(*self.steps) + + def _get_name(name, est): + if est is None or est == "passthrough": + return f"{name}: passthrough" + # Is an estimator + return f"{name}: {est.__class__.__name__}" + + names = [_get_name(name, est) for name, est in self.steps] + name_details = [str(est) for est in estimators] + return _VisualBlock( + "serial", + estimators, + names=names, + name_details=name_details, + dash_wrapped=False, + ) + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + router = MetadataRouter(owner=self.__class__.__name__) + + # first we add all steps except the last one + for _, name, trans in self._iter(with_final=False, filter_passthrough=True): + method_mapping = MethodMapping() + # fit, fit_predict, and fit_transform call fit_transform if it + # exists, or else fit and transform + if hasattr(trans, "fit_transform"): + ( + method_mapping.add(caller="fit", callee="fit_transform") + .add(caller="fit_transform", callee="fit_transform") + .add(caller="fit_predict", callee="fit_transform") + ) + else: + ( + method_mapping.add(caller="fit", callee="fit") + .add(caller="fit", callee="transform") + .add(caller="fit_transform", callee="fit") + .add(caller="fit_transform", callee="transform") + .add(caller="fit_predict", callee="fit") + .add(caller="fit_predict", callee="transform") + ) + + ( + method_mapping.add(caller="predict", callee="transform") + .add(caller="predict", callee="transform") + .add(caller="predict_proba", callee="transform") + .add(caller="decision_function", callee="transform") + .add(caller="predict_log_proba", callee="transform") + .add(caller="transform", callee="transform") + .add(caller="inverse_transform", callee="inverse_transform") + .add(caller="score", callee="transform") + ) + + router.add(method_mapping=method_mapping, **{name: trans}) + + final_name, final_est = self.steps[-1] + if final_est is None or final_est == "passthrough": + return router + + # then we add the last step + method_mapping = MethodMapping() + if hasattr(final_est, "fit_transform"): + method_mapping.add(caller="fit_transform", callee="fit_transform") + else: + method_mapping.add(caller="fit", callee="fit").add( + caller="fit", callee="transform" + ) + ( + method_mapping.add(caller="fit", callee="fit") + .add(caller="predict", callee="predict") + .add(caller="fit_predict", callee="fit_predict") + .add(caller="predict_proba", callee="predict_proba") + .add(caller="decision_function", callee="decision_function") + .add(caller="predict_log_proba", callee="predict_log_proba") + .add(caller="transform", callee="transform") + .add(caller="inverse_transform", callee="inverse_transform") + .add(caller="score", callee="score") + ) + + router.add(method_mapping=method_mapping, **{final_name: final_est}) + return router + + +def _name_estimators(estimators): + """Generate names for estimators.""" + + names = [ + estimator if isinstance(estimator, str) else type(estimator).__name__.lower() + for estimator in estimators + ] + namecount = defaultdict(int) + for est, name in zip(estimators, names): + namecount[name] += 1 + + for k, v in list(namecount.items()): + if v == 1: + del namecount[k] + + for i in reversed(range(len(estimators))): + name = names[i] + if name in namecount: + names[i] += "-%d" % namecount[name] + namecount[name] -= 1 + + return list(zip(names, estimators)) + + +def make_pipeline(*steps, memory=None, verbose=False): + """Construct a :class:`Pipeline` from the given estimators. + + This is a shorthand for the :class:`Pipeline` constructor; it does not + require, and does not permit, naming the estimators. Instead, their names + will be set to the lowercase of their types automatically. + + Parameters + ---------- + *steps : list of Estimator objects + List of the scikit-learn estimators that are chained together. + + memory : str or object with the joblib.Memory interface, default=None + Used to cache the fitted transformers of the pipeline. The last step + will never be cached, even if it is a transformer. 
By default, no + caching is performed. If a string is given, it is the path to the + caching directory. Enabling caching triggers a clone of the transformers + before fitting. Therefore, the transformer instance given to the + pipeline cannot be inspected directly. Use the attribute ``named_steps`` + or ``steps`` to inspect estimators within the pipeline. Caching the + transformers is advantageous when fitting is time consuming. + + verbose : bool, default=False + If True, the time elapsed while fitting each step will be printed as it + is completed. + + Returns + ------- + p : Pipeline + Returns a scikit-learn :class:`Pipeline` object. + + See Also + -------- + Pipeline : Class for creating a pipeline of transforms with a final + estimator. + + Examples + -------- + >>> from sklearn.naive_bayes import GaussianNB + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.pipeline import make_pipeline + >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('gaussiannb', GaussianNB())]) + """ + return Pipeline(_name_estimators(steps), memory=memory, verbose=verbose) + + +def _transform_one(transformer, X, y, weight, params): + """Call transform and apply weight to output. + + Parameters + ---------- + transformer : estimator + Estimator to be used for transformation. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data to be transformed. + + y : ndarray of shape (n_samples,) + Ignored. + + weight : float + Weight to be applied to the output of the transformation. + + params : dict + Parameters to be passed to the transformer's ``transform`` method. + + This should be of the form ``process_routing()["step_name"]``. + """ + res = transformer.transform(X, **params.transform) + # if we have a weight for this transformer, multiply output + if weight is None: + return res + return res * weight + + +def _fit_transform_one( + transformer, X, y, weight, message_clsname="", message=None, params=None +): + """ + Fits ``transformer`` to ``X`` and ``y``. The transformed result is returned + with the fitted transformer. If ``weight`` is not ``None``, the result will + be multiplied by ``weight``. + + ``params`` needs to be of the form ``process_routing()["step_name"]``. + """ + params = params or {} + with _print_elapsed_time(message_clsname, message): + if hasattr(transformer, "fit_transform"): + res = transformer.fit_transform(X, y, **params.get("fit_transform", {})) + else: + res = transformer.fit(X, y, **params.get("fit", {})).transform( + X, **params.get("transform", {}) + ) + + if weight is None: + return res, transformer + return res * weight, transformer + + +def _fit_one(transformer, X, y, weight, message_clsname="", message=None, params=None): + """ + Fits ``transformer`` to ``X`` and ``y``. + """ + with _print_elapsed_time(message_clsname, message): + return transformer.fit(X, y, **params["fit"]) + + +class FeatureUnion(_RoutingNotSupportedMixin, TransformerMixin, _BaseComposition): + """Concatenates results of multiple transformer objects. + + This estimator applies a list of transformer objects in parallel to the + input data, then concatenates the results. This is useful to combine + several feature extraction mechanisms into a single transformer. + + Parameters of the transformers may be set using its name and the parameter + name separated by a '__'. 
A transformer may be replaced entirely by + setting the parameter with its name to another transformer, removed by + setting to 'drop' or disabled by setting to 'passthrough' (features are + passed without transformation). + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + transformer_list : list of (str, transformer) tuples + List of transformer objects to be applied to the data. The first + half of each tuple is the name of the transformer. The transformer can + be 'drop' for it to be ignored or can be 'passthrough' for features to + be passed unchanged. + + .. versionadded:: 1.1 + Added the option `"passthrough"`. + + .. versionchanged:: 0.22 + Deprecated `None` as a transformer in favor of 'drop'. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + transformer_weights : dict, default=None + Multiplicative weights for features per transformer. + Keys are transformer names, values the weights. + Raises ValueError if key not present in ``transformer_list``. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + Attributes + ---------- + named_transformers : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + Read-only attribute to access any transformer parameter by user + given name. Keys are transformer names and values are + transformer parameters. + + .. versionadded:: 1.2 + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying first transformer in `transformer_list` exposes such an + attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when + `X` has feature names that are all strings. + + .. versionadded:: 1.3 + + See Also + -------- + make_union : Convenience function for simplified feature union + construction. + + Examples + -------- + >>> from sklearn.pipeline import FeatureUnion + >>> from sklearn.decomposition import PCA, TruncatedSVD + >>> union = FeatureUnion([("pca", PCA(n_components=1)), + ... ("svd", TruncatedSVD(n_components=2))]) + >>> X = [[0., 1., 3], [2., 2., 5]] + >>> union.fit_transform(X) + array([[ 1.5 , 3.0..., 0.8...], + [-1.5 , 5.7..., -0.4...]]) + >>> # An estimator's parameter can be set using '__' syntax + >>> union.set_params(svd__n_components=1).fit_transform(X) + array([[ 1.5 , 3.0...], + [-1.5 , 5.7...]]) + + For a more detailed example of usage, see + :ref:`sphx_glr_auto_examples_compose_plot_feature_union.py`. + """ + + _required_parameters = ["transformer_list"] + + def __init__( + self, transformer_list, *, n_jobs=None, transformer_weights=None, verbose=False + ): + self.transformer_list = transformer_list + self.n_jobs = n_jobs + self.transformer_weights = transformer_weights + self.verbose = verbose + + def set_output(self, *, transform=None): + """Set the output container when `"transform"` and `"fit_transform"` are called. + + `set_output` will set the output of all estimators in `transformer_list`. + + Parameters + ---------- + transform : {"default", "pandas"}, default=None + Configure output of `transform` and `fit_transform`. 
+ + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `None`: Transform configuration is unchanged + + Returns + ------- + self : estimator instance + Estimator instance. + """ + super().set_output(transform=transform) + for _, step, _ in self._iter(): + _safe_set_output(step, transform=transform) + return self + + @property + def named_transformers(self): + # Use Bunch object to improve autocomplete + return Bunch(**dict(self.transformer_list)) + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Returns the parameters given in the constructor as well as the + estimators contained within the `transformer_list` of the + `FeatureUnion`. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : mapping of string to any + Parameter names mapped to their values. + """ + return self._get_params("transformer_list", deep=deep) + + def set_params(self, **kwargs): + """Set the parameters of this estimator. + + Valid parameter keys can be listed with ``get_params()``. Note that + you can directly set the parameters of the estimators contained in + `transformer_list`. + + Parameters + ---------- + **kwargs : dict + Parameters of this estimator or parameters of estimators contained + in `transform_list`. Parameters of the transformers may be set + using its name and the parameter name separated by a '__'. + + Returns + ------- + self : object + FeatureUnion class instance. + """ + self._set_params("transformer_list", **kwargs) + return self + + def _validate_transformers(self): + names, transformers = zip(*self.transformer_list) + + # validate names + self._validate_names(names) + + # validate estimators + for t in transformers: + if t in ("drop", "passthrough"): + continue + if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr( + t, "transform" + ): + raise TypeError( + "All estimators should implement fit and " + "transform. '%s' (type %s) doesn't" % (t, type(t)) + ) + + def _validate_transformer_weights(self): + if not self.transformer_weights: + return + + transformer_names = set(name for name, _ in self.transformer_list) + for name in self.transformer_weights: + if name not in transformer_names: + raise ValueError( + f'Attempting to weight transformer "{name}", ' + "but it is not present in transformer_list." + ) + + def _iter(self): + """ + Generate (name, trans, weight) tuples excluding None and + 'drop' transformers. + """ + + get_weight = (self.transformer_weights or {}).get + + for name, trans in self.transformer_list: + if trans == "drop": + continue + if trans == "passthrough": + trans = FunctionTransformer(feature_names_out="one-to-one") + yield (name, trans, get_weight(name)) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + feature_names = [] + for name, trans, _ in self._iter(): + if not hasattr(trans, "get_feature_names_out"): + raise AttributeError( + "Transformer %s (type %s) does not provide get_feature_names_out." 
+ % (str(name), type(trans).__name__) + ) + feature_names.extend( + [f"{name}__{f}" for f in trans.get_feature_names_out(input_features)] + ) + return np.asarray(feature_names, dtype=object) + + def fit(self, X, y=None, **fit_params): + """Fit all transformers using X. + + Parameters + ---------- + X : iterable or array-like, depending on transformers + Input data, used to fit transformers. + + y : array-like of shape (n_samples, n_outputs), default=None + Targets for supervised learning. + + **fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + Returns + ------- + self : object + FeatureUnion class instance. + """ + _raise_for_unsupported_routing(self, "fit", **fit_params) + transformers = self._parallel_func(X, y, fit_params, _fit_one) + if not transformers: + # All transformers are None + return self + + self._update_transformer_list(transformers) + return self + + def fit_transform(self, X, y=None, **fit_params): + """Fit all transformers, transform the data and concatenate results. + + Parameters + ---------- + X : iterable or array-like, depending on transformers + Input data to be transformed. + + y : array-like of shape (n_samples, n_outputs), default=None + Targets for supervised learning. + + **fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + Returns + ------- + X_t : array-like or sparse matrix of \ + shape (n_samples, sum_n_components) + The `hstack` of results of transformers. `sum_n_components` is the + sum of `n_components` (output dimension) over transformers. + """ + results = self._parallel_func(X, y, fit_params, _fit_transform_one) + if not results: + # All transformers are None + return np.zeros((X.shape[0], 0)) + + Xs, transformers = zip(*results) + self._update_transformer_list(transformers) + + return self._hstack(Xs) + + def _log_message(self, name, idx, total): + if not self.verbose: + return None + return "(step %d of %d) Processing %s" % (idx, total, name) + + def _parallel_func(self, X, y, fit_params, func): + """Runs func in parallel on X and y""" + self.transformer_list = list(self.transformer_list) + self._validate_transformers() + self._validate_transformer_weights() + transformers = list(self._iter()) + + params = Bunch(fit=fit_params, fit_transform=fit_params) + + return Parallel(n_jobs=self.n_jobs)( + delayed(func)( + transformer, + X, + y, + weight, + message_clsname="FeatureUnion", + message=self._log_message(name, idx, len(transformers)), + params=params, + ) + for idx, (name, transformer, weight) in enumerate(transformers, 1) + ) + + def transform(self, X): + """Transform X separately by each transformer, concatenate results. + + Parameters + ---------- + X : iterable or array-like, depending on transformers + Input data to be transformed. + + Returns + ------- + X_t : array-like or sparse matrix of \ + shape (n_samples, sum_n_components) + The `hstack` of results of transformers. `sum_n_components` is the + sum of `n_components` (output dimension) over transformers. + """ + # TODO(SLEP6): accept **params here in `transform` and route it to the + # underlying estimators. 
+ params = Bunch(transform={}) + Xs = Parallel(n_jobs=self.n_jobs)( + delayed(_transform_one)(trans, X, None, weight, params) + for name, trans, weight in self._iter() + ) + if not Xs: + # All transformers are None + return np.zeros((X.shape[0], 0)) + + return self._hstack(Xs) + + def _hstack(self, Xs): + adapter = _get_container_adapter("transform", self) + if adapter and all(adapter.is_supported_container(X) for X in Xs): + return adapter.hstack(Xs) + + if any(sparse.issparse(f) for f in Xs): + Xs = sparse.hstack(Xs).tocsr() + else: + Xs = np.hstack(Xs) + return Xs + + def _update_transformer_list(self, transformers): + transformers = iter(transformers) + self.transformer_list[:] = [ + (name, old if old == "drop" else next(transformers)) + for name, old in self.transformer_list + ] + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`.""" + + # X is passed to all transformers so we just delegate to the first one + return self.transformer_list[0][1].n_features_in_ + + @property + def feature_names_in_(self): + """Names of features seen during :term:`fit`.""" + # X is passed to all transformers -- delegate to the first one + return self.transformer_list[0][1].feature_names_in_ + + def __sklearn_is_fitted__(self): + # Delegate whether feature union was fitted + for _, transformer, _ in self._iter(): + check_is_fitted(transformer) + return True + + def _sk_visual_block_(self): + names, transformers = zip(*self.transformer_list) + return _VisualBlock("parallel", transformers, names=names) + + def __getitem__(self, name): + """Return transformer with name.""" + if not isinstance(name, str): + raise KeyError("Only string keys are supported") + return self.named_transformers[name] + + +def make_union(*transformers, n_jobs=None, verbose=False): + """Construct a :class:`FeatureUnion` from the given transformers. + + This is a shorthand for the :class:`FeatureUnion` constructor; it does not + require, and does not permit, naming the transformers. Instead, they will + be given names automatically based on their types. It also does not allow + weighting. + + Parameters + ---------- + *transformers : list of estimators + One or more estimators. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + Returns + ------- + f : FeatureUnion + A :class:`FeatureUnion` object for concatenating the results of multiple + transformer objects. + + See Also + -------- + FeatureUnion : Class for concatenating the results of multiple transformer + objects. 
+ + Examples + -------- + >>> from sklearn.decomposition import PCA, TruncatedSVD + >>> from sklearn.pipeline import make_union + >>> make_union(PCA(), TruncatedSVD()) + FeatureUnion(transformer_list=[('pca', PCA()), + ('truncatedsvd', TruncatedSVD())]) + """ + return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/random_projection.py b/llmeval-env/lib/python3.10/site-packages/sklearn/random_projection.py new file mode 100644 index 0000000000000000000000000000000000000000..c8c0193ac9b0b762c1c249cfc8a082ed08188f8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/random_projection.py @@ -0,0 +1,810 @@ +"""Random Projection transformers. + +Random Projections are a simple and computationally efficient way to +reduce the dimensionality of the data by trading a controlled amount +of accuracy (as additional variance) for faster processing times and +smaller model sizes. + +The dimensions and distribution of Random Projections matrices are +controlled so as to preserve the pairwise distances between any two +samples of the dataset. + +The main theoretical result behind the efficiency of random projection is the +`Johnson-Lindenstrauss lemma (quoting Wikipedia) +`_: + + In mathematics, the Johnson-Lindenstrauss lemma is a result + concerning low-distortion embeddings of points from high-dimensional + into low-dimensional Euclidean space. The lemma states that a small set + of points in a high-dimensional space can be embedded into a space of + much lower dimension in such a way that distances between the points are + nearly preserved. The map used for the embedding is at least Lipschitz, + and can even be taken to be an orthogonal projection. + +""" +# Authors: Olivier Grisel , +# Arnaud Joly +# License: BSD 3 clause + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy import linalg + +from .base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from .exceptions import DataDimensionalityWarning +from .utils import check_random_state +from .utils._param_validation import Interval, StrOptions, validate_params +from .utils.extmath import safe_sparse_dot +from .utils.random import sample_without_replacement +from .utils.validation import check_array, check_is_fitted + +__all__ = [ + "SparseRandomProjection", + "GaussianRandomProjection", + "johnson_lindenstrauss_min_dim", +] + + +@validate_params( + { + "n_samples": ["array-like", Interval(Real, 1, None, closed="left")], + "eps": ["array-like", Interval(Real, 0, 1, closed="neither")], + }, + prefer_skip_nested_validation=True, +) +def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1): + """Find a 'safe' number of components to randomly project to. + + The distortion introduced by a random projection `p` only changes the + distance between two points by a factor (1 +- eps) in a euclidean space + with good probability. The projection `p` is an eps-embedding as defined + by: + + (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2 + + Where u and v are any rows taken from a dataset of shape (n_samples, + n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian + N(0, 1) matrix of shape (n_components, n_features) (or a sparse + Achlioptas matrix). 
+ + The minimum number of components to guarantee the eps-embedding is + given by: + + n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3) + + Note that the number of dimensions is independent of the original + number of features but instead depends on the size of the dataset: + the larger the dataset, the higher is the minimal dimensionality of + an eps-embedding. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int or array-like of int + Number of samples that should be an integer greater than 0. If an array + is given, it will compute a safe number of components array-wise. + + eps : float or array-like of shape (n_components,), dtype=float, \ + default=0.1 + Maximum distortion rate in the range (0, 1) as defined by the + Johnson-Lindenstrauss lemma. If an array is given, it will compute a + safe number of components array-wise. + + Returns + ------- + n_components : int or ndarray of int + The minimal number of components to guarantee with good probability + an eps-embedding with n_samples. + + References + ---------- + + .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma + + .. [2] `Sanjoy Dasgupta and Anupam Gupta, 1999, + "An elementary proof of the Johnson-Lindenstrauss Lemma." + `_ + + Examples + -------- + >>> from sklearn.random_projection import johnson_lindenstrauss_min_dim + >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5) + 663 + + >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01]) + array([ 663, 11841, 1112658]) + + >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1) + array([ 7894, 9868, 11841]) + """ + eps = np.asarray(eps) + n_samples = np.asarray(n_samples) + + if np.any(eps <= 0.0) or np.any(eps >= 1): + raise ValueError("The JL bound is defined for eps in ]0, 1[, got %r" % eps) + + if np.any(n_samples <= 0): + raise ValueError( + "The JL bound is defined for n_samples greater than zero, got %r" + % n_samples + ) + + denominator = (eps**2 / 2) - (eps**3 / 3) + return (4 * np.log(n_samples) / denominator).astype(np.int64) + + +def _check_density(density, n_features): + """Factorize density check according to Li et al.""" + if density == "auto": + density = 1 / np.sqrt(n_features) + + elif density <= 0 or density > 1: + raise ValueError("Expected density in range ]0, 1], got: %r" % density) + return density + + +def _check_input_size(n_components, n_features): + """Factorize argument checking for random matrix generation.""" + if n_components <= 0: + raise ValueError( + "n_components must be strictly positive, got %d" % n_components + ) + if n_features <= 0: + raise ValueError("n_features must be strictly positive, got %d" % n_features) + + +def _gaussian_random_matrix(n_components, n_features, random_state=None): + """Generate a dense Gaussian random matrix. + + The components of the random matrix are drawn from + + N(0, 1.0 / n_components). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, + Dimensionality of the target projection space. + + n_features : int, + Dimensionality of the original source space. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generator used to generate the matrix + at fit time. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + components : ndarray of shape (n_components, n_features) + The generated Gaussian random matrix. 
+ + See Also + -------- + GaussianRandomProjection + """ + _check_input_size(n_components, n_features) + rng = check_random_state(random_state) + components = rng.normal( + loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features) + ) + return components + + +def _sparse_random_matrix(n_components, n_features, density="auto", random_state=None): + """Generalized Achlioptas random sparse matrix for random projection. + + Setting density to 1 / 3 will yield the original matrix by Dimitris + Achlioptas while setting a lower value will yield the generalization + by Ping Li et al. + + If we note :math:`s = 1 / density`, the components of the random matrix are + drawn from: + + - -sqrt(s) / sqrt(n_components) with probability 1 / 2s + - 0 with probability 1 - 1 / s + - +sqrt(s) / sqrt(n_components) with probability 1 / 2s + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, + Dimensionality of the target projection space. + + n_features : int, + Dimensionality of the original source space. + + density : float or 'auto', default='auto' + Ratio of non-zero component in the random projection matrix in the + range `(0, 1]` + + If density = 'auto', the value is set to the minimum density + as recommended by Ping Li et al.: 1 / sqrt(n_features). + + Use density = 1 / 3.0 if you want to reproduce the results from + Achlioptas, 2001. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generator used to generate the matrix + at fit time. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + components : {ndarray, sparse matrix} of shape (n_components, n_features) + The generated Gaussian random matrix. Sparse matrix will be of CSR + format. + + See Also + -------- + SparseRandomProjection + + References + ---------- + + .. [1] Ping Li, T. Hastie and K. W. Church, 2006, + "Very Sparse Random Projections". + https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf + + .. [2] D. Achlioptas, 2001, "Database-friendly random projections", + https://cgi.di.uoa.gr/~optas/papers/jl.pdf + + """ + _check_input_size(n_components, n_features) + density = _check_density(density, n_features) + rng = check_random_state(random_state) + + if density == 1: + # skip index generation if totally dense + components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1 + return 1 / np.sqrt(n_components) * components + + else: + # Generate location of non zero elements + indices = [] + offset = 0 + indptr = [offset] + for _ in range(n_components): + # find the indices of the non-zero components for row i + n_nonzero_i = rng.binomial(n_features, density) + indices_i = sample_without_replacement( + n_features, n_nonzero_i, random_state=rng + ) + indices.append(indices_i) + offset += n_nonzero_i + indptr.append(offset) + + indices = np.concatenate(indices) + + # Among non zero components the probability of the sign is 50%/50% + data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1 + + # build the CSR structure by concatenating the rows + components = sp.csr_matrix( + (data, indices, indptr), shape=(n_components, n_features) + ) + + return np.sqrt(1 / density) / np.sqrt(n_components) * components + + +class BaseRandomProjection( + TransformerMixin, BaseEstimator, ClassNamePrefixFeaturesOutMixin, metaclass=ABCMeta +): + """Base class for random projections. + + Warning: This class should not be used directly. + Use derived classes instead. 
+ """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + StrOptions({"auto"}), + ], + "eps": [Interval(Real, 0, None, closed="neither")], + "compute_inverse_components": ["boolean"], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + n_components="auto", + *, + eps=0.1, + compute_inverse_components=False, + random_state=None, + ): + self.n_components = n_components + self.eps = eps + self.compute_inverse_components = compute_inverse_components + self.random_state = random_state + + @abstractmethod + def _make_random_matrix(self, n_components, n_features): + """Generate the random projection matrix. + + Parameters + ---------- + n_components : int, + Dimensionality of the target projection space. + + n_features : int, + Dimensionality of the original source space. + + Returns + ------- + components : {ndarray, sparse matrix} of shape (n_components, n_features) + The generated random matrix. Sparse matrix will be of CSR format. + + """ + + def _compute_inverse_components(self): + """Compute the pseudo-inverse of the (densified) components.""" + components = self.components_ + if sp.issparse(components): + components = components.toarray() + return linalg.pinv(components, check_finite=False) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Generate a sparse random projection matrix. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training set: only the shape is used to find optimal random + matrix dimensions based on the theory referenced in the + afore mentioned papers. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + BaseRandomProjection class instance. + """ + X = self._validate_data( + X, accept_sparse=["csr", "csc"], dtype=[np.float64, np.float32] + ) + + n_samples, n_features = X.shape + + if self.n_components == "auto": + self.n_components_ = johnson_lindenstrauss_min_dim( + n_samples=n_samples, eps=self.eps + ) + + if self.n_components_ <= 0: + raise ValueError( + "eps=%f and n_samples=%d lead to a target dimension of " + "%d which is invalid" % (self.eps, n_samples, self.n_components_) + ) + + elif self.n_components_ > n_features: + raise ValueError( + "eps=%f and n_samples=%d lead to a target dimension of " + "%d which is larger than the original space with " + "n_features=%d" + % (self.eps, n_samples, self.n_components_, n_features) + ) + else: + if self.n_components > n_features: + warnings.warn( + "The number of components is higher than the number of" + " features: n_features < n_components (%s < %s)." + "The dimensionality of the problem will not be reduced." + % (n_features, self.n_components), + DataDimensionalityWarning, + ) + + self.n_components_ = self.n_components + + # Generate a projection matrix of size [n_components, n_features] + self.components_ = self._make_random_matrix( + self.n_components_, n_features + ).astype(X.dtype, copy=False) + + if self.compute_inverse_components: + self.inverse_components_ = self._compute_inverse_components() + + # Required by ClassNamePrefixFeaturesOutMixin.get_feature_names_out. + self._n_features_out = self.n_components + + return self + + def inverse_transform(self, X): + """Project data back to its original space. + + Returns an array X_original whose transform would be X. Note that even + if X is sparse, X_original is dense: this may use a lot of RAM. 
+ + If `compute_inverse_components` is False, the inverse of the components is + computed during each call to `inverse_transform` which can be costly. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_components) + Data to be transformed back. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + Reconstructed data. + """ + check_is_fitted(self) + + X = check_array(X, dtype=[np.float64, np.float32], accept_sparse=("csr", "csc")) + + if self.compute_inverse_components: + return X @ self.inverse_components_.T + + inverse_components = self._compute_inverse_components() + return X @ inverse_components.T + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } + + +class GaussianRandomProjection(BaseRandomProjection): + """Reduce dimensionality through Gaussian random projection. + + The components of the random matrix are drawn from N(0, 1 / n_components). + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + n_components : int or 'auto', default='auto' + Dimensionality of the target projection space. + + n_components can be automatically adjusted according to the + number of samples in the dataset and the bound given by the + Johnson-Lindenstrauss lemma. In that case the quality of the + embedding is controlled by the ``eps`` parameter. + + It should be noted that Johnson-Lindenstrauss lemma can yield + very conservative estimated of the required number of components + as it makes no assumption on the structure of the dataset. + + eps : float, default=0.1 + Parameter to control the quality of the embedding according to + the Johnson-Lindenstrauss lemma when `n_components` is set to + 'auto'. The value should be strictly positive. + + Smaller values lead to better embedding and higher number of + dimensions (n_components) in the target projection space. + + compute_inverse_components : bool, default=False + Learn the inverse transform by computing the pseudo-inverse of the + components during fit. Note that computing the pseudo-inverse does not + scale well to large matrices. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generator used to generate the + projection matrix at fit time. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + n_components_ : int + Concrete number of components computed when n_components="auto". + + components_ : ndarray of shape (n_components, n_features) + Random matrix used for the projection. + + inverse_components_ : ndarray of shape (n_features, n_components) + Pseudo-inverse of the components, only computed if + `compute_inverse_components` is True. + + .. versionadded:: 1.1 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SparseRandomProjection : Reduce dimensionality through sparse + random projection. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.random_projection import GaussianRandomProjection + >>> rng = np.random.RandomState(42) + >>> X = rng.rand(25, 3000) + >>> transformer = GaussianRandomProjection(random_state=rng) + >>> X_new = transformer.fit_transform(X) + >>> X_new.shape + (25, 2759) + """ + + def __init__( + self, + n_components="auto", + *, + eps=0.1, + compute_inverse_components=False, + random_state=None, + ): + super().__init__( + n_components=n_components, + eps=eps, + compute_inverse_components=compute_inverse_components, + random_state=random_state, + ) + + def _make_random_matrix(self, n_components, n_features): + """Generate the random projection matrix. + + Parameters + ---------- + n_components : int, + Dimensionality of the target projection space. + + n_features : int, + Dimensionality of the original source space. + + Returns + ------- + components : ndarray of shape (n_components, n_features) + The generated random matrix. + """ + random_state = check_random_state(self.random_state) + return _gaussian_random_matrix( + n_components, n_features, random_state=random_state + ) + + def transform(self, X): + """Project the data by using matrix product with the random matrix. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input data to project into a smaller dimensional space. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) + Projected array. + """ + check_is_fitted(self) + X = self._validate_data( + X, accept_sparse=["csr", "csc"], reset=False, dtype=[np.float64, np.float32] + ) + + return X @ self.components_.T + + +class SparseRandomProjection(BaseRandomProjection): + """Reduce dimensionality through sparse random projection. + + Sparse random matrix is an alternative to dense random + projection matrix that guarantees similar embedding quality while being + much more memory efficient and allowing faster computation of the + projected data. + + If we note `s = 1 / density` the components of the random matrix are + drawn from: + + - -sqrt(s) / sqrt(n_components) with probability 1 / 2s + - 0 with probability 1 - 1 / s + - +sqrt(s) / sqrt(n_components) with probability 1 / 2s + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + n_components : int or 'auto', default='auto' + Dimensionality of the target projection space. + + n_components can be automatically adjusted according to the + number of samples in the dataset and the bound given by the + Johnson-Lindenstrauss lemma. In that case the quality of the + embedding is controlled by the ``eps`` parameter. + + It should be noted that Johnson-Lindenstrauss lemma can yield + very conservative estimated of the required number of components + as it makes no assumption on the structure of the dataset. + + density : float or 'auto', default='auto' + Ratio in the range (0, 1] of non-zero component in the random + projection matrix. + + If density = 'auto', the value is set to the minimum density + as recommended by Ping Li et al.: 1 / sqrt(n_features). + + Use density = 1 / 3.0 if you want to reproduce the results from + Achlioptas, 2001. + + eps : float, default=0.1 + Parameter to control the quality of the embedding according to + the Johnson-Lindenstrauss lemma when n_components is set to + 'auto'. This value should be strictly positive. + + Smaller values lead to better embedding and higher number of + dimensions (n_components) in the target projection space. 
+ + dense_output : bool, default=False + If True, ensure that the output of the random projection is a + dense numpy array even if the input and random projection matrix + are both sparse. In practice, if the number of components is + small the number of zero components in the projected data will + be very small and it will be more CPU and memory efficient to + use a dense representation. + + If False, the projected data uses a sparse representation if + the input is sparse. + + compute_inverse_components : bool, default=False + Learn the inverse transform by computing the pseudo-inverse of the + components during fit. Note that the pseudo-inverse is always a dense + array, even if the training data was sparse. This means that it might be + necessary to call `inverse_transform` on a small batch of samples at a + time to avoid exhausting the available memory on the host. Moreover, + computing the pseudo-inverse does not scale well to large matrices. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generator used to generate the + projection matrix at fit time. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + n_components_ : int + Concrete number of components computed when n_components="auto". + + components_ : sparse matrix of shape (n_components, n_features) + Random matrix used for the projection. Sparse matrix will be of CSR + format. + + inverse_components_ : ndarray of shape (n_features, n_components) + Pseudo-inverse of the components, only computed if + `compute_inverse_components` is True. + + .. versionadded:: 1.1 + + density_ : float in range 0.0 - 1.0 + Concrete density computed from when density = "auto". + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GaussianRandomProjection : Reduce dimensionality through Gaussian + random projection. + + References + ---------- + + .. [1] Ping Li, T. Hastie and K. W. Church, 2006, + "Very Sparse Random Projections". + https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf + + .. [2] D. Achlioptas, 2001, "Database-friendly random projections", + https://cgi.di.uoa.gr/~optas/papers/jl.pdf + + Examples + -------- + >>> import numpy as np + >>> from sklearn.random_projection import SparseRandomProjection + >>> rng = np.random.RandomState(42) + >>> X = rng.rand(25, 3000) + >>> transformer = SparseRandomProjection(random_state=rng) + >>> X_new = transformer.fit_transform(X) + >>> X_new.shape + (25, 2759) + >>> # very few components are non-zero + >>> np.mean(transformer.components_ != 0) + 0.0182... 
+ """ + + _parameter_constraints: dict = { + **BaseRandomProjection._parameter_constraints, + "density": [Interval(Real, 0.0, 1.0, closed="right"), StrOptions({"auto"})], + "dense_output": ["boolean"], + } + + def __init__( + self, + n_components="auto", + *, + density="auto", + eps=0.1, + dense_output=False, + compute_inverse_components=False, + random_state=None, + ): + super().__init__( + n_components=n_components, + eps=eps, + compute_inverse_components=compute_inverse_components, + random_state=random_state, + ) + + self.dense_output = dense_output + self.density = density + + def _make_random_matrix(self, n_components, n_features): + """Generate the random projection matrix + + Parameters + ---------- + n_components : int + Dimensionality of the target projection space. + + n_features : int + Dimensionality of the original source space. + + Returns + ------- + components : sparse matrix of shape (n_components, n_features) + The generated random matrix in CSR format. + + """ + random_state = check_random_state(self.random_state) + self.density_ = _check_density(self.density, n_features) + return _sparse_random_matrix( + n_components, n_features, density=self.density_, random_state=random_state + ) + + def transform(self, X): + """Project the data by using matrix product with the random matrix. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input data to project into a smaller dimensional space. + + Returns + ------- + X_new : {ndarray, sparse matrix} of shape (n_samples, n_components) + Projected array. It is a sparse matrix only when the input is sparse and + `dense_output = False`. + """ + check_is_fitted(self) + X = self._validate_data( + X, accept_sparse=["csr", "csc"], reset=False, dtype=[np.float64, np.float32] + ) + + return safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output) diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..640b66e9f96cf8920639b5a109aa7fc951252a8d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA @@ -0,0 +1,209 @@ +Metadata-Version: 2.3 +Name: tokenizers +Version: 0.19.1 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Dist: huggingface-hub >=0.16.4, <1.0 +Requires-Dist: pytest ; extra == 'testing' +Requires-Dist: requests ; 
extra == 'testing' +Requires-Dist: numpy ; extra == 'testing' +Requires-Dist: datasets ; extra == 'testing' +Requires-Dist: black ==22.3 ; extra == 'testing' +Requires-Dist: ruff ; extra == 'testing' +Requires-Dist: sphinx ; extra == 'docs' +Requires-Dist: sphinx-rtd-theme ; extra == 'docs' +Requires-Dist: setuptools-rust ; extra == 'docs' +Requires-Dist: tokenizers[testing] ; extra == 'dev' +Provides-Extra: testing +Provides-Extra: docs +Provides-Extra: dev +Keywords: NLP,tokenizer,BPE,transformer,deep learning +Author: Anthony MOI +Author-email: Nicolas Patry , Anthony Moi +Requires-Python: >=3.7 +Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM +Project-URL: Homepage, https://github.com/huggingface/tokenizers +Project-URL: Source, https://github.com/huggingface/tokenizers + +

+ +# Tokenizers + +Provides an implementation of today's most used tokenizers, with a focus on performance and +versatility. + +Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation. +If you are interested in the High-level design, you can go check it there. + +Otherwise, let's dive in! + +## Main features: + + - Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3 + most common BPE versions). + - Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes + less than 20 seconds to tokenize a GB of text on a server's CPU. + - Easy to use, but also extremely versatile. + - Designed for research and production. + - Normalization comes with alignments tracking. It's always possible to get the part of the + original sentence that corresponds to a given token. + - Does all the pre-processing: Truncate, Pad, add the special tokens your model needs. + +### Installation + +#### With pip: + +```bash +pip install tokenizers +``` + +#### From sources: + +To use this method, you need to have the Rust installed: + +```bash +# Install with: +curl https://sh.rustup.rs -sSf | sh -s -- -y +export PATH="$HOME/.cargo/bin:$PATH" +``` + +Once Rust is installed, you can compile doing the following + +```bash +git clone https://github.com/huggingface/tokenizers +cd tokenizers/bindings/python + +# Create a virtual env (you can use yours as well) +python -m venv .env +source .env/bin/activate + +# Install `tokenizers` in the current virtual env +pip install -e . +``` + +### Load a pretrained tokenizer from the Hub + +```python +from tokenizers import Tokenizer + +tokenizer = Tokenizer.from_pretrained("bert-base-cased") +``` + +### Using the provided Tokenizers + +We provide some pre-build tokenizers to cover the most common cases. You can easily load one of +these using some `vocab.json` and `merges.txt` files: + +```python +from tokenizers import CharBPETokenizer + +# Initialize a tokenizer +vocab = "./path/to/vocab.json" +merges = "./path/to/merges.txt" +tokenizer = CharBPETokenizer(vocab, merges) + +# And then encode: +encoded = tokenizer.encode("I can feel the magic, can you?") +print(encoded.ids) +print(encoded.tokens) +``` + +And you can train them just as simply: + +```python +from tokenizers import CharBPETokenizer + +# Initialize a tokenizer +tokenizer = CharBPETokenizer() + +# Then train it! +tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ]) + +# Now, let's use it: +encoded = tokenizer.encode("I can feel the magic, can you?") + +# And finally save it somewhere +tokenizer.save("./path/to/directory/my-bpe.tokenizer.json") +``` + +#### Provided Tokenizers + + - `CharBPETokenizer`: The original BPE + - `ByteLevelBPETokenizer`: The byte level version of the BPE + - `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece + - `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece + +All of these can be used and trained as explained above! + +### Build your own + +Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer, +by putting all the different parts you need together. +You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs. 
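
As a quick illustration of the same idea, here is a minimal sketch (not taken from the upstream docs) that assembles a WordPiece tokenizer from the `tokenizers` building blocks; the file paths, vocabulary size and special tokens are placeholders. The byte-level BPE walkthrough below follows the same pattern in more detail.

```python
from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, trainers

# Start from a bare WordPiece model with an explicit unknown token
tokenizer = Tokenizer(models.WordPiece(unk_token="[UNK]"))

# BERT-style normalization and simple whitespace pre-tokenization
tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True)
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

# Train on plain-text files (paths and hyper-parameters are placeholders)
trainer = trainers.WordPieceTrainer(
    vocab_size=30000,
    special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)
tokenizer.train(["./path/to/files/1.txt", "./path/to/files/2.txt"], trainer=trainer)

# Use it and save it, just like the provided tokenizers
encoded = tokenizer.encode("I can feel the magic, can you?")
tokenizer.save("./path/to/directory/my-wordpiece.tokenizer.json")
```
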
+ +#### Building a byte-level BPE + +Here is an example showing how to build your own byte-level BPE by putting all the different pieces +together, and then saving it to a single file: + +```python +from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors + +# Initialize a tokenizer +tokenizer = Tokenizer(models.BPE()) + +# Customize pre-tokenization and decoding +tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True) +tokenizer.decoder = decoders.ByteLevel() +tokenizer.post_processor = processors.ByteLevel(trim_offsets=True) + +# And then train +trainer = trainers.BpeTrainer( + vocab_size=20000, + min_frequency=2, + initial_alphabet=pre_tokenizers.ByteLevel.alphabet() +) +tokenizer.train([ + "./path/to/dataset/1.txt", + "./path/to/dataset/2.txt", + "./path/to/dataset/3.txt" +], trainer=trainer) + +# And Save it +tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True) +``` + +Now, when you want to use this tokenizer, this is as simple as: + +```python +from tokenizers import Tokenizer + +tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json") + +encoded = tokenizer.encode("I can feel the magic, can you?") +``` + diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..88de5d48c791917c7be67052af626dddf5ce3967 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD @@ -0,0 +1,45 @@ +tokenizers-0.19.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tokenizers-0.19.1.dist-info/METADATA,sha256=rCAgg9DA_ZsETxHzh_dz0hPeRKTvoj9m9kUNZe14vxc,6719 +tokenizers-0.19.1.dist-info/RECORD,, +tokenizers-0.19.1.dist-info/WHEEL,sha256=JL8sd1C0RQ2f7cmwbAn1Jp257v_vSS2r0VvTBpJeZwA,129 +tokenizers/__init__.py,sha256=ZE5ZagUvobBScrHBQdEobhx4wqM0bsq9F9aLYkBNjYQ,2615 +tokenizers/__init__.pyi,sha256=YBIWZCSN4Rs_-yKdEwhVv77bgHRE36hX9iwFrWGMJ8E,38536 +tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/decoders/__init__.py,sha256=lGp32h8qerE0F48gyZL8wGmeQVlmjVpeIsRb1SM9kf4,335 +tokenizers/decoders/__init__.pyi,sha256=xsReo7OFRCiQ4bBZY9ogYb1iLJ5DTgI5elNB-Uggocs,7244 +tokenizers/decoders/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__init__.py,sha256=VzAsplaIo7rl4AFO8Miu7ig7MfZjvonwVblZw01zR6M,310 +tokenizers/implementations/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc,, +tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc,, +tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc,, +tokenizers/implementations/base_tokenizer.py,sha256=2TFZhLupaJiMDYGJuUNmxYJv-cnR8bDHmbMzaYpFROs,14206 +tokenizers/implementations/bert_wordpiece.py,sha256=sKCum0FKPYdSgJFJN8LDerVBoTDRSqyqSdrcm-lvQqI,5520 +tokenizers/implementations/byte_level_bpe.py,sha256=OA_jyy3EQmYTa6hnf-EKwLOFuyroqFYOJz25ysM2BUk,4289 +tokenizers/implementations/char_level_bpe.py,sha256=Q2ZEAW0xMQHF7YCUtmplwaxbU-J0P2NK4PJGMxUb-_c,5466 +tokenizers/implementations/sentencepiece_bpe.py,sha256=LwrofoohnUfME2lK2lQYoyQIhP84RP0CIlHRaj0hyNs,3738 
+tokenizers/implementations/sentencepiece_unigram.py,sha256=SYiVXL8ZtqLXKpuqwnwmrfxgGotu8yAkOu7dLztEXIo,7580 +tokenizers/models/__init__.py,sha256=eJZ4HTAQZpxnKILNylWaTFqxXy-Ba6OKswWN47feeV8,176 +tokenizers/models/__init__.pyi,sha256=wH4M-ZZprw3UQ98fxWrF3MpivuNVY3s3pv4pGY0A_kE,16932 +tokenizers/models/__pycache__/__init__.cpython-310.pyc,, +tokenizers/normalizers/__init__.py,sha256=hKOwnqWM-IlcVv7HDWT9SYhlczevuCNDQJY05ZFxkzk,808 +tokenizers/normalizers/__init__.pyi,sha256=5SGm-u896MZht6TXMS9sWv1lCATnwNqbC2Udl5aP4dg,19597 +tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/pre_tokenizers/__init__.py,sha256=wd6KYQA_RsGSQK-HeG9opTRhv4ttSRkyno2dk6az-PM,557 +tokenizers/pre_tokenizers/__init__.pyi,sha256=IhF7dZt9_9_WM2ESKwEIvN59uW_YzS2PzmWBUScysWU,23258 +tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/processors/__init__.py,sha256=xM2DEKwKtHIumHsszM8AMkq-AlaqvBZFXWgLU8SNhOY,307 +tokenizers/processors/__init__.pyi,sha256=hx767ZY8SHhxb_hiXPRxm-f_KcoR4XDx7vfK2c0lR-Q,11357 +tokenizers/processors/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so,sha256=Pkcy8QzpfJ9ekDd03LlTwPFRj0Cyfih5kgdTEYNc784,11815960 +tokenizers/tools/__init__.py,sha256=xG8caB9OHC8cbB01S5vYV14HZxhO6eWbLehsb70ppio,55 +tokenizers/tools/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tools/__pycache__/visualizer.cpython-310.pyc,, +tokenizers/tools/visualizer-styles.css,sha256=zAydq1oGWD8QEll4-eyL8Llw0B1sty_hpIE3tYxL02k,4850 +tokenizers/tools/visualizer.py,sha256=gi-E2NCP7FuG6ujpQOdalSTXUlaV85V6NI-ZPPTvA_4,14625 +tokenizers/trainers/__init__.py,sha256=UTu22AGcp76IvpW45xLRbJWET04NxPW6NfCb2YYz0EM,248 +tokenizers/trainers/__init__.pyi,sha256=3TwFKts4me7zQfVRcSTmtXYiP4XwcRjfAYtwqoZVtoQ,5382 +tokenizers/trainers/__pycache__/__init__.cpython-310.pyc,, diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..efc3fe1c2443c99179e12648847b89e865477d5f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: maturin (1.5.1) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64 diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6954f79f1e1350c21b1edaf40979f431076140f6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfc8f0c3ac18957fc39a4444732c0246782c7e88 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a10a8c95bb2c628e05024791982ce5bcca168847 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f1f6c2cf916584d4c60b02f553a1006216d5dd6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4abceb1984c72307b5dbf2c0fc676a5f382cf9cb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..873a83440783c7dd0309a26bdf2b6caddff6fc73 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..820f11e34c7c912e02c04ad857184b1f715c068e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c549e9de42c410d10944ac02f6c08e6cb2d8001 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/tk.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/tk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0687e9e529fbd6cb267a36b52cfa645d7192874a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/tk.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..668ff14e7d4be3bee0381b40885bb3ed622e9a75 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tqdm/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash/__init__.py b/llmeval-env/lib/python3.10/site-packages/xxhash/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..043c486ddf9d2770d7fbee4bff1c26985ac6cd7e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash/__init__.py @@ -0,0 +1,63 @@ +from ._xxhash import ( + xxh32, + xxh32_digest, + xxh32_intdigest, + xxh32_hexdigest, + xxh64, + xxh64_digest, + xxh64_intdigest, + xxh64_hexdigest, + xxh3_64, + xxh3_64_digest, + xxh3_64_intdigest, + xxh3_64_hexdigest, + xxh3_128, + xxh3_128_digest, + 
xxh3_128_intdigest, + xxh3_128_hexdigest, + XXHASH_VERSION, +) + +from .version import VERSION, VERSION_TUPLE + + +xxh128 = xxh3_128 +xxh128_hexdigest = xxh3_128_hexdigest +xxh128_intdigest = xxh3_128_intdigest +xxh128_digest = xxh3_128_digest + +algorithms_available = set([ + "xxh32", + "xxh64", + "xxh3_64", + "xxh128", + "xxh3_128", +]) + + +__all__ = [ + "xxh32", + "xxh32_digest", + "xxh32_intdigest", + "xxh32_hexdigest", + "xxh64", + "xxh64_digest", + "xxh64_intdigest", + "xxh64_hexdigest", + "xxh3_64", + "xxh3_64_digest", + "xxh3_64_intdigest", + "xxh3_64_hexdigest", + "xxh3_128", + "xxh3_128_digest", + "xxh3_128_intdigest", + "xxh3_128_hexdigest", + "xxh128", + "xxh128_digest", + "xxh128_intdigest", + "xxh128_hexdigest", + "VERSION", + "VERSION_TUPLE", + "XXHASH_VERSION", + "algorithms_available", +] diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/xxhash/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..03c62497a3aba0fc5695ee033c0a402e41ae8c44 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash/__init__.pyi @@ -0,0 +1,62 @@ +import array +from typing import Union +from typing_extensions import final + +_InputType = Union[str, bytes, bytearray, memoryview, array.ArrayType[int]] + +VERSION: str +XXHASH_VERSION: str +VERSION_TUPLE: tuple[int, ...] + +algorithms_available: set[str] + +class _Hasher: + def __init__(self, input: _InputType = ..., seed: int = ...) -> None: ... + def update(self, input: _InputType) -> None: ... + def digest(self) -> bytes: ... + def hexdigest(self) -> str: ... + def intdigest(self) -> int: ... + def copy(self) -> _Hasher: ... + def reset(self) -> None: ... + @property + def digestsize(self) -> int: ... + @property + def digest_size(self) -> int: ... + @property + def block_size(self) -> int: ... + @property + def name(self) -> str: ... + @property + def seed(self) -> int: ... + +@final +class xxh32(_Hasher): ... + +@final +class xxh3_64(_Hasher): ... + +@final +class xxh3_128(_Hasher): ... + +xxh64 = xxh3_64 +xxh128 = xxh3_128 + +def xxh32_digest(args: _InputType, seed: int = ...) -> bytes: ... +def xxh32_hexdigest(args: _InputType, seed: int = ...) -> str: ... +def xxh32_intdigest(args: _InputType, seed: int = ...) -> int: ... + +def xxh3_64_digest(args: _InputType, seed: int = ...) -> bytes: ... +def xxh3_64_hexdigest(args: _InputType, seed: int = ...) -> str: ... +def xxh3_64_intdigest(args: _InputType, seed: int = ...) -> int: ... + +def xxh3_128_digest(args: _InputType, seed: int = ...) -> bytes: ... +def xxh3_128_hexdigest(args: _InputType, seed: int = ...) -> str: ... +def xxh3_128_intdigest(args: _InputType, seed: int = ...) -> int: ... 
+ +xxh64_digest = xxh3_64_digest +xxh64_hexdigest = xxh3_64_hexdigest +xxh64_intdigest = xxh3_64_intdigest + +xxh128_digest = xxh3_128_digest +xxh128_hexdigest = xxh3_128_hexdigest +xxh128_intdigest = xxh3_128_intdigest diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/xxhash/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceb0a2dbc1180a43d6248ca017387ed1b99c4b5d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/xxhash/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/xxhash/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca7a045bef51c55308ff7e637b8c411b93a83920 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/xxhash/__pycache__/version.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3a2e50404b3555ed22c1e112f54c4c6dd85d4a77 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash/py.typed b/llmeval-env/lib/python3.10/site-packages/xxhash/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash/version.py b/llmeval-env/lib/python3.10/site-packages/xxhash/version.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ca3e3bf151e33af0c511167c590e767caa2528 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash/version.py @@ -0,0 +1,2 @@ +VERSION = "3.4.1" +VERSION_TUPLE = (3, 4, 1)
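
To round off the xxhash files above, here is a small usage sketch based only on the API surface declared in `xxhash/__init__.py` and `__init__.pyi`; the byte strings and seed are arbitrary examples.

```python
import xxhash

# One-shot hashing of a byte string with an explicit seed
digest_hex = xxhash.xxh64_hexdigest(b"hello world", seed=0)

# Streaming interface: hasher objects accept incremental updates
h = xxhash.xxh64(seed=0)
h.update(b"hello ")
h.update(b"world")
assert h.hexdigest() == digest_hex
assert isinstance(h.intdigest(), int)

# xxh128 is an alias of xxh3_128, as defined in xxhash/__init__.py above
assert xxhash.xxh128 is xxhash.xxh3_128

print(sorted(xxhash.algorithms_available))
print(xxhash.VERSION)  # "3.4.1" per xxhash/version.py
```

The one-shot `*_hexdigest` / `*_intdigest` / `*_digest` helpers avoid constructing a hasher object, while the class-based interface supports incremental `update()` calls over streamed data.
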