diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate/saving.py b/env-llmeval/lib/python3.10/site-packages/evaluate/saving.py new file mode 100644 index 0000000000000000000000000000000000000000..4eea1a6a0c63551f0db9302130c7f5a7acced4fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate/saving.py @@ -0,0 +1,73 @@ +import json +import os +import subprocess +import sys +from datetime import datetime +from pathlib import Path + +from datasets.utils.filelock import FileLock + +from . import __version__ + + +def save(path_or_file, **data): + """ + Saves results to a JSON file. Also saves system information such as current time, current commit + hash if inside a repository, and Python system information. + + Args: + path_or_file (`str`): + Path or file to store the file. If only a folder is provided + the results file will be saved in the format `"result-%Y_%m_%d-%H_%M_%S.json"`. + + Example: + ```py + >>> import evaluate + >>> result = {"bleu": 0.7} + >>> params = {"model": "gpt-2"} + >>> evaluate.save("./results/", **result, **params) + ``` + """ + current_time = datetime.now() + + file_path = _setup_path(path_or_file, current_time) + + data["_timestamp"] = current_time.isoformat() + data["_git_commit_hash"] = _git_commit_hash() + data["_evaluate_version"] = __version__ + data["_python_version"] = sys.version + data["_interpreter_path"] = sys.executable + + with FileLock(str(file_path) + ".lock"): + with open(file_path, "w") as f: + json.dump(data, f) + + # cleanup lock file + try: + os.remove(str(file_path) + ".lock") + except FileNotFoundError: + pass + + return file_path + + +def _setup_path(path_or_file, current_time): + path_or_file = Path(path_or_file) + is_file = len(path_or_file.suffix) > 0 + if is_file: + folder = path_or_file.parent + file_name = path_or_file.name + else: + folder = path_or_file + file_name = "result-" + current_time.strftime("%Y_%m_%d-%H_%M_%S") + ".json" + folder.mkdir(parents=True, exist_ok=True) + return folder / file_name + + +def _git_commit_hash(): + res = subprocess.run("git rev-parse --is-inside-work-tree".split(), cwd="./", stdout=subprocess.PIPE) + if res.stdout.decode().strip() == "true": + res = subprocess.run("git rev-parse HEAD".split(), cwd=os.getcwd(), stdout=subprocess.PIPE) + return res.stdout.decode().strip() + else: + return None diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. 
By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains. + + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important NoticeRead before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. 
License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. 
Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. 
NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. + +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. 
If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. 
Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. 
Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + 
+libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Linux + +libnvjpeg.so, libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. 
To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. + http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. 
+ + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. 
+ + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. 
+ + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. 
Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. 
Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . + +----------------- diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..aabd4f7520077f5f9c18bae61ce8ab5754bc57fd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA @@ -0,0 +1,36 @@ +Metadata-Version: 2.1 +Name: nvidia-cusparse-cu12 +Version: 12.1.0.106 +Summary: CUSPARSE native runtime libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt +Requires-Dist: nvidia-nvjitlink-cu12 + +CUSPARSE native runtime libraries diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2cf8818e2b166fe605908388de295e1815bc92eb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD @@ -0,0 +1,17 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/cusparse.h,sha256=yhV9iTcEW9XEyhaJmX4iddh_cMb8sfNAy6qva5ae4qw,287290 
+nvidia/cusparse/include/cusparse_v2.h,sha256=jkH2A9hYc-TEF0vuQ_SurbhPNEHkYGUIRuxKXhFAqnw,2587 +nvidia/cusparse/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/lib/libcusparse.so.12,sha256=UARmovVZ3mIqcbuSDT0pI-aRNSRXR6J0LuE-3_C6YIU,264876688 +nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA,sha256=XpBtE4L1lFCx7gDu7Klx9dijNWQW26PS3fcOGjNIsXg,1550 +nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD,, +nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106 +nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-manylinux1_x86_64 + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2805070eef1d77567ecf094aa08049d0b0a797 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__init__.py @@ -0,0 +1,347 @@ +r""" +================================== +Constants (:mod:`scipy.constants`) +================================== + +.. currentmodule:: scipy.constants + +Physical and mathematical constants and units. 
+ + +Mathematical constants +====================== + +================ ================================================================= +``pi`` Pi +``golden`` Golden ratio +``golden_ratio`` Golden ratio +================ ================================================================= + + +Physical constants +================== + +=========================== ================================================================= +``c`` speed of light in vacuum +``speed_of_light`` speed of light in vacuum +``mu_0`` the magnetic constant :math:`\mu_0` +``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0` +``h`` the Planck constant :math:`h` +``Planck`` the Planck constant :math:`h` +``hbar`` :math:`\hbar = h/(2\pi)` +``G`` Newtonian constant of gravitation +``gravitational_constant`` Newtonian constant of gravitation +``g`` standard acceleration of gravity +``e`` elementary charge +``elementary_charge`` elementary charge +``R`` molar gas constant +``gas_constant`` molar gas constant +``alpha`` fine-structure constant +``fine_structure`` fine-structure constant +``N_A`` Avogadro constant +``Avogadro`` Avogadro constant +``k`` Boltzmann constant +``Boltzmann`` Boltzmann constant +``sigma`` Stefan-Boltzmann constant :math:`\sigma` +``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma` +``Wien`` Wien displacement law constant +``Rydberg`` Rydberg constant +``m_e`` electron mass +``electron_mass`` electron mass +``m_p`` proton mass +``proton_mass`` proton mass +``m_n`` neutron mass +``neutron_mass`` neutron mass +=========================== ================================================================= + + +Constants database +------------------ + +In addition to the above variables, :mod:`scipy.constants` also contains the +2018 CODATA recommended values [CODATA2018]_ database containing more physical +constants. + +.. autosummary:: + :toctree: generated/ + + value -- Value in physical_constants indexed by key + unit -- Unit in physical_constants indexed by key + precision -- Relative precision in physical_constants indexed by key + find -- Return list of physical_constant keys with a given string + ConstantWarning -- Constant sought not in newest CODATA data set + +.. data:: physical_constants + + Dictionary of physical constants, of the format + ``physical_constants[name] = (value, unit, uncertainty)``. 
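+
+A minimal lookup sketch (illustrative only; it exercises the helpers documented
+above, and the CODATA keys it uses, such as ``elementary charge`` and
+``electron mass``, are examples taken from the tables below):
+
+```py
+from scipy import constants
+from scipy.constants import value, unit, precision, find, physical_constants
+
+constants.c                          # speed of light in vacuum, m s^-1
+value('elementary charge')           # CODATA value looked up by key
+unit('elementary charge')            # -> 'C'
+precision('electron mass')           # relative uncertainty of the tabulated value
+find('boltzmann')                    # list of keys containing the substring
+physical_constants['electron mass']  # (value, unit, uncertainty) tuple
+```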
+ +Available constants: + +====================================================================== ==== +%(constant_names)s +====================================================================== ==== + + +Units +===== + +SI prefixes +----------- + +============ ================================================================= +``quetta`` :math:`10^{30}` +``ronna`` :math:`10^{27}` +``yotta`` :math:`10^{24}` +``zetta`` :math:`10^{21}` +``exa`` :math:`10^{18}` +``peta`` :math:`10^{15}` +``tera`` :math:`10^{12}` +``giga`` :math:`10^{9}` +``mega`` :math:`10^{6}` +``kilo`` :math:`10^{3}` +``hecto`` :math:`10^{2}` +``deka`` :math:`10^{1}` +``deci`` :math:`10^{-1}` +``centi`` :math:`10^{-2}` +``milli`` :math:`10^{-3}` +``micro`` :math:`10^{-6}` +``nano`` :math:`10^{-9}` +``pico`` :math:`10^{-12}` +``femto`` :math:`10^{-15}` +``atto`` :math:`10^{-18}` +``zepto`` :math:`10^{-21}` +``yocto`` :math:`10^{-24}` +``ronto`` :math:`10^{-27}` +``quecto`` :math:`10^{-30}` +============ ================================================================= + +Binary prefixes +--------------- + +============ ================================================================= +``kibi`` :math:`2^{10}` +``mebi`` :math:`2^{20}` +``gibi`` :math:`2^{30}` +``tebi`` :math:`2^{40}` +``pebi`` :math:`2^{50}` +``exbi`` :math:`2^{60}` +``zebi`` :math:`2^{70}` +``yobi`` :math:`2^{80}` +============ ================================================================= + +Mass +---- + +================= ============================================================ +``gram`` :math:`10^{-3}` kg +``metric_ton`` :math:`10^{3}` kg +``grain`` one grain in kg +``lb`` one pound (avoirdupous) in kg +``pound`` one pound (avoirdupous) in kg +``blob`` one inch version of a slug in kg (added in 1.0.0) +``slinch`` one inch version of a slug in kg (added in 1.0.0) +``slug`` one slug in kg (added in 1.0.0) +``oz`` one ounce in kg +``ounce`` one ounce in kg +``stone`` one stone in kg +``grain`` one grain in kg +``long_ton`` one long ton in kg +``short_ton`` one short ton in kg +``troy_ounce`` one Troy ounce in kg +``troy_pound`` one Troy pound in kg +``carat`` one carat in kg +``m_u`` atomic mass constant (in kg) +``u`` atomic mass constant (in kg) +``atomic_mass`` atomic mass constant (in kg) +================= ============================================================ + +Angle +----- + +================= ============================================================ +``degree`` degree in radians +``arcmin`` arc minute in radians +``arcminute`` arc minute in radians +``arcsec`` arc second in radians +``arcsecond`` arc second in radians +================= ============================================================ + + +Time +---- + +================= ============================================================ +``minute`` one minute in seconds +``hour`` one hour in seconds +``day`` one day in seconds +``week`` one week in seconds +``year`` one year (365 days) in seconds +``Julian_year`` one Julian year (365.25 days) in seconds +================= ============================================================ + + +Length +------ + +===================== ============================================================ +``inch`` one inch in meters +``foot`` one foot in meters +``yard`` one yard in meters +``mile`` one mile in meters +``mil`` one mil in meters +``pt`` one point in meters +``point`` one point in meters +``survey_foot`` one survey foot in meters +``survey_mile`` one survey mile in meters +``nautical_mile`` one nautical mile in meters +``fermi`` one 
Fermi in meters +``angstrom`` one Angstrom in meters +``micron`` one micron in meters +``au`` one astronomical unit in meters +``astronomical_unit`` one astronomical unit in meters +``light_year`` one light year in meters +``parsec`` one parsec in meters +===================== ============================================================ + +Pressure +-------- + +================= ============================================================ +``atm`` standard atmosphere in pascals +``atmosphere`` standard atmosphere in pascals +``bar`` one bar in pascals +``torr`` one torr (mmHg) in pascals +``mmHg`` one torr (mmHg) in pascals +``psi`` one psi in pascals +================= ============================================================ + +Area +---- + +================= ============================================================ +``hectare`` one hectare in square meters +``acre`` one acre in square meters +================= ============================================================ + + +Volume +------ + +=================== ======================================================== +``liter`` one liter in cubic meters +``litre`` one liter in cubic meters +``gallon`` one gallon (US) in cubic meters +``gallon_US`` one gallon (US) in cubic meters +``gallon_imp`` one gallon (UK) in cubic meters +``fluid_ounce`` one fluid ounce (US) in cubic meters +``fluid_ounce_US`` one fluid ounce (US) in cubic meters +``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters +``bbl`` one barrel in cubic meters +``barrel`` one barrel in cubic meters +=================== ======================================================== + +Speed +----- + +================== ========================================================== +``kmh`` kilometers per hour in meters per second +``mph`` miles per hour in meters per second +``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second +``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second +``knot`` one knot in meters per second +================== ========================================================== + + +Temperature +----------- + +===================== ======================================================= +``zero_Celsius`` zero of Celsius scale in Kelvin +``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins +===================== ======================================================= + +.. 
autosummary:: + :toctree: generated/ + + convert_temperature + +Energy +------ + +==================== ======================================================= +``eV`` one electron volt in Joules +``electron_volt`` one electron volt in Joules +``calorie`` one calorie (thermochemical) in Joules +``calorie_th`` one calorie (thermochemical) in Joules +``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules +``erg`` one erg in Joules +``Btu`` one British thermal unit (International Steam Table) in Joules +``Btu_IT`` one British thermal unit (International Steam Table) in Joules +``Btu_th`` one British thermal unit (thermochemical) in Joules +``ton_TNT`` one ton of TNT in Joules +==================== ======================================================= + +Power +----- + +==================== ======================================================= +``hp`` one horsepower in watts +``horsepower`` one horsepower in watts +==================== ======================================================= + +Force +----- + +==================== ======================================================= +``dyn`` one dyne in newtons +``dyne`` one dyne in newtons +``lbf`` one pound force in newtons +``pound_force`` one pound force in newtons +``kgf`` one kilogram force in newtons +``kilogram_force`` one kilogram force in newtons +==================== ======================================================= + +Optics +------ + +.. autosummary:: + :toctree: generated/ + + lambda2nu + nu2lambda + +References +========== + +.. [CODATA2018] CODATA Recommended Values of the Fundamental + Physical Constants 2018. + + https://physics.nist.gov/cuu/Constants/ + +""" # noqa: E501 +# Modules contributed by BasSw (wegwerp@gmail.com) +from ._codata import * +from ._constants import * +from ._codata import _obsolete_constants, physical_constants + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import codata, constants + +_constant_names_list = [(_k.lower(), _k, _v) + for _k, _v in physical_constants.items() + if _k not in _obsolete_constants] +_constant_names = "\n".join(["``{}``{} {} {}".format(_x[1], " "*(66-len(_x[1])), + _x[2][0], _x[2][1]) + for _x in sorted(_constant_names_list)]) +if __doc__: + __doc__ = __doc__ % dict(constant_names=_constant_names) + +del _constant_names +del _constant_names_list + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e83f8b7090e88ed4cb71348dc0597fdf2f8ad111 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/_codata.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/_codata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de146ae11670ad4c4fad602ceb918ab908019405 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/_codata.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71e6554ae2ddce211a7c7ec5a4da07c71070f0e4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2411ee5c3e3e68a371a6cbd39852ac60badf5e45 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e698011ff39ffbc0b40f9c0c831e3b743c14cfb7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/_codata.py b/env-llmeval/lib/python3.10/site-packages/scipy/constants/_codata.py new file mode 100644 index 0000000000000000000000000000000000000000..0f2fd4580fac82e53ed372219e77ddf843f2c68b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/constants/_codata.py @@ -0,0 +1,1748 @@ +""" +Fundamental Physical Constants +------------------------------ + +These constants are taken from CODATA Recommended Values of the Fundamental +Physical Constants 2018. + +Object +------ +physical_constants : dict + A dictionary containing physical constants. Keys are the names of physical + constants, values are tuples (value, units, precision). 
+ +Functions +--------- +value(key): + Returns the value of the physical constant(key). +unit(key): + Returns the units of the physical constant(key). +precision(key): + Returns the relative precision of the physical constant(key). +find(sub): + Prints or returns list of keys containing the string sub, default is all. + +Source +------ +The values of the constants provided at this site are recommended for +international use by CODATA and are the latest available. Termed the "2018 +CODATA recommended values," they are generally recognized worldwide for use in +all fields of science and technology. The values became available on 20 May +2019 and replaced the 2014 CODATA set. Also available is an introduction to the +constants for non-experts at + +https://physics.nist.gov/cuu/Constants/introduction.html + +References +---------- +Theoretical and experimental publications relevant to the fundamental constants +and closely related precision measurements published since the mid 1980s, but +also including many older papers of particular interest, some of which date +back to the 1800s. To search the bibliography, visit + +https://physics.nist.gov/cuu/Constants/ + +""" + +# Compiled by Charles Harris, dated October 3, 2002 +# updated to 2002 values by BasSw, 2006 +# Updated to 2006 values by Vincent Davis June 2010 +# Updated to 2014 values by Joseph Booker, 2015 +# Updated to 2018 values by Jakob Jakobson, 2019 + +from __future__ import annotations + +import warnings + +from typing import Any + +__all__ = ['physical_constants', 'value', 'unit', 'precision', 'find', + 'ConstantWarning'] + +""" +Source: https://physics.nist.gov/cuu/Constants/ + +The values of the constants provided at this site are recommended for +international use by CODATA and are the latest available. Termed the "2018 +CODATA recommended values," they are generally recognized worldwide for use in +all fields of science and technology. The values became available on 20 May +2019 and replaced the 2014 CODATA set. +""" + +# +# Source: https://physics.nist.gov/cuu/Constants/ +# + +# Quantity Value Uncertainty Unit +# ---------------------------------------------------- --------------------- -------------------- ------------- +txt2002 = """\ +Wien displacement law constant 2.897 7685e-3 0.000 0051e-3 m K +atomic unit of 1st hyperpolarizablity 3.206 361 51e-53 0.000 000 28e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizablity 6.235 3808e-65 0.000 0011e-65 C^4 m^4 J^-3 +atomic unit of electric dipole moment 8.478 353 09e-30 0.000 000 73e-30 C m +atomic unit of electric polarizablity 1.648 777 274e-41 0.000 000 016e-41 C^2 m^2 J^-1 +atomic unit of electric quadrupole moment 4.486 551 24e-40 0.000 000 39e-40 C m^2 +atomic unit of magn. dipole moment 1.854 801 90e-23 0.000 000 16e-23 J T^-1 +atomic unit of magn. flux density 2.350 517 42e5 0.000 000 20e5 T +deuteron magn. moment 0.433 073 482e-26 0.000 000 038e-26 J T^-1 +deuteron magn. moment to Bohr magneton ratio 0.466 975 4567e-3 0.000 000 0050e-3 +deuteron magn. moment to nuclear magneton ratio 0.857 438 2329 0.000 000 0092 +deuteron-electron magn. moment ratio -4.664 345 548e-4 0.000 000 050e-4 +deuteron-proton magn. moment ratio 0.307 012 2084 0.000 000 0045 +deuteron-neutron magn. moment ratio -0.448 206 52 0.000 000 11 +electron gyromagn. ratio 1.760 859 74e11 0.000 000 15e11 s^-1 T^-1 +electron gyromagn. ratio over 2 pi 28 024.9532 0.0024 MHz T^-1 +electron magn. moment -928.476 412e-26 0.000 080e-26 J T^-1 +electron magn. 
moment to Bohr magneton ratio -1.001 159 652 1859 0.000 000 000 0038 +electron magn. moment to nuclear magneton ratio -1838.281 971 07 0.000 000 85 +electron magn. moment anomaly 1.159 652 1859e-3 0.000 000 0038e-3 +electron to shielded proton magn. moment ratio -658.227 5956 0.000 0071 +electron to shielded helion magn. moment ratio 864.058 255 0.000 010 +electron-deuteron magn. moment ratio -2143.923 493 0.000 023 +electron-muon magn. moment ratio 206.766 9894 0.000 0054 +electron-neutron magn. moment ratio 960.920 50 0.000 23 +electron-proton magn. moment ratio -658.210 6862 0.000 0066 +magn. constant 12.566 370 614...e-7 0 N A^-2 +magn. flux quantum 2.067 833 72e-15 0.000 000 18e-15 Wb +muon magn. moment -4.490 447 99e-26 0.000 000 40e-26 J T^-1 +muon magn. moment to Bohr magneton ratio -4.841 970 45e-3 0.000 000 13e-3 +muon magn. moment to nuclear magneton ratio -8.890 596 98 0.000 000 23 +muon-proton magn. moment ratio -3.183 345 118 0.000 000 089 +neutron gyromagn. ratio 1.832 471 83e8 0.000 000 46e8 s^-1 T^-1 +neutron gyromagn. ratio over 2 pi 29.164 6950 0.000 0073 MHz T^-1 +neutron magn. moment -0.966 236 45e-26 0.000 000 24e-26 J T^-1 +neutron magn. moment to Bohr magneton ratio -1.041 875 63e-3 0.000 000 25e-3 +neutron magn. moment to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron to shielded proton magn. moment ratio -0.684 996 94 0.000 000 16 +neutron-electron magn. moment ratio 1.040 668 82e-3 0.000 000 25e-3 +neutron-proton magn. moment ratio -0.684 979 34 0.000 000 16 +proton gyromagn. ratio 2.675 222 05e8 0.000 000 23e8 s^-1 T^-1 +proton gyromagn. ratio over 2 pi 42.577 4813 0.000 0037 MHz T^-1 +proton magn. moment 1.410 606 71e-26 0.000 000 12e-26 J T^-1 +proton magn. moment to Bohr magneton ratio 1.521 032 206e-3 0.000 000 015e-3 +proton magn. moment to nuclear magneton ratio 2.792 847 351 0.000 000 028 +proton magn. shielding correction 25.689e-6 0.015e-6 +proton-neutron magn. moment ratio -1.459 898 05 0.000 000 34 +shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 +shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 +shielded helion magn. moment -1.074 553 024e-26 0.000 000 093e-26 J T^-1 +shielded helion magn. moment to Bohr magneton ratio -1.158 671 474e-3 0.000 000 014e-3 +shielded helion magn. moment to nuclear magneton ratio -2.127 497 723 0.000 000 025 +shielded helion to proton magn. moment ratio -0.761 766 562 0.000 000 012 +shielded helion to shielded proton magn. moment ratio -0.761 786 1313 0.000 000 0033 +shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 +shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 +shielded proton magn. moment 1.410 570 47e-26 0.000 000 12e-26 J T^-1 +shielded proton magn. moment to Bohr magneton ratio 1.520 993 132e-3 0.000 000 016e-3 +shielded proton magn. 
moment to nuclear magneton ratio 2.792 775 604 0.000 000 030 +{220} lattice spacing of silicon 192.015 5965e-12 0.000 0070e-12 m""" + +txt2006 = """\ +lattice spacing of silicon 192.015 5762 e-12 0.000 0050 e-12 m +alpha particle-electron mass ratio 7294.299 5365 0.000 0031 +alpha particle mass 6.644 656 20 e-27 0.000 000 33 e-27 kg +alpha particle mass energy equivalent 5.971 919 17 e-10 0.000 000 30 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 109 0.000 093 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 062 u +alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 062 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 51 0.000 000 000 41 +Angstrom star 1.000 014 98 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 538 782 e-27 0.000 000 083 e-27 kg +atomic mass constant energy equivalent 1.492 417 830 e-10 0.000 000 074 e-10 J +atomic mass constant energy equivalent in MeV 931.494 028 0.000 023 MeV +atomic mass unit-electron volt relationship 931.494 028 e6 0.000 023 e6 eV +atomic mass unit-hartree relationship 3.423 177 7149 e7 0.000 000 0049 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7369 e23 0.000 000 0032 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 671 e14 0.000 000 011 e14 m^-1 +atomic mass unit-joule relationship 1.492 417 830 e-10 0.000 000 074 e-10 J +atomic mass unit-kelvin relationship 1.080 9527 e13 0.000 0019 e13 K +atomic mass unit-kilogram relationship 1.660 538 782 e-27 0.000 000 083 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 533 e-53 0.000 000 081 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 95 e-65 0.000 000 31 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s +atomic unit of charge 1.602 176 487 e-19 0.000 000 040 e-19 C +atomic unit of charge density 1.081 202 300 e12 0.000 000 027 e12 C m^-3 +atomic unit of current 6.623 617 63 e-3 0.000 000 17 e-3 A +atomic unit of electric dipole mom. 8.478 352 81 e-30 0.000 000 21 e-30 C m +atomic unit of electric field 5.142 206 32 e11 0.000 000 13 e11 V m^-1 +atomic unit of electric field gradient 9.717 361 66 e21 0.000 000 24 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2536 e-41 0.000 000 0034 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 383 86 0.000 000 68 V +atomic unit of electric quadrupole mom. 4.486 551 07 e-40 0.000 000 11 e-40 C m^2 +atomic unit of energy 4.359 743 94 e-18 0.000 000 22 e-18 J +atomic unit of force 8.238 722 06 e-8 0.000 000 41 e-8 N +atomic unit of length 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m +atomic unit of mag. dipole mom. 1.854 801 830 e-23 0.000 000 046 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 382 e5 0.000 000 059 e5 T +atomic unit of magnetizability 7.891 036 433 e-29 0.000 000 027 e-29 J T^-2 +atomic unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +atomic unit of momentum 1.992 851 565 e-24 0.000 000 099 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326 505 e-17 0.000 000 000 016 e-17 s +atomic unit of velocity 2.187 691 2541 e6 0.000 000 0015 e6 m s^-1 +Avogadro constant 6.022 141 79 e23 0.000 000 30 e23 mol^-1 +Bohr magneton 927.400 915 e-26 0.000 023 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 7555 e-5 0.000 000 0079 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 246 04 e9 0.000 000 35 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 4515 0.000 0012 m^-1 T^-1 +Bohr magneton in K/T 0.671 7131 0.000 0012 K T^-1 +Bohr radius 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m +Boltzmann constant 1.380 6504 e-23 0.000 0024 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 343 e-5 0.000 015 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6644 e10 0.000 0036 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 56 0.000 12 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 2894 e-15 0.000 000 0058 e-15 m +Compton wavelength 2.426 310 2175 e-12 0.000 000 0033 e-12 m +Compton wavelength over 2 pi 386.159 264 59 e-15 0.000 000 53 e-15 m +conductance quantum 7.748 091 7004 e-5 0.000 000 0053 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 99 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 +deuteron-electron mass ratio 3670.482 9654 0.000 0016 +deuteron g factor 0.857 438 2308 0.000 000 0072 +deuteron mag. mom. 0.433 073 465 e-26 0.000 000 011 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 +deuteron mass 3.343 583 20 e-27 0.000 000 17 e-27 kg +deuteron mass energy equivalent 3.005 062 72 e-10 0.000 000 15 e-10 J +deuteron mass energy equivalent in MeV 1875.612 793 0.000 047 MeV +deuteron mass in u 2.013 553 212 724 0.000 000 000 078 u +deuteron molar mass 2.013 553 212 724 e-3 0.000 000 000 078 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 +deuteron-proton mass ratio 1.999 007 501 08 0.000 000 000 22 +deuteron rms charge radius 2.1402 e-15 0.0028 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 150 e11 0.000 000 044 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 +electron-deuteron mass ratio 2.724 437 1093 e-4 0.000 000 0012 e-4 +electron g factor -2.002 319 304 3622 0.000 000 000 0015 +electron gyromag. ratio 1.760 859 770 e11 0.000 000 044 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.953 64 0.000 70 MHz T^-1 +electron mag. mom. -928.476 377 e-26 0.000 023 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 181 11 e-3 0.000 000 000 74 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 11 0.000 000 000 000 74 +electron mag. mom. to nuclear magneton ratio -1838.281 970 92 0.000 000 80 +electron mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +electron mass energy equivalent 8.187 104 38 e-14 0.000 000 41 e-14 J +electron mass energy equivalent in MeV 0.510 998 910 0.000 000 013 MeV +electron mass in u 5.485 799 0943 e-4 0.000 000 0023 e-4 u +electron molar mass 5.485 799 0943 e-7 0.000 000 0023 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9877 0.000 0052 +electron-muon mass ratio 4.836 331 71 e-3 0.000 000 12 e-3 +electron-neutron mag. 
mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4459 e-4 0.000 000 0033 e-4 +electron-proton mag. mom. ratio -658.210 6848 0.000 0054 +electron-proton mass ratio 5.446 170 2177 e-4 0.000 000 0024 e-4 +electron-tau mass ratio 2.875 64 e-4 0.000 47 e-4 +electron to alpha particle mass ratio 1.370 933 555 70 e-4 0.000 000 000 58 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron volt 1.602 176 487 e-19 0.000 000 040 e-19 J +electron volt-atomic mass unit relationship 1.073 544 188 e-9 0.000 000 027 e-9 u +electron volt-hartree relationship 3.674 932 540 e-2 0.000 000 092 e-2 E_h +electron volt-hertz relationship 2.417 989 454 e14 0.000 000 060 e14 Hz +electron volt-inverse meter relationship 8.065 544 65 e5 0.000 000 20 e5 m^-1 +electron volt-joule relationship 1.602 176 487 e-19 0.000 000 040 e-19 J +electron volt-kelvin relationship 1.160 4505 e4 0.000 0020 e4 K +electron volt-kilogram relationship 1.782 661 758 e-36 0.000 000 044 e-36 kg +elementary charge 1.602 176 487 e-19 0.000 000 040 e-19 C +elementary charge over h 2.417 989 454 e14 0.000 000 060 e14 A J^-1 +Faraday constant 96 485.3399 0.0024 C mol^-1 +Faraday constant for conventional electric current 96 485.3401 0.0048 C_90 mol^-1 +Fermi coupling constant 1.166 37 e-5 0.000 01 e-5 GeV^-2 +fine-structure constant 7.297 352 5376 e-3 0.000 000 0050 e-3 +first radiation constant 3.741 771 18 e-16 0.000 000 19 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 759 e-16 0.000 000 059 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 2986 e-8 0.000 000 0042 e-8 u +hartree-electron volt relationship 27.211 383 86 0.000 000 68 eV +Hartree energy 4.359 743 94 e-18 0.000 000 22 e-18 J +Hartree energy in eV 27.211 383 86 0.000 000 68 eV +hartree-hertz relationship 6.579 683 920 722 e15 0.000 000 000 044 e15 Hz +hartree-inverse meter relationship 2.194 746 313 705 e7 0.000 000 000 015 e7 m^-1 +hartree-joule relationship 4.359 743 94 e-18 0.000 000 22 e-18 J +hartree-kelvin relationship 3.157 7465 e5 0.000 0055 e5 K +hartree-kilogram relationship 4.850 869 34 e-35 0.000 000 24 e-35 kg +helion-electron mass ratio 5495.885 2765 0.000 0052 +helion mass 5.006 411 92 e-27 0.000 000 25 e-27 kg +helion mass energy equivalent 4.499 538 64 e-10 0.000 000 22 e-10 J +helion mass energy equivalent in MeV 2808.391 383 0.000 070 MeV +helion mass in u 3.014 932 2473 0.000 000 0026 u +helion molar mass 3.014 932 2473 e-3 0.000 000 0026 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 6713 0.000 000 0026 +hertz-atomic mass unit relationship 4.439 821 6294 e-24 0.000 000 0064 e-24 u +hertz-electron volt relationship 4.135 667 33 e-15 0.000 000 10 e-15 eV +hertz-hartree relationship 1.519 829 846 006 e-16 0.000 000 000010e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 068 96 e-34 0.000 000 33 e-34 J +hertz-kelvin relationship 4.799 2374 e-11 0.000 0084 e-11 K +hertz-kilogram relationship 7.372 496 00 e-51 0.000 000 37 e-51 kg +inverse fine-structure constant 137.035 999 679 0.000 000 094 +inverse meter-atomic mass unit relationship 1.331 025 0394 e-15 0.000 000 0019 e-15 u +inverse meter-electron volt relationship 1.239 841 875 e-6 0.000 000 031 e-6 eV +inverse meter-hartree relationship 4.556 335 252 760 e-8 0.000 000 000 030 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 501 e-25 0.000 000 099 e-25 J +inverse meter-kelvin relationship 1.438 7752 e-2 0.000 0025 e-2 K +inverse meter-kilogram relationship 2.210 218 70 e-42 0.000 000 11 e-42 kg +inverse of conductance quantum 12 906.403 7787 0.000 0088 ohm +Josephson constant 483 597.891 e9 0.012 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 536 41 e9 0.000 000 33 e9 u +joule-electron volt relationship 6.241 509 65 e18 0.000 000 16 e18 eV +joule-hartree relationship 2.293 712 69 e17 0.000 000 11 e17 E_h +joule-hertz relationship 1.509 190 450 e33 0.000 000 075 e33 Hz +joule-inverse meter relationship 5.034 117 47 e24 0.000 000 25 e24 m^-1 +joule-kelvin relationship 7.242 963 e22 0.000 013 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 098 e-14 0.000 016 e-14 u +kelvin-electron volt relationship 8.617 343 e-5 0.000 015 e-5 eV +kelvin-hartree relationship 3.166 8153 e-6 0.000 0055 e-6 E_h +kelvin-hertz relationship 2.083 6644 e10 0.000 0036 e10 Hz +kelvin-inverse meter relationship 69.503 56 0.000 12 m^-1 +kelvin-joule relationship 1.380 6504 e-23 0.000 0024 e-23 J +kelvin-kilogram relationship 1.536 1807 e-40 0.000 0027 e-40 kg +kilogram-atomic mass unit relationship 6.022 141 79 e26 0.000 000 30 e26 u +kilogram-electron volt relationship 5.609 589 12 e35 0.000 000 14 e35 eV +kilogram-hartree relationship 2.061 486 16 e34 0.000 000 10 e34 E_h +kilogram-hertz relationship 1.356 392 733 e50 0.000 000 068 e50 Hz +kilogram-inverse meter relationship 4.524 439 15 e41 0.000 000 23 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 651 e39 0.000 011 e39 K +lattice parameter of silicon 543.102 064 e-12 0.000 014 e-12 m +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7774 e25 0.000 0047 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 667 e-15 0.000 000 052 e-15 Wb +molar gas constant 8.314 472 0.000 015 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 6821 e-10 0.000 000 0057 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 564 72 0.000 000 000 17 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 981 e-3 0.000 040 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 996 e-3 0.000 039 e-3 m^3 mol^-1 +molar volume of silicon 12.058 8349 e-6 0.000 0011 e-6 m^3 mol^-1 +Mo x unit 1.002 099 55 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 04 e-15 0.000 000 30 e-15 m +muon Compton wavelength over 2 pi 1.867 594 295 e-15 0.000 000 047 e-15 m +muon-electron mass ratio 206.768 2823 0.000 0052 +muon g factor -2.002 331 8414 0.000 000 0012 +muon mag. mom. -4.490 447 86 e-26 0.000 000 16 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 69 e-3 0.000 000 60 e-3 +muon mag. mom. 
to Bohr magneton ratio -4.841 970 49 e-3 0.000 000 12 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 23 +muon mass 1.883 531 30 e-28 0.000 000 11 e-28 kg +muon mass energy equivalent 1.692 833 510 e-11 0.000 000 095 e-11 J +muon mass energy equivalent in MeV 105.658 3668 0.000 0038 MeV +muon mass in u 0.113 428 9256 0.000 000 0029 u +muon molar mass 0.113 428 9256 e-3 0.000 000 0029 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5167 0.000 000 0029 +muon-proton mag. mom. ratio -3.183 345 137 0.000 000 085 +muon-proton mass ratio 0.112 609 5261 0.000 000 0029 +muon-tau mass ratio 5.945 92 e-2 0.000 97 e-2 +natural unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s +natural unit of action in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s +natural unit of energy 8.187 104 38 e-14 0.000 000 41 e-14 J +natural unit of energy in MeV 0.510 998 910 0.000 000 013 MeV +natural unit of length 386.159 264 59 e-15 0.000 000 53 e-15 m +natural unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +natural unit of momentum 2.730 924 06 e-22 0.000 000 14 e-22 kg m s^-1 +natural unit of momentum in MeV/c 0.510 998 910 0.000 000 013 MeV/c +natural unit of time 1.288 088 6570 e-21 0.000 000 0018 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 8951 e-15 0.000 000 0020 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 413 82 e-15 0.000 000 000 31 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 6605 0.000 0011 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 85 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6954 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 41 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 211 e-27 0.000 000 084 e-27 kg +neutron mass energy equivalent 1.505 349 505 e-10 0.000 000 075 e-10 J +neutron mass energy equivalent in MeV 939.565 346 0.000 023 MeV +neutron mass in u 1.008 664 915 97 0.000 000 000 43 u +neutron molar mass 1.008 664 915 97 e-3 0.000 000 000 43 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 09 0.000 000 23 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass ratio 1.001 378 419 18 0.000 000 000 46 +neutron-tau mass ratio 0.528 740 0.000 086 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 28 e-11 0.000 67 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 81 e-39 0.000 67 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 24 e-27 0.000 000 13 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2326 e-8 0.000 000 0045 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 616 e-2 0.000 000 064 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2637 e-4 0.000 0064 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 84 0.000 000 19 MHz T^-1 +Planck constant 6.626 068 96 e-34 0.000 000 33 e-34 J s +Planck constant in eV s 4.135 667 33 e-15 0.000 000 10 e-15 eV s +Planck constant over 2 pi 1.054 571 628 e-34 0.000 000 053 e-34 J s +Planck constant over 2 pi in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9631 0.000 0049 MeV fm +Planck length 1.616 252 e-35 0.000 081 e-35 m +Planck mass 2.176 44 e-8 0.000 11 e-8 kg +Planck mass energy equivalent in GeV 1.220 892 e19 0.000 061 e19 GeV +Planck temperature 1.416 785 e32 0.000 071 e32 K +Planck time 5.391 24 e-44 0.000 27 e-44 s +proton charge to mass quotient 9.578 833 92 e7 0.000 000 24 e7 C kg^-1 +proton Compton wavelength 1.321 409 8446 e-15 0.000 000 0019 e-15 m +proton Compton wavelength over 2 pi 0.210 308 908 61 e-15 0.000 000 000 30 e-15 m +proton-electron mass ratio 1836.152 672 47 0.000 000 80 +proton g factor 5.585 694 713 0.000 000 046 +proton gyromag. ratio 2.675 222 099 e8 0.000 000 070 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 4821 0.000 0011 MHz T^-1 +proton mag. mom. 1.410 606 662 e-26 0.000 000 037 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 209 e-3 0.000 000 012 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023 +proton mag. shielding correction 25.694 e-6 0.014 e-6 +proton mass 1.672 621 637 e-27 0.000 000 083 e-27 kg +proton mass energy equivalent 1.503 277 359 e-10 0.000 000 075 e-10 J +proton mass energy equivalent in MeV 938.272 013 0.000 023 MeV +proton mass in u 1.007 276 466 77 0.000 000 000 10 u +proton molar mass 1.007 276 466 77 e-3 0.000 000 000 10 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 39 0.000 000 23 +proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 24 0.000 000 000 46 +proton rms charge radius 0.8768 e-15 0.0069 e-15 m +proton-tau mass ratio 0.528 012 0.000 086 +quantum of circulation 3.636 947 5199 e-4 0.000 000 0050 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 040 e-4 0.000 000 010 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 527 0.000 073 m^-1 +Rydberg constant times c in Hz 3.289 841 960 361 e15 0.000 000 000 022 e15 Hz +Rydberg constant times hc in eV 13.605 691 93 0.000 000 34 eV +Rydberg constant times hc in J 2.179 871 97 e-18 0.000 000 11 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7047 0.000 0044 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8677 0.000 0044 +second radiation constant 1.438 7752 e-2 0.000 0025 e-2 m K +shielded helion gyromag. ratio 2.037 894 730 e8 0.000 000 056 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 101 98 0.000 000 90 MHz T^-1 +shielded helion mag. mom. -1.074 552 982 e-26 0.000 000 030 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 558 0.000 000 011 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 362 e8 0.000 000 073 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 3881 0.000 0012 MHz T^-1 +shielded proton mag. mom. 1.410 570 419 e-26 0.000 000 038 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +Stefan-Boltzmann constant 5.670 400 e-8 0.000 040 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 72 e-15 0.000 11 e-15 m +tau Compton wavelength over 2 pi 0.111 046 e-15 0.000 018 e-15 m +tau-electron mass ratio 3477.48 0.57 +tau mass 3.167 77 e-27 0.000 52 e-27 kg +tau mass energy equivalent 2.847 05 e-10 0.000 46 e-10 J +tau mass energy equivalent in MeV 1776.99 0.29 MeV +tau mass in u 1.907 68 0.000 31 u +tau molar mass 1.907 68 e-3 0.000 31 e-3 kg mol^-1 +tau-muon mass ratio 16.8183 0.0027 +tau-neutron mass ratio 1.891 29 0.000 31 +tau-proton mass ratio 1.893 90 0.000 31 +Thomson cross section 0.665 245 8558 e-28 0.000 000 0027 e-28 m^2 +triton-electron mag. mom. ratio -1.620 514 423 e-3 0.000 000 021 e-3 +triton-electron mass ratio 5496.921 5269 0.000 0051 +triton g factor 5.957 924 896 0.000 000 076 +triton mag. mom. 1.504 609 361 e-26 0.000 000 042 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038 +triton mass 5.007 355 88 e-27 0.000 000 25 e-27 kg +triton mass energy equivalent 4.500 387 03 e-10 0.000 000 22 e-10 J +triton mass energy equivalent in MeV 2808.920 906 0.000 070 MeV +triton mass in u 3.015 500 7134 0.000 000 0025 u +triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 +triton-neutron mag. mom. ratio -1.557 185 53 0.000 000 37 +triton-proton mag. mom. 
ratio 1.066 639 908 0.000 000 010 +triton-proton mass ratio 2.993 717 0309 0.000 000 0025 +unified atomic mass unit 1.660 538 782 e-27 0.000 000 083 e-27 kg +von Klitzing constant 25 812.807 557 0.000 018 ohm +weak mixing angle 0.222 55 0.000 56 +Wien frequency displacement law constant 5.878 933 e10 0.000 010 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7685 e-3 0.000 0051 e-3 m K""" + +txt2010 = """\ +{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m +alpha particle-electron mass ratio 7294.299 5361 0.000 0029 +alpha particle mass 6.644 656 75 e-27 0.000 000 29 e-27 kg +alpha particle mass energy equivalent 5.971 919 67 e-10 0.000 000 26 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 240 0.000 082 MeV +alpha particle mass in u 4.001 506 179 125 0.000 000 000 062 u +alpha particle molar mass 4.001 506 179 125 e-3 0.000 000 000 062 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 33 0.000 000 000 36 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 538 921 e-27 0.000 000 073 e-27 kg +atomic mass constant energy equivalent 1.492 417 954 e-10 0.000 000 066 e-10 J +atomic mass constant energy equivalent in MeV 931.494 061 0.000 021 MeV +atomic mass unit-electron volt relationship 931.494 061 e6 0.000 021 e6 eV +atomic mass unit-hartree relationship 3.423 177 6845 e7 0.000 000 0024 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7168 e23 0.000 000 0016 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6042 e14 0.000 000 0053 e14 m^-1 +atomic mass unit-joule relationship 1.492 417 954 e-10 0.000 000 066 e-10 J +atomic mass unit-kelvin relationship 1.080 954 08 e13 0.000 000 98 e13 K +atomic mass unit-kilogram relationship 1.660 538 921 e-27 0.000 000 073 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 449 e-53 0.000 000 071 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 54 e-65 0.000 000 28 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s +atomic unit of charge 1.602 176 565 e-19 0.000 000 035 e-19 C +atomic unit of charge density 1.081 202 338 e12 0.000 000 024 e12 C m^-3 +atomic unit of current 6.623 617 95 e-3 0.000 000 15 e-3 A +atomic unit of electric dipole mom. 8.478 353 26 e-30 0.000 000 19 e-30 C m +atomic unit of electric field 5.142 206 52 e11 0.000 000 11 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 00 e21 0.000 000 21 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2754 e-41 0.000 000 0016 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 385 05 0.000 000 60 V +atomic unit of electric quadrupole mom. 4.486 551 331 e-40 0.000 000 099 e-40 C m^2 +atomic unit of energy 4.359 744 34 e-18 0.000 000 19 e-18 J +atomic unit of force 8.238 722 78 e-8 0.000 000 36 e-8 N +atomic unit of length 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m +atomic unit of mag. dipole mom. 1.854 801 936 e-23 0.000 000 041 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 464 e5 0.000 000 052 e5 T +atomic unit of magnetizability 7.891 036 607 e-29 0.000 000 013 e-29 J T^-2 +atomic unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +atomic unit of mom.um 1.992 851 740 e-24 0.000 000 088 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326 502e-17 0.000 000 000 012e-17 s +atomic unit of velocity 2.187 691 263 79 e6 0.000 000 000 71 e6 m s^-1 +Avogadro constant 6.022 141 29 e23 0.000 000 27 e23 mol^-1 +Bohr magneton 927.400 968 e-26 0.000 020 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 8066 e-5 0.000 000 0038 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 245 55 e9 0.000 000 31 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 4498 0.000 0010 m^-1 T^-1 +Bohr magneton in K/T 0.671 713 88 0.000 000 61 K T^-1 +Bohr radius 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m +Boltzmann constant 1.380 6488 e-23 0.000 0013 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 3324 e-5 0.000 0078 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6618 e10 0.000 0019 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 476 0.000 063 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 3267 e-15 0.000 000 0027 e-15 m +Compton wavelength 2.426 310 2389 e-12 0.000 000 0016 e-12 m +Compton wavelength over 2 pi 386.159 268 00 e-15 0.000 000 25 e-15 m +conductance quantum 7.748 091 7346 e-5 0.000 000 0025 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 +deuteron-electron mass ratio 3670.482 9652 0.000 0015 +deuteron g factor 0.857 438 2308 0.000 000 0072 +deuteron mag. mom. 0.433 073 489 e-26 0.000 000 010 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 +deuteron mass 3.343 583 48 e-27 0.000 000 15 e-27 kg +deuteron mass energy equivalent 3.005 062 97 e-10 0.000 000 13 e-10 J +deuteron mass energy equivalent in MeV 1875.612 859 0.000 041 MeV +deuteron mass in u 2.013 553 212 712 0.000 000 000 077 u +deuteron molar mass 2.013 553 212 712 e-3 0.000 000 000 077 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 +deuteron-proton mass ratio 1.999 007 500 97 0.000 000 000 18 +deuteron rms charge radius 2.1424 e-15 0.0021 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 088 e11 0.000 000 039 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 +electron-deuteron mass ratio 2.724 437 1095 e-4 0.000 000 0011 e-4 +electron g factor -2.002 319 304 361 53 0.000 000 000 000 53 +electron gyromag. ratio 1.760 859 708 e11 0.000 000 039 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.952 66 0.000 62 MHz T^-1 +electron-helion mass ratio 1.819 543 0761 e-4 0.000 000 0017 e-4 +electron mag. mom. -928.476 430 e-26 0.000 021 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 180 76 e-3 0.000 000 000 27 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 76 0.000 000 000 000 27 +electron mag. mom. to nuclear magneton ratio -1838.281 970 90 0.000 000 75 +electron mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +electron mass energy equivalent 8.187 105 06 e-14 0.000 000 36 e-14 J +electron mass energy equivalent in MeV 0.510 998 928 0.000 000 011 MeV +electron mass in u 5.485 799 0946 e-4 0.000 000 0022 e-4 u +electron molar mass 5.485 799 0946 e-7 0.000 000 0022 e-7 kg mol^-1 +electron-muon mag. mom. 
ratio 206.766 9896 0.000 0052 +electron-muon mass ratio 4.836 331 66 e-3 0.000 000 12 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4461 e-4 0.000 000 0032 e-4 +electron-proton mag. mom. ratio -658.210 6848 0.000 0054 +electron-proton mass ratio 5.446 170 2178 e-4 0.000 000 0022 e-4 +electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 +electron to alpha particle mass ratio 1.370 933 555 78 e-4 0.000 000 000 55 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 0653 e-4 0.000 000 0017 e-4 +electron volt 1.602 176 565 e-19 0.000 000 035 e-19 J +electron volt-atomic mass unit relationship 1.073 544 150 e-9 0.000 000 024 e-9 u +electron volt-hartree relationship 3.674 932 379 e-2 0.000 000 081 e-2 E_h +electron volt-hertz relationship 2.417 989 348 e14 0.000 000 053 e14 Hz +electron volt-inverse meter relationship 8.065 544 29 e5 0.000 000 18 e5 m^-1 +electron volt-joule relationship 1.602 176 565 e-19 0.000 000 035 e-19 J +electron volt-kelvin relationship 1.160 4519 e4 0.000 0011 e4 K +electron volt-kilogram relationship 1.782 661 845 e-36 0.000 000 039 e-36 kg +elementary charge 1.602 176 565 e-19 0.000 000 035 e-19 C +elementary charge over h 2.417 989 348 e14 0.000 000 053 e14 A J^-1 +Faraday constant 96 485.3365 0.0021 C mol^-1 +Faraday constant for conventional electric current 96 485.3321 0.0043 C_90 mol^-1 +Fermi coupling constant 1.166 364 e-5 0.000 005 e-5 GeV^-2 +fine-structure constant 7.297 352 5698 e-3 0.000 000 0024 e-3 +first radiation constant 3.741 771 53 e-16 0.000 000 17 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 869 e-16 0.000 000 053 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 3246 e-8 0.000 000 0021 e-8 u +hartree-electron volt relationship 27.211 385 05 0.000 000 60 eV +Hartree energy 4.359 744 34 e-18 0.000 000 19 e-18 J +Hartree energy in eV 27.211 385 05 0.000 000 60 eV +hartree-hertz relationship 6.579 683 920 729 e15 0.000 000 000 033 e15 Hz +hartree-inverse meter relationship 2.194 746 313 708 e7 0.000 000 000 011 e7 m^-1 +hartree-joule relationship 4.359 744 34 e-18 0.000 000 19 e-18 J +hartree-kelvin relationship 3.157 7504 e5 0.000 0029 e5 K +hartree-kilogram relationship 4.850 869 79 e-35 0.000 000 21 e-35 kg +helion-electron mass ratio 5495.885 2754 0.000 0050 +helion g factor -4.255 250 613 0.000 000 050 +helion mag. mom. -1.074 617 486 e-26 0.000 000 027 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 306 0.000 000 025 +helion mass 5.006 412 34 e-27 0.000 000 22 e-27 kg +helion mass energy equivalent 4.499 539 02 e-10 0.000 000 20 e-10 J +helion mass energy equivalent in MeV 2808.391 482 0.000 062 MeV +helion mass in u 3.014 932 2468 0.000 000 0025 u +helion molar mass 3.014 932 2468 e-3 0.000 000 0025 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 6707 0.000 000 0025 +hertz-atomic mass unit relationship 4.439 821 6689 e-24 0.000 000 0031 e-24 u +hertz-electron volt relationship 4.135 667 516 e-15 0.000 000 091 e-15 eV +hertz-hartree relationship 1.519 829 8460045e-16 0.000 000 0000076e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 069 57 e-34 0.000 000 29 e-34 J +hertz-kelvin relationship 4.799 2434 e-11 0.000 0044 e-11 K +hertz-kilogram relationship 7.372 496 68 e-51 0.000 000 33 e-51 kg +inverse fine-structure constant 137.035 999 074 0.000 000 044 +inverse meter-atomic mass unit relationship 1.331 025 051 20 e-15 0.000 000 000 94 e-15 u +inverse meter-electron volt relationship 1.239 841 930 e-6 0.000 000 027 e-6 eV +inverse meter-hartree relationship 4.556 335 252 755 e-8 0.000 000 000 023 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 684 e-25 0.000 000 088 e-25 J +inverse meter-kelvin relationship 1.438 7770 e-2 0.000 0013 e-2 K +inverse meter-kilogram relationship 2.210 218 902 e-42 0.000 000 098 e-42 kg +inverse of conductance quantum 12 906.403 7217 0.000 0042 ohm +Josephson constant 483 597.870 e9 0.011 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 535 85 e9 0.000 000 30 e9 u +joule-electron volt relationship 6.241 509 34 e18 0.000 000 14 e18 eV +joule-hartree relationship 2.293 712 48 e17 0.000 000 10 e17 E_h +joule-hertz relationship 1.509 190 311 e33 0.000 000 067 e33 Hz +joule-inverse meter relationship 5.034 117 01 e24 0.000 000 22 e24 m^-1 +joule-kelvin relationship 7.242 9716 e22 0.000 0066 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 0868 e-14 0.000 0084 e-14 u +kelvin-electron volt relationship 8.617 3324 e-5 0.000 0078 e-5 eV +kelvin-hartree relationship 3.166 8114 e-6 0.000 0029 e-6 E_h +kelvin-hertz relationship 2.083 6618 e10 0.000 0019 e10 Hz +kelvin-inverse meter relationship 69.503 476 0.000 063 m^-1 +kelvin-joule relationship 1.380 6488 e-23 0.000 0013 e-23 J +kelvin-kilogram relationship 1.536 1790 e-40 0.000 0014 e-40 kg +kilogram-atomic mass unit relationship 6.022 141 29 e26 0.000 000 27 e26 u +kilogram-electron volt relationship 5.609 588 85 e35 0.000 000 12 e35 eV +kilogram-hartree relationship 2.061 485 968 e34 0.000 000 091 e34 E_h +kilogram-hertz relationship 1.356 392 608 e50 0.000 000 060 e50 Hz +kilogram-inverse meter relationship 4.524 438 73 e41 0.000 000 20 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 6582 e39 0.000 0059 e39 K +lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 6462 e25 0.000 0024 e25 m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7805 e25 0.000 0024 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 758 e-15 0.000 000 046 e-15 Wb +molar gas constant 8.314 4621 0.000 0075 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 7176 e-10 0.000 000 0028 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 565 779 0.000 000 000 084 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 953 e-3 0.000 021 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 968 e-3 0.000 020 e-3 m^3 mol^-1 +molar volume of silicon 12.058 833 01 e-6 0.000 000 80 e-6 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 03 e-15 0.000 000 30 e-15 m +muon Compton wavelength over 2 pi 1.867 594 294 e-15 0.000 000 047 e-15 m +muon-electron mass ratio 206.768 2843 0.000 0052 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. 
-4.490 448 07 e-26 0.000 000 15 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 91 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 44 e-3 0.000 000 12 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 596 97 0.000 000 22 +muon mass 1.883 531 475 e-28 0.000 000 096 e-28 kg +muon mass energy equivalent 1.692 833 667 e-11 0.000 000 086 e-11 J +muon mass energy equivalent in MeV 105.658 3715 0.000 0035 MeV +muon mass in u 0.113 428 9267 0.000 000 0029 u +muon molar mass 0.113 428 9267 e-3 0.000 000 0029 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5177 0.000 000 0028 +muon-proton mag. mom. ratio -3.183 345 107 0.000 000 084 +muon-proton mass ratio 0.112 609 5272 0.000 000 0028 +muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 +natural unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s +natural unit of action in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s +natural unit of energy 8.187 105 06 e-14 0.000 000 36 e-14 J +natural unit of energy in MeV 0.510 998 928 0.000 000 011 MeV +natural unit of length 386.159 268 00 e-15 0.000 000 25 e-15 m +natural unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +natural unit of mom.um 2.730 924 29 e-22 0.000 000 12 e-22 kg m s^-1 +natural unit of mom.um in MeV/c 0.510 998 928 0.000 000 011 MeV/c +natural unit of time 1.288 088 668 33 e-21 0.000 000 000 83 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 9068 e-15 0.000 000 0011 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 415 68 e-15 0.000 000 000 17 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 6605 0.000 0011 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 79 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6943 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 47 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 72 0.000 000 45 +neutron mass 1.674 927 351 e-27 0.000 000 074 e-27 kg +neutron mass energy equivalent 1.505 349 631 e-10 0.000 000 066 e-10 J +neutron mass energy equivalent in MeV 939.565 379 0.000 021 MeV +neutron mass in u 1.008 664 916 00 0.000 000 000 43 u +neutron molar mass 1.008 664 916 00 e-3 0.000 000 000 43 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 00 0.000 000 22 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 573 92 e-30 0.000 000 76 e-30 +neutron-proton mass difference energy equivalent 2.072 146 50 e-13 0.000 000 68 e-13 +neutron-proton mass difference energy equivalent in MeV 1.293 332 17 0.000 000 42 +neutron-proton mass difference in u 0.001 388 449 19 0.000 000 000 45 +neutron-proton mass ratio 1.001 378 419 17 0.000 000 000 45 +neutron-tau mass ratio 0.528 790 0.000 048 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.673 84 e-11 0.000 80 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 37 e-39 0.000 80 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 53 e-27 0.000 000 11 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2605 e-8 0.000 000 0022 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 527 e-2 0.000 000 056 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2682 e-4 0.000 0033 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 57 0.000 000 17 MHz T^-1 +Planck constant 6.626 069 57 e-34 0.000 000 29 e-34 J s +Planck constant in eV s 4.135 667 516 e-15 0.000 000 091 e-15 eV s +Planck constant over 2 pi 1.054 571 726 e-34 0.000 000 047 e-34 J s +Planck constant over 2 pi in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9718 0.000 0044 MeV fm +Planck length 1.616 199 e-35 0.000 097 e-35 m +Planck mass 2.176 51 e-8 0.000 13 e-8 kg +Planck mass energy equivalent in GeV 1.220 932 e19 0.000 073 e19 GeV +Planck temperature 1.416 833 e32 0.000 085 e32 K +Planck time 5.391 06 e-44 0.000 32 e-44 s +proton charge to mass quotient 9.578 833 58 e7 0.000 000 21 e7 C kg^-1 +proton Compton wavelength 1.321 409 856 23 e-15 0.000 000 000 94 e-15 m +proton Compton wavelength over 2 pi 0.210 308 910 47 e-15 0.000 000 000 15 e-15 m +proton-electron mass ratio 1836.152 672 45 0.000 000 75 +proton g factor 5.585 694 713 0.000 000 046 +proton gyromag. ratio 2.675 222 005 e8 0.000 000 063 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 4806 0.000 0010 MHz T^-1 +proton mag. mom. 1.410 606 743 e-26 0.000 000 033 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 210 e-3 0.000 000 012 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023 +proton mag. shielding correction 25.694 e-6 0.014 e-6 +proton mass 1.672 621 777 e-27 0.000 000 074 e-27 kg +proton mass energy equivalent 1.503 277 484 e-10 0.000 000 066 e-10 J +proton mass energy equivalent in MeV 938.272 046 0.000 021 MeV +proton mass in u 1.007 276 466 812 0.000 000 000 090 u +proton molar mass 1.007 276 466 812 e-3 0.000 000 000 090 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 31 0.000 000 22 +proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 26 0.000 000 000 45 +proton rms charge radius 0.8775 e-15 0.0051 e-15 m +proton-tau mass ratio 0.528 063 0.000 048 +quantum of circulation 3.636 947 5520 e-4 0.000 000 0024 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 1040 e-4 0.000 000 0047 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 539 0.000 055 m^-1 +Rydberg constant times c in Hz 3.289 841 960 364 e15 0.000 000 000 017 e15 Hz +Rydberg constant times hc in eV 13.605 692 53 0.000 000 30 eV +Rydberg constant times hc in J 2.179 872 171 e-18 0.000 000 096 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7078 0.000 0023 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8708 0.000 0023 +second radiation constant 1.438 7770 e-2 0.000 0013 e-2 m K +shielded helion gyromag. ratio 2.037 894 659 e8 0.000 000 051 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 100 84 0.000 000 81 MHz T^-1 +shielded helion mag. mom. -1.074 553 044 e-26 0.000 000 027 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 558 0.000 000 011 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 268 e8 0.000 000 066 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 3866 0.000 0010 MHz T^-1 +shielded proton mag. mom. 1.410 570 499 e-26 0.000 000 035 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 373 e-8 0.000 021 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m +tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m +tau-electron mass ratio 3477.15 0.31 +tau mass 3.167 47 e-27 0.000 29 e-27 kg +tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J +tau mass energy equivalent in MeV 1776.82 0.16 MeV +tau mass in u 1.907 49 0.000 17 u +tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 +tau-muon mass ratio 16.8167 0.0015 +tau-neutron mass ratio 1.891 11 0.000 17 +tau-proton mass ratio 1.893 72 0.000 17 +Thomson cross section 0.665 245 8734 e-28 0.000 000 0013 e-28 m^2 +triton-electron mass ratio 5496.921 5267 0.000 0050 +triton g factor 5.957 924 896 0.000 000 076 +triton mag. mom. 1.504 609 447 e-26 0.000 000 038 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038 +triton mass 5.007 356 30 e-27 0.000 000 22 e-27 kg +triton mass energy equivalent 4.500 387 41 e-10 0.000 000 20 e-10 J +triton mass energy equivalent in MeV 2808.921 005 0.000 062 MeV +triton mass in u 3.015 500 7134 0.000 000 0025 u +triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 0308 0.000 000 0025 +unified atomic mass unit 1.660 538 921 e-27 0.000 000 073 e-27 kg +von Klitzing constant 25 812.807 4434 0.000 0084 ohm +weak mixing angle 0.2223 0.0021 +Wien frequency displacement law constant 5.878 9254 e10 0.000 0053 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7721 e-3 0.000 0026 e-3 m K""" + +txt2014 = """\ +{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m +alpha particle-electron mass ratio 7294.299 541 36 0.000 000 24 +alpha particle mass 6.644 657 230 e-27 0.000 000 082 e-27 kg +alpha particle mass energy equivalent 5.971 920 097 e-10 0.000 000 073 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 378 0.000 023 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u +alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 063 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 07 0.000 000 000 36 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 539 040 e-27 0.000 000 020 e-27 kg +atomic mass constant energy equivalent 1.492 418 062 e-10 0.000 000 018 e-10 J +atomic mass constant energy equivalent in MeV 931.494 0954 0.000 0057 MeV +atomic mass unit-electron volt relationship 931.494 0954 e6 0.000 0057 e6 eV +atomic mass unit-hartree relationship 3.423 177 6902 e7 0.000 000 0016 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7206 e23 0.000 000 0010 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6166 e14 0.000 000 0034 e14 m^-1 +atomic mass unit-joule relationship 1.492 
418 062 e-10 0.000 000 018 e-10 J +atomic mass unit-kelvin relationship 1.080 954 38 e13 0.000 000 62 e13 K +atomic mass unit-kilogram relationship 1.660 539 040 e-27 0.000 000 020 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 329 e-53 0.000 000 020 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 085 e-65 0.000 000 077 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s +atomic unit of charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C +atomic unit of charge density 1.081 202 3770 e12 0.000 000 0067 e12 C m^-3 +atomic unit of current 6.623 618 183 e-3 0.000 000 041 e-3 A +atomic unit of electric dipole mom. 8.478 353 552 e-30 0.000 000 052 e-30 C m +atomic unit of electric field 5.142 206 707 e11 0.000 000 032 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 356 e21 0.000 000 060 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2731 e-41 0.000 000 0011 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 386 02 0.000 000 17 V +atomic unit of electric quadrupole mom. 4.486 551 484 e-40 0.000 000 028 e-40 C m^2 +atomic unit of energy 4.359 744 650 e-18 0.000 000 054 e-18 J +atomic unit of force 8.238 723 36 e-8 0.000 000 10 e-8 N +atomic unit of length 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m +atomic unit of mag. dipole mom. 1.854 801 999 e-23 0.000 000 011 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 550 e5 0.000 000 014 e5 T +atomic unit of magnetizability 7.891 036 5886 e-29 0.000 000 0090 e-29 J T^-2 +atomic unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +atomic unit of mom.um 1.992 851 882 e-24 0.000 000 024 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326509e-17 0.000 000 000014e-17 s +atomic unit of velocity 2.187 691 262 77 e6 0.000 000 000 50 e6 m s^-1 +Avogadro constant 6.022 140 857 e23 0.000 000 074 e23 mol^-1 +Bohr magneton 927.400 9994 e-26 0.000 0057 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 8012 e-5 0.000 000 0026 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 245 042 e9 0.000 000 086 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 448 14 0.000 000 29 m^-1 T^-1 +Bohr magneton in K/T 0.671 714 05 0.000 000 39 K T^-1 +Bohr radius 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m +Boltzmann constant 1.380 648 52 e-23 0.000 000 79 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 3303 e-5 0.000 0050 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6612 e10 0.000 0012 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 457 0.000 040 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 3227 e-15 0.000 000 0019 e-15 m +Compton wavelength 2.426 310 2367 e-12 0.000 000 0011 e-12 m +Compton wavelength over 2 pi 386.159 267 64 e-15 0.000 000 18 e-15 m +conductance quantum 7.748 091 7310 e-5 0.000 000 0018 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 535 e-4 0.000 000 026 e-4 +deuteron-electron mass ratio 3670.482 967 85 0.000 000 13 +deuteron g factor 0.857 438 2311 0.000 000 0048 +deuteron mag. mom. 0.433 073 5040 e-26 0.000 000 0036 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4554 e-3 0.000 000 0026 e-3 +deuteron mag. mom. 
to nuclear magneton ratio 0.857 438 2311 0.000 000 0048 +deuteron mass 3.343 583 719 e-27 0.000 000 041 e-27 kg +deuteron mass energy equivalent 3.005 063 183 e-10 0.000 000 037 e-10 J +deuteron mass energy equivalent in MeV 1875.612 928 0.000 012 MeV +deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u +deuteron molar mass 2.013 553 212 745 e-3 0.000 000 000 040 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2077 0.000 000 0015 +deuteron-proton mass ratio 1.999 007 500 87 0.000 000 000 19 +deuteron rms charge radius 2.1413 e-15 0.0025 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 024 e11 0.000 000 011 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 499 0.000 012 +electron-deuteron mass ratio 2.724 437 107 484 e-4 0.000 000 000 096 e-4 +electron g factor -2.002 319 304 361 82 0.000 000 000 000 52 +electron gyromag. ratio 1.760 859 644 e11 0.000 000 011 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.951 64 0.000 17 MHz T^-1 +electron-helion mass ratio 1.819 543 074 854 e-4 0.000 000 000 088 e-4 +electron mag. mom. -928.476 4620 e-26 0.000 0057 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 180 91 e-3 0.000 000 000 26 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 91 0.000 000 000 000 26 +electron mag. mom. to nuclear magneton ratio -1838.281 972 34 0.000 000 17 +electron mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +electron mass energy equivalent 8.187 105 65 e-14 0.000 000 10 e-14 J +electron mass energy equivalent in MeV 0.510 998 9461 0.000 000 0031 MeV +electron mass in u 5.485 799 090 70 e-4 0.000 000 000 16 e-4 u +electron molar mass 5.485 799 090 70 e-7 0.000 000 000 16 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9880 0.000 0046 +electron-muon mass ratio 4.836 331 70 e-3 0.000 000 11 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4428 e-4 0.000 000 0027 e-4 +electron-proton mag. mom. ratio -658.210 6866 0.000 0020 +electron-proton mass ratio 5.446 170 213 52 e-4 0.000 000 000 52 e-4 +electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 +electron to alpha particle mass ratio 1.370 933 554 798 e-4 0.000 000 000 045 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. 
ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 062 203 e-4 0.000 000 000 084 e-4 +electron volt 1.602 176 6208 e-19 0.000 000 0098 e-19 J +electron volt-atomic mass unit relationship 1.073 544 1105 e-9 0.000 000 0066 e-9 u +electron volt-hartree relationship 3.674 932 248 e-2 0.000 000 023 e-2 E_h +electron volt-hertz relationship 2.417 989 262 e14 0.000 000 015 e14 Hz +electron volt-inverse meter relationship 8.065 544 005 e5 0.000 000 050 e5 m^-1 +electron volt-joule relationship 1.602 176 6208 e-19 0.000 000 0098 e-19 J +electron volt-kelvin relationship 1.160 452 21 e4 0.000 000 67 e4 K +electron volt-kilogram relationship 1.782 661 907 e-36 0.000 000 011 e-36 kg +elementary charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C +elementary charge over h 2.417 989 262 e14 0.000 000 015 e14 A J^-1 +Faraday constant 96 485.332 89 0.000 59 C mol^-1 +Faraday constant for conventional electric current 96 485.3251 0.0012 C_90 mol^-1 +Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2 +fine-structure constant 7.297 352 5664 e-3 0.000 000 0017 e-3 +first radiation constant 3.741 771 790 e-16 0.000 000 046 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 953 e-16 0.000 000 015 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 3197 e-8 0.000 000 0013 e-8 u +hartree-electron volt relationship 27.211 386 02 0.000 000 17 eV +Hartree energy 4.359 744 650 e-18 0.000 000 054 e-18 J +Hartree energy in eV 27.211 386 02 0.000 000 17 eV +hartree-hertz relationship 6.579 683 920 711 e15 0.000 000 000 039 e15 Hz +hartree-inverse meter relationship 2.194 746 313 702 e7 0.000 000 000 013 e7 m^-1 +hartree-joule relationship 4.359 744 650 e-18 0.000 000 054 e-18 J +hartree-kelvin relationship 3.157 7513 e5 0.000 0018 e5 K +hartree-kilogram relationship 4.850 870 129 e-35 0.000 000 060 e-35 kg +helion-electron mass ratio 5495.885 279 22 0.000 000 27 +helion g factor -4.255 250 616 0.000 000 050 +helion mag. mom. -1.074 617 522 e-26 0.000 000 014 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 308 0.000 000 025 +helion mass 5.006 412 700 e-27 0.000 000 062 e-27 kg +helion mass energy equivalent 4.499 539 341 e-10 0.000 000 055 e-10 J +helion mass energy equivalent in MeV 2808.391 586 0.000 017 MeV +helion mass in u 3.014 932 246 73 0.000 000 000 12 u +helion molar mass 3.014 932 246 73 e-3 0.000 000 000 12 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 670 46 0.000 000 000 29 +hertz-atomic mass unit relationship 4.439 821 6616 e-24 0.000 000 0020 e-24 u +hertz-electron volt relationship 4.135 667 662 e-15 0.000 000 025 e-15 eV +hertz-hartree relationship 1.5198298460088 e-16 0.0000000000090e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 070 040 e-34 0.000 000 081 e-34 J +hertz-kelvin relationship 4.799 2447 e-11 0.000 0028 e-11 K +hertz-kilogram relationship 7.372 497 201 e-51 0.000 000 091 e-51 kg +inverse fine-structure constant 137.035 999 139 0.000 000 031 +inverse meter-atomic mass unit relationship 1.331 025 049 00 e-15 0.000 000 000 61 e-15 u +inverse meter-electron volt relationship 1.239 841 9739 e-6 0.000 000 0076 e-6 eV +inverse meter-hartree relationship 4.556 335 252 767 e-8 0.000 000 000 027 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 824 e-25 0.000 000 024 e-25 J +inverse meter-kelvin relationship 1.438 777 36 e-2 0.000 000 83 e-2 K +inverse meter-kilogram relationship 2.210 219 057 e-42 0.000 000 027 e-42 kg +inverse of conductance quantum 12 906.403 7278 0.000 0029 ohm +Josephson constant 483 597.8525 e9 0.0030 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 535 363 e9 0.000 000 082 e9 u +joule-electron volt relationship 6.241 509 126 e18 0.000 000 038 e18 eV +joule-hartree relationship 2.293 712 317 e17 0.000 000 028 e17 E_h +joule-hertz relationship 1.509 190 205 e33 0.000 000 019 e33 Hz +joule-inverse meter relationship 5.034 116 651 e24 0.000 000 062 e24 m^-1 +joule-kelvin relationship 7.242 9731 e22 0.000 0042 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 0842 e-14 0.000 0053 e-14 u +kelvin-electron volt relationship 8.617 3303 e-5 0.000 0050 e-5 eV +kelvin-hartree relationship 3.166 8105 e-6 0.000 0018 e-6 E_h +kelvin-hertz relationship 2.083 6612 e10 0.000 0012 e10 Hz +kelvin-inverse meter relationship 69.503 457 0.000 040 m^-1 +kelvin-joule relationship 1.380 648 52 e-23 0.000 000 79 e-23 J +kelvin-kilogram relationship 1.536 178 65 e-40 0.000 000 88 e-40 kg +kilogram-atomic mass unit relationship 6.022 140 857 e26 0.000 000 074 e26 u +kilogram-electron volt relationship 5.609 588 650 e35 0.000 000 034 e35 eV +kilogram-hartree relationship 2.061 485 823 e34 0.000 000 025 e34 E_h +kilogram-hertz relationship 1.356 392 512 e50 0.000 000 017 e50 Hz +kilogram-inverse meter relationship 4.524 438 411 e41 0.000 000 056 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 6595 e39 0.000 0037 e39 K +lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 6467 e25 0.000 0015 e25 m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7811 e25 0.000 0015 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 831 e-15 0.000 000 013 e-15 Wb +molar gas constant 8.314 4598 0.000 0048 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 7110 e-10 0.000 000 0018 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 565 582 0.000 000 000 054 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 947 e-3 0.000 013 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 962 e-3 0.000 013 e-3 m^3 mol^-1 +molar volume of silicon 12.058 832 14 e-6 0.000 000 61 e-6 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 11 e-15 0.000 000 26 e-15 m +muon Compton wavelength over 2 pi 1.867 594 308 e-15 0.000 000 042 e-15 m +muon-electron mass ratio 206.768 2826 0.000 0046 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. 
-4.490 448 26 e-26 0.000 000 10 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 48 e-3 0.000 000 11 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 20 +muon mass 1.883 531 594 e-28 0.000 000 048 e-28 kg +muon mass energy equivalent 1.692 833 774 e-11 0.000 000 043 e-11 J +muon mass energy equivalent in MeV 105.658 3745 0.000 0024 MeV +muon mass in u 0.113 428 9257 0.000 000 0025 u +muon molar mass 0.113 428 9257 e-3 0.000 000 0025 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5167 0.000 000 0025 +muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071 +muon-proton mass ratio 0.112 609 5262 0.000 000 0025 +muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 +natural unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s +natural unit of action in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s +natural unit of energy 8.187 105 65 e-14 0.000 000 10 e-14 J +natural unit of energy in MeV 0.510 998 9461 0.000 000 0031 MeV +natural unit of length 386.159 267 64 e-15 0.000 000 18 e-15 m +natural unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +natural unit of mom.um 2.730 924 488 e-22 0.000 000 034 e-22 kg m s^-1 +natural unit of mom.um in MeV/c 0.510 998 9461 0.000 000 0031 MeV/c +natural unit of time 1.288 088 667 12 e-21 0.000 000 000 58 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 904 81 e-15 0.000 000 000 88 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 415 36 e-15 0.000 000 000 14 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 661 58 0.000 000 90 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 72 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6933 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 50 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 471 e-27 0.000 000 021 e-27 kg +neutron mass energy equivalent 1.505 349 739 e-10 0.000 000 019 e-10 J +neutron mass energy equivalent in MeV 939.565 4133 0.000 0058 MeV +neutron mass in u 1.008 664 915 88 0.000 000 000 49 u +neutron molar mass 1.008 664 915 88 e-3 0.000 000 000 49 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 08 0.000 000 20 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 573 77 e-30 0.000 000 85 e-30 +neutron-proton mass difference energy equivalent 2.072 146 37 e-13 0.000 000 76 e-13 +neutron-proton mass difference energy equivalent in MeV 1.293 332 05 0.000 000 48 +neutron-proton mass difference in u 0.001 388 449 00 0.000 000 000 51 +neutron-proton mass ratio 1.001 378 418 98 0.000 000 000 51 +neutron-tau mass ratio 0.528 790 0.000 048 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 08 e-11 0.000 31 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 61 e-39 0.000 31 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 699 e-27 0.000 000 031 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2550 e-8 0.000 000 0015 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 432 e-2 0.000 000 016 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2690 e-4 0.000 0021 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 285 0.000 000 047 MHz T^-1 +Planck constant 6.626 070 040 e-34 0.000 000 081 e-34 J s +Planck constant in eV s 4.135 667 662 e-15 0.000 000 025 e-15 eV s +Planck constant over 2 pi 1.054 571 800 e-34 0.000 000 013 e-34 J s +Planck constant over 2 pi in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9788 0.000 0012 MeV fm +Planck length 1.616 229 e-35 0.000 038 e-35 m +Planck mass 2.176 470 e-8 0.000 051 e-8 kg +Planck mass energy equivalent in GeV 1.220 910 e19 0.000 029 e19 GeV +Planck temperature 1.416 808 e32 0.000 033 e32 K +Planck time 5.391 16 e-44 0.000 13 e-44 s +proton charge to mass quotient 9.578 833 226 e7 0.000 000 059 e7 C kg^-1 +proton Compton wavelength 1.321 409 853 96 e-15 0.000 000 000 61 e-15 m +proton Compton wavelength over 2 pi 0.210 308910109e-15 0.000 000 000097e-15 m +proton-electron mass ratio 1836.152 673 89 0.000 000 17 +proton g factor 5.585 694 702 0.000 000 017 +proton gyromag. ratio 2.675 221 900 e8 0.000 000 018 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 478 92 0.000 000 29 MHz T^-1 +proton mag. mom. 1.410 606 7873 e-26 0.000 000 0097 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 2053 e-3 0.000 000 0046 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 3508 0.000 000 0085 +proton mag. shielding correction 25.691 e-6 0.011 e-6 +proton mass 1.672 621 898 e-27 0.000 000 021 e-27 kg +proton mass energy equivalent 1.503 277 593 e-10 0.000 000 018 e-10 J +proton mass energy equivalent in MeV 938.272 0813 0.000 0058 MeV +proton mass in u 1.007 276 466 879 0.000 000 000 091 u +proton molar mass 1.007 276 466 879 e-3 0.000 000 000 091 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 38 0.000 000 20 +proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 44 0.000 000 000 51 +proton rms charge radius 0.8751 e-15 0.0061 e-15 m +proton-tau mass ratio 0.528 063 0.000 048 +quantum of circulation 3.636 947 5486 e-4 0.000 000 0017 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 0972 e-4 0.000 000 0033 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 508 0.000 065 m^-1 +Rydberg constant times c in Hz 3.289 841 960 355 e15 0.000 000 000 019 e15 Hz +Rydberg constant times hc in eV 13.605 693 009 0.000 000 084 eV +Rydberg constant times hc in J 2.179 872 325 e-18 0.000 000 027 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7084 0.000 0014 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8714 0.000 0014 +second radiation constant 1.438 777 36 e-2 0.000 000 83 e-2 m K +shielded helion gyromag. ratio 2.037 894 585 e8 0.000 000 027 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 099 66 0.000 000 43 MHz T^-1 +shielded helion mag. mom. -1.074 553 080 e-26 0.000 000 014 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 720 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 5603 0.000 000 0092 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 171 e8 0.000 000 033 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 385 07 0.000 000 53 MHz T^-1 +shielded proton mag. mom. 1.410 570 547 e-26 0.000 000 018 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 600 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 367 e-8 0.000 013 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m +tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m +tau-electron mass ratio 3477.15 0.31 +tau mass 3.167 47 e-27 0.000 29 e-27 kg +tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J +tau mass energy equivalent in MeV 1776.82 0.16 MeV +tau mass in u 1.907 49 0.000 17 u +tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 +tau-muon mass ratio 16.8167 0.0015 +tau-neutron mass ratio 1.891 11 0.000 17 +tau-proton mass ratio 1.893 72 0.000 17 +Thomson cross section 0.665 245 871 58 e-28 0.000 000 000 91 e-28 m^2 +triton-electron mass ratio 5496.921 535 88 0.000 000 26 +triton g factor 5.957 924 920 0.000 000 028 +triton mag. mom. 1.504 609 503 e-26 0.000 000 012 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 6616 e-3 0.000 000 0076 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 460 0.000 000 014 +triton mass 5.007 356 665 e-27 0.000 000 062 e-27 kg +triton mass energy equivalent 4.500 387 735 e-10 0.000 000 055 e-10 J +triton mass energy equivalent in MeV 2808.921 112 0.000 017 MeV +triton mass in u 3.015 500 716 32 0.000 000 000 11 u +triton molar mass 3.015 500 716 32 e-3 0.000 000 000 11 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 033 48 0.000 000 000 22 +unified atomic mass unit 1.660 539 040 e-27 0.000 000 020 e-27 kg +von Klitzing constant 25 812.807 4555 0.000 0059 ohm +weak mixing angle 0.2223 0.0021 +Wien frequency displacement law constant 5.878 9238 e10 0.000 0034 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7729 e-3 0.000 0017 e-3 m K""" + +txt2018 = """\ +alpha particle-electron mass ratio 7294.299 541 42 0.000 000 24 +alpha particle mass 6.644 657 3357 e-27 0.000 000 0020 e-27 kg +alpha particle mass energy equivalent 5.971 920 1914 e-10 0.000 000 0018 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 4066 0.000 0011 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u +alpha particle molar mass 4.001 506 1777 e-3 0.000 000 0012 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 690 09 0.000 000 000 22 +alpha particle relative atomic mass 4.001 506 179 127 0.000 000 000 063 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +atomic mass constant energy equivalent 1.492 418 085 60 e-10 0.000 000 000 45 e-10 J +atomic mass constant energy equivalent in MeV 931.494 102 42 0.000 000 28 MeV +atomic mass unit-electron volt relationship 9.314 941 0242 e8 0.000 000 0028 e8 eV +atomic mass unit-hartree relationship 3.423 177 6874 e7 0.000 000 0010 e7 E_h +atomic mass unit-hertz relationship 2.252 342 718 71 e23 0.000 000 000 68 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6104 e14 0.000 
000 0023 e14 m^-1 +atomic mass unit-joule relationship 1.492 418 085 60 e-10 0.000 000 000 45 e-10 J +atomic mass unit-kelvin relationship 1.080 954 019 16 e13 0.000 000 000 33 e13 K +atomic mass unit-kilogram relationship 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 3061 e-53 0.000 000 0015 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 379 9905 e-65 0.000 000 0038 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 817... e-34 (exact) J s +atomic unit of charge 1.602 176 634 e-19 (exact) C +atomic unit of charge density 1.081 202 384 57 e12 0.000 000 000 49 e12 C m^-3 +atomic unit of current 6.623 618 237 510 e-3 0.000 000 000 013 e-3 A +atomic unit of electric dipole mom. 8.478 353 6255 e-30 0.000 000 0013 e-30 C m +atomic unit of electric field 5.142 206 747 63 e11 0.000 000 000 78 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 4292 e21 0.000 000 0029 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 274 36 e-41 0.000 000 000 50 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 386 245 988 0.000 000 000 053 V +atomic unit of electric quadrupole mom. 4.486 551 5246 e-40 0.000 000 0014 e-40 C m^2 +atomic unit of energy 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +atomic unit of force 8.238 723 4983 e-8 0.000 000 0012 e-8 N +atomic unit of length 5.291 772 109 03 e-11 0.000 000 000 80 e-11 m +atomic unit of mag. dipole mom. 1.854 802 015 66 e-23 0.000 000 000 56 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 567 58 e5 0.000 000 000 71 e5 T +atomic unit of magnetizability 7.891 036 6008 e-29 0.000 000 0048 e-29 J T^-2 +atomic unit of mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +atomic unit of momentum 1.992 851 914 10 e-24 0.000 000 000 30 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 055 45 e-10 0.000 000 000 17 e-10 F m^-1 +atomic unit of time 2.418 884 326 5857 e-17 0.000 000 000 0047 e-17 s +atomic unit of velocity 2.187 691 263 64 e6 0.000 000 000 33 e6 m s^-1 +Avogadro constant 6.022 140 76 e23 (exact) mol^-1 +Bohr magneton 9.274 010 0783 e-24 0.000 000 0028 e-24 J T^-1 +Bohr magneton in eV/T 5.788 381 8060 e-5 0.000 000 0017 e-5 eV T^-1 +Bohr magneton in Hz/T 1.399 624 493 61 e10 0.000 000 000 42 e10 Hz T^-1 +Bohr magneton in inverse meter per tesla 46.686 447 783 0.000 000 014 m^-1 T^-1 +Bohr magneton in K/T 0.671 713 815 63 0.000 000 000 20 K T^-1 +Bohr radius 5.291 772 109 03 e-11 0.000 000 000 80 e-11 m +Boltzmann constant 1.380 649 e-23 (exact) J K^-1 +Boltzmann constant in eV/K 8.617 333 262... e-5 (exact) eV K^-1 +Boltzmann constant in Hz/K 2.083 661 912... e10 (exact) Hz K^-1 +Boltzmann constant in inverse meter per kelvin 69.503 480 04... (exact) m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 668 0.000 000 057 ohm +classical electron radius 2.817 940 3262 e-15 0.000 000 0013 e-15 m +Compton wavelength 2.426 310 238 67 e-12 0.000 000 000 73 e-12 m +conductance quantum 7.748 091 729... e-5 (exact) S +conventional value of ampere-90 1.000 000 088 87... (exact) A +conventional value of coulomb-90 1.000 000 088 87... (exact) C +conventional value of farad-90 0.999 999 982 20... (exact) F +conventional value of henry-90 1.000 000 017 79... (exact) H +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of ohm-90 1.000 000 017 79... (exact) ohm +conventional value of volt-90 1.000 000 106 66... 
(exact) V +conventional value of von Klitzing constant 25 812.807 (exact) ohm +conventional value of watt-90 1.000 000 195 53... (exact) W +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 551 e-4 0.000 000 012 e-4 +deuteron-electron mass ratio 3670.482 967 88 0.000 000 13 +deuteron g factor 0.857 438 2338 0.000 000 0022 +deuteron mag. mom. 4.330 735 094 e-27 0.000 000 011 e-27 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 4.669 754 570 e-4 0.000 000 012 e-4 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2338 0.000 000 0022 +deuteron mass 3.343 583 7724 e-27 0.000 000 0010 e-27 kg +deuteron mass energy equivalent 3.005 063 231 02 e-10 0.000 000 000 91 e-10 J +deuteron mass energy equivalent in MeV 1875.612 942 57 0.000 000 57 MeV +deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u +deuteron molar mass 2.013 553 212 05 e-3 0.000 000 000 61 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 53 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 209 39 0.000 000 000 79 +deuteron-proton mass ratio 1.999 007 501 39 0.000 000 000 11 +deuteron relative atomic mass 2.013 553 212 745 0.000 000 000 040 +deuteron rms charge radius 2.127 99 e-15 0.000 74 e-15 m +electron charge to mass quotient -1.758 820 010 76 e11 0.000 000 000 53 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 4915 0.000 0056 +electron-deuteron mass ratio 2.724 437 107 462 e-4 0.000 000 000 096 e-4 +electron g factor -2.002 319 304 362 56 0.000 000 000 000 35 +electron gyromag. ratio 1.760 859 630 23 e11 0.000 000 000 53 e11 s^-1 T^-1 +electron gyromag. ratio in MHz/T 28 024.951 4242 0.000 0085 MHz T^-1 +electron-helion mass ratio 1.819 543 074 573 e-4 0.000 000 000 079 e-4 +electron mag. mom. -9.284 764 7043 e-24 0.000 000 0028 e-24 J T^-1 +electron mag. mom. anomaly 1.159 652 181 28 e-3 0.000 000 000 18 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 28 0.000 000 000 000 18 +electron mag. mom. to nuclear magneton ratio -1838.281 971 88 0.000 000 11 +electron mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +electron mass energy equivalent 8.187 105 7769 e-14 0.000 000 0025 e-14 J +electron mass energy equivalent in MeV 0.510 998 950 00 0.000 000 000 15 MeV +electron mass in u 5.485 799 090 65 e-4 0.000 000 000 16 e-4 u +electron molar mass 5.485 799 0888 e-7 0.000 000 0017 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9883 0.000 0046 +electron-muon mass ratio 4.836 331 69 e-3 0.000 000 11 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4424 e-4 0.000 000 0026 e-4 +electron-proton mag. mom. ratio -658.210 687 89 0.000 000 20 +electron-proton mass ratio 5.446 170 214 87 e-4 0.000 000 000 33 e-4 +electron relative atomic mass 5.485 799 090 65 e-4 0.000 000 000 16 e-4 +electron-tau mass ratio 2.875 85 e-4 0.000 19 e-4 +electron to alpha particle mass ratio 1.370 933 554 787 e-4 0.000 000 000 045 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 062 251 e-4 0.000 000 000 090 e-4 +electron volt 1.602 176 634 e-19 (exact) J +electron volt-atomic mass unit relationship 1.073 544 102 33 e-9 0.000 000 000 32 e-9 u +electron volt-hartree relationship 3.674 932 217 5655 e-2 0.000 000 000 0071 e-2 E_h +electron volt-hertz relationship 2.417 989 242... e14 (exact) Hz +electron volt-inverse meter relationship 8.065 543 937... 
e5 (exact) m^-1 +electron volt-joule relationship 1.602 176 634 e-19 (exact) J +electron volt-kelvin relationship 1.160 451 812... e4 (exact) K +electron volt-kilogram relationship 1.782 661 921... e-36 (exact) kg +elementary charge 1.602 176 634 e-19 (exact) C +elementary charge over h-bar 1.519 267 447... e15 (exact) A J^-1 +Faraday constant 96 485.332 12... (exact) C mol^-1 +Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2 +fine-structure constant 7.297 352 5693 e-3 0.000 000 0011 e-3 +first radiation constant 3.741 771 852... e-16 (exact) W m^2 +first radiation constant for spectral radiance 1.191 042 972... e-16 (exact) W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 322 05 e-8 0.000 000 000 88 e-8 u +hartree-electron volt relationship 27.211 386 245 988 0.000 000 000 053 eV +Hartree energy 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +Hartree energy in eV 27.211 386 245 988 0.000 000 000 053 eV +hartree-hertz relationship 6.579 683 920 502 e15 0.000 000 000 013 e15 Hz +hartree-inverse meter relationship 2.194 746 313 6320 e7 0.000 000 000 0043 e7 m^-1 +hartree-joule relationship 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +hartree-kelvin relationship 3.157 750 248 0407 e5 0.000 000 000 0061 e5 K +hartree-kilogram relationship 4.850 870 209 5432 e-35 0.000 000 000 0094 e-35 kg +helion-electron mass ratio 5495.885 280 07 0.000 000 24 +helion g factor -4.255 250 615 0.000 000 050 +helion mag. mom. -1.074 617 532 e-26 0.000 000 013 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 307 0.000 000 025 +helion mass 5.006 412 7796 e-27 0.000 000 0015 e-27 kg +helion mass energy equivalent 4.499 539 4125 e-10 0.000 000 0014 e-10 J +helion mass energy equivalent in MeV 2808.391 607 43 0.000 000 85 MeV +helion mass in u 3.014 932 247 175 0.000 000 000 097 u +helion molar mass 3.014 932 246 13 e-3 0.000 000 000 91 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 671 67 0.000 000 000 13 +helion relative atomic mass 3.014 932 247 175 0.000 000 000 097 +helion shielding shift 5.996 743 e-5 0.000 010 e-5 +hertz-atomic mass unit relationship 4.439 821 6652 e-24 0.000 000 0013 e-24 u +hertz-electron volt relationship 4.135 667 696... e-15 (exact) eV +hertz-hartree relationship 1.519 829 846 0570 e-16 0.000 000 000 0029 e-16 E_h +hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1 +hertz-joule relationship 6.626 070 15 e-34 (exact) J +hertz-kelvin relationship 4.799 243 073... e-11 (exact) K +hertz-kilogram relationship 7.372 497 323... e-51 (exact) kg +hyperfine transition frequency of Cs-133 9 192 631 770 (exact) Hz +inverse fine-structure constant 137.035 999 084 0.000 000 021 +inverse meter-atomic mass unit relationship 1.331 025 050 10 e-15 0.000 000 000 40 e-15 u +inverse meter-electron volt relationship 1.239 841 984... e-6 (exact) eV +inverse meter-hartree relationship 4.556 335 252 9120 e-8 0.000 000 000 0088 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 857... e-25 (exact) J +inverse meter-kelvin relationship 1.438 776 877... e-2 (exact) K +inverse meter-kilogram relationship 2.210 219 094... e-42 (exact) kg +inverse of conductance quantum 12 906.403 72... (exact) ohm +Josephson constant 483 597.848 4... e9 (exact) Hz V^-1 +joule-atomic mass unit relationship 6.700 535 2565 e9 0.000 000 0020 e9 u +joule-electron volt relationship 6.241 509 074... 
e18 (exact) eV +joule-hartree relationship 2.293 712 278 3963 e17 0.000 000 000 0045 e17 E_h +joule-hertz relationship 1.509 190 179... e33 (exact) Hz +joule-inverse meter relationship 5.034 116 567... e24 (exact) m^-1 +joule-kelvin relationship 7.242 970 516... e22 (exact) K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 087 3014 e-14 0.000 000 0028 e-14 u +kelvin-electron volt relationship 8.617 333 262... e-5 (exact) eV +kelvin-hartree relationship 3.166 811 563 4556 e-6 0.000 000 000 0061 e-6 E_h +kelvin-hertz relationship 2.083 661 912... e10 (exact) Hz +kelvin-inverse meter relationship 69.503 480 04... (exact) m^-1 +kelvin-joule relationship 1.380 649 e-23 (exact) J +kelvin-kilogram relationship 1.536 179 187... e-40 (exact) kg +kilogram-atomic mass unit relationship 6.022 140 7621 e26 0.000 000 0018 e26 u +kilogram-electron volt relationship 5.609 588 603... e35 (exact) eV +kilogram-hartree relationship 2.061 485 788 7409 e34 0.000 000 000 0040 e34 E_h +kilogram-hertz relationship 1.356 392 489... e50 (exact) Hz +kilogram-inverse meter relationship 4.524 438 335... e41 (exact) m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 657 260... e39 (exact) K +lattice parameter of silicon 5.431 020 511 e-10 0.000 000 089 e-10 m +lattice spacing of ideal Si (220) 1.920 155 716 e-10 0.000 000 032 e-10 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 645 804... e25 (exact) m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 780 111... e25 (exact) m^-3 +luminous efficacy 683 (exact) lm W^-1 +mag. flux quantum 2.067 833 848... e-15 (exact) Wb +molar gas constant 8.314 462 618... (exact) J mol^-1 K^-1 +molar mass constant 0.999 999 999 65 e-3 0.000 000 000 30 e-3 kg mol^-1 +molar mass of carbon-12 11.999 999 9958 e-3 0.000 000 0036 e-3 kg mol^-1 +molar Planck constant 3.990 312 712... e-10 (exact) J Hz^-1 mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 954 64... e-3 (exact) m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 969 54... e-3 (exact) m^3 mol^-1 +molar volume of silicon 1.205 883 199 e-5 0.000 000 060 e-5 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 1.173 444 110 e-14 0.000 000 026 e-14 m +muon-electron mass ratio 206.768 2830 0.000 0046 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. -4.490 448 30 e-26 0.000 000 10 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 47 e-3 0.000 000 11 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 03 0.000 000 20 +muon mass 1.883 531 627 e-28 0.000 000 042 e-28 kg +muon mass energy equivalent 1.692 833 804 e-11 0.000 000 038 e-11 J +muon mass energy equivalent in MeV 105.658 3755 0.000 0023 MeV +muon mass in u 0.113 428 9259 0.000 000 0025 u +muon molar mass 1.134 289 259 e-4 0.000 000 025 e-4 kg mol^-1 +muon-neutron mass ratio 0.112 454 5170 0.000 000 0025 +muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071 +muon-proton mass ratio 0.112 609 5264 0.000 000 0025 +muon-tau mass ratio 5.946 35 e-2 0.000 40 e-2 +natural unit of action 1.054 571 817... e-34 (exact) J s +natural unit of action in eV s 6.582 119 569... 
e-16 (exact) eV s +natural unit of energy 8.187 105 7769 e-14 0.000 000 0025 e-14 J +natural unit of energy in MeV 0.510 998 950 00 0.000 000 000 15 MeV +natural unit of length 3.861 592 6796 e-13 0.000 000 0012 e-13 m +natural unit of mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +natural unit of momentum 2.730 924 530 75 e-22 0.000 000 000 82 e-22 kg m s^-1 +natural unit of momentum in MeV/c 0.510 998 950 00 0.000 000 000 15 MeV/c +natural unit of time 1.288 088 668 19 e-21 0.000 000 000 39 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 905 81 e-15 0.000 000 000 75 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 661 73 0.000 000 89 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 71 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio in MHz/T 29.164 6931 0.000 0069 MHz T^-1 +neutron mag. mom. -9.662 3651 e-27 0.000 0023 e-27 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 498 04 e-27 0.000 000 000 95 e-27 kg +neutron mass energy equivalent 1.505 349 762 87 e-10 0.000 000 000 86 e-10 J +neutron mass energy equivalent in MeV 939.565 420 52 0.000 000 54 MeV +neutron mass in u 1.008 664 915 95 0.000 000 000 49 u +neutron molar mass 1.008 664 915 60 e-3 0.000 000 000 57 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 06 0.000 000 20 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 574 35 e-30 0.000 000 82 e-30 kg +neutron-proton mass difference energy equivalent 2.072 146 89 e-13 0.000 000 74 e-13 J +neutron-proton mass difference energy equivalent in MeV 1.293 332 36 0.000 000 46 MeV +neutron-proton mass difference in u 1.388 449 33 e-3 0.000 000 49 e-3 u +neutron-proton mass ratio 1.001 378 419 31 0.000 000 000 49 +neutron relative atomic mass 1.008 664 915 95 0.000 000 000 49 +neutron-tau mass ratio 0.528 779 0.000 036 +neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 30 e-11 0.000 15 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 83 e-39 0.000 15 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 7461 e-27 0.000 000 0015 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 258 44 e-8 0.000 000 000 96 e-8 eV T^-1 +nuclear magneton in inverse meter per tesla 2.542 623 413 53 e-2 0.000 000 000 78 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 267 7756 e-4 0.000 000 0011 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 2291 0.000 000 0023 MHz T^-1 +Planck constant 6.626 070 15 e-34 (exact) J Hz^-1 +Planck constant in eV/Hz 4.135 667 696... e-15 (exact) eV Hz^-1 +Planck length 1.616 255 e-35 0.000 018 e-35 m +Planck mass 2.176 434 e-8 0.000 024 e-8 kg +Planck mass energy equivalent in GeV 1.220 890 e19 0.000 014 e19 GeV +Planck temperature 1.416 784 e32 0.000 016 e32 K +Planck time 5.391 247 e-44 0.000 060 e-44 s +proton charge to mass quotient 9.578 833 1560 e7 0.000 000 0029 e7 C kg^-1 +proton Compton wavelength 1.321 409 855 39 e-15 0.000 000 000 40 e-15 m +proton-electron mass ratio 1836.152 673 43 0.000 000 11 +proton g factor 5.585 694 6893 0.000 000 0016 +proton gyromag. ratio 2.675 221 8744 e8 0.000 000 0011 e8 s^-1 T^-1 +proton gyromag. ratio in MHz/T 42.577 478 518 0.000 000 018 MHz T^-1 +proton mag. mom. 1.410 606 797 36 e-26 0.000 000 000 60 e-26 J T^-1 +proton mag. mom. 
to Bohr magneton ratio 1.521 032 202 30 e-3 0.000 000 000 46 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 344 63 0.000 000 000 82 +proton mag. shielding correction 2.5689 e-5 0.0011 e-5 +proton mass 1.672 621 923 69 e-27 0.000 000 000 51 e-27 kg +proton mass energy equivalent 1.503 277 615 98 e-10 0.000 000 000 46 e-10 J +proton mass energy equivalent in MeV 938.272 088 16 0.000 000 29 MeV +proton mass in u 1.007 276 466 621 0.000 000 000 053 u +proton molar mass 1.007 276 466 27 e-3 0.000 000 000 31 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 37 0.000 000 20 +proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 12 0.000 000 000 49 +proton relative atomic mass 1.007 276 466 621 0.000 000 000 053 +proton rms charge radius 8.414 e-16 0.019 e-16 m +proton-tau mass ratio 0.528 051 0.000 036 +quantum of circulation 3.636 947 5516 e-4 0.000 000 0011 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 1032 e-4 0.000 000 0022 e-4 m^2 s^-1 +reduced Compton wavelength 3.861 592 6796 e-13 0.000 000 0012 e-13 m +reduced muon Compton wavelength 1.867 594 306 e-15 0.000 000 042 e-15 m +reduced neutron Compton wavelength 2.100 194 1552 e-16 0.000 000 0012 e-16 m +reduced Planck constant 1.054 571 817... e-34 (exact) J s +reduced Planck constant in eV s 6.582 119 569... e-16 (exact) eV s +reduced Planck constant times c in MeV fm 197.326 980 4... (exact) MeV fm +reduced proton Compton wavelength 2.103 089 103 36 e-16 0.000 000 000 64 e-16 m +reduced tau Compton wavelength 1.110 538 e-16 0.000 075 e-16 m +Rydberg constant 10 973 731.568 160 0.000 021 m^-1 +Rydberg constant times c in Hz 3.289 841 960 2508 e15 0.000 000 000 0064 e15 Hz +Rydberg constant times hc in eV 13.605 693 122 994 0.000 000 000 026 eV +Rydberg constant times hc in J 2.179 872 361 1035 e-18 0.000 000 000 0042 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 707 537 06 0.000 000 000 45 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 870 523 58 0.000 000 000 45 +second radiation constant 1.438 776 877... e-2 (exact) m K +shielded helion gyromag. ratio 2.037 894 569 e8 0.000 000 024 e8 s^-1 T^-1 +shielded helion gyromag. ratio in MHz/T 32.434 099 42 0.000 000 38 MHz T^-1 +shielded helion mag. mom. -1.074 553 090 e-26 0.000 000 013 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 719 0.000 000 025 +shielded helion to proton mag. mom. ratio -0.761 766 5618 0.000 000 0089 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 151 e8 0.000 000 029 e8 s^-1 T^-1 +shielded proton gyromag. ratio in MHz/T 42.576 384 74 0.000 000 46 MHz T^-1 +shielded proton mag. mom. 1.410 570 560 e-26 0.000 000 015 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 599 0.000 000 030 +shielding difference of d and p in HD 2.0200 e-8 0.0020 e-8 +shielding difference of t and p in HT 2.4140 e-8 0.0020 e-8 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 374 419... 
e-8 (exact) W m^-2 K^-4 +tau Compton wavelength 6.977 71 e-16 0.000 47 e-16 m +tau-electron mass ratio 3477.23 0.23 +tau energy equivalent 1776.86 0.12 MeV +tau mass 3.167 54 e-27 0.000 21 e-27 kg +tau mass energy equivalent 2.846 84 e-10 0.000 19 e-10 J +tau mass in u 1.907 54 0.000 13 u +tau molar mass 1.907 54 e-3 0.000 13 e-3 kg mol^-1 +tau-muon mass ratio 16.8170 0.0011 +tau-neutron mass ratio 1.891 15 0.000 13 +tau-proton mass ratio 1.893 76 0.000 13 +Thomson cross section 6.652 458 7321 e-29 0.000 000 0060 e-29 m^2 +triton-electron mass ratio 5496.921 535 73 0.000 000 27 +triton g factor 5.957 924 931 0.000 000 012 +triton mag. mom. 1.504 609 5202 e-26 0.000 000 0030 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 6651 e-3 0.000 000 0032 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 4656 0.000 000 0059 +triton mass 5.007 356 7446 e-27 0.000 000 0015 e-27 kg +triton mass energy equivalent 4.500 387 8060 e-10 0.000 000 0014 e-10 J +triton mass energy equivalent in MeV 2808.921 132 98 0.000 000 85 MeV +triton mass in u 3.015 500 716 21 0.000 000 000 12 u +triton molar mass 3.015 500 715 17 e-3 0.000 000 000 92 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 034 14 0.000 000 000 15 +triton relative atomic mass 3.015 500 716 21 0.000 000 000 12 +triton to proton mag. mom. ratio 1.066 639 9191 0.000 000 0021 +unified atomic mass unit 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +vacuum electric permittivity 8.854 187 8128 e-12 0.000 000 0013 e-12 F m^-1 +vacuum mag. permeability 1.256 637 062 12 e-6 0.000 000 000 19 e-6 N A^-2 +von Klitzing constant 25 812.807 45... (exact) ohm +weak mixing angle 0.222 90 0.000 30 +Wien frequency displacement law constant 5.878 925 757... e10 (exact) Hz K^-1 +Wien wavelength displacement law constant 2.897 771 955... 
e-3 (exact) m K +W to Z mass ratio 0.881 53 0.000 17 """ + +# ----------------------------------------------------------------------------- + +physical_constants: dict[str, tuple[float, str, float]] = {} + + +def parse_constants_2002to2014(d: str) -> dict[str, tuple[float, str, float]]: + constants = {} + for line in d.split('\n'): + name = line[:55].rstrip() + val = float(line[55:77].replace(' ', '').replace('...', '')) + uncert = float(line[77:99].replace(' ', '').replace('(exact)', '0')) + units = line[99:].rstrip() + constants[name] = (val, units, uncert) + return constants + + +def parse_constants_2018toXXXX(d: str) -> dict[str, tuple[float, str, float]]: + constants = {} + for line in d.split('\n'): + name = line[:60].rstrip() + val = float(line[60:85].replace(' ', '').replace('...', '')) + uncert = float(line[85:110].replace(' ', '').replace('(exact)', '0')) + units = line[110:].rstrip() + constants[name] = (val, units, uncert) + return constants + + +_physical_constants_2002 = parse_constants_2002to2014(txt2002) +_physical_constants_2006 = parse_constants_2002to2014(txt2006) +_physical_constants_2010 = parse_constants_2002to2014(txt2010) +_physical_constants_2014 = parse_constants_2002to2014(txt2014) +_physical_constants_2018 = parse_constants_2018toXXXX(txt2018) + + +physical_constants.update(_physical_constants_2002) +physical_constants.update(_physical_constants_2006) +physical_constants.update(_physical_constants_2010) +physical_constants.update(_physical_constants_2014) +physical_constants.update(_physical_constants_2018) +_current_constants = _physical_constants_2018 +_current_codata = "CODATA 2018" + +# check obsolete values +_obsolete_constants = {} +for k in physical_constants: + if k not in _current_constants: + _obsolete_constants[k] = True + +# generate some additional aliases +_aliases = {} +for k in _physical_constants_2002: + if 'magn.' in k: + _aliases[k] = k.replace('magn.', 'mag.') +for k in _physical_constants_2006: + if 'momentum' in k: + _aliases[k] = k.replace('momentum', 'mom.um') +for k in _physical_constants_2018: + if 'momentum' in k: + _aliases[k] = k.replace('momentum', 'mom.um') + +# CODATA 2018: renamed and no longer exact; use as aliases +_aliases['mag. constant'] = 'vacuum mag. 
permeability' +_aliases['electric constant'] = 'vacuum electric permittivity' + + +class ConstantWarning(DeprecationWarning): + """Accessing a constant no longer in current CODATA data set""" + pass + + +def _check_obsolete(key: str) -> None: + if key in _obsolete_constants and key not in _aliases: + warnings.warn(f"Constant '{key}' is not in current {_current_codata} data set", + ConstantWarning, stacklevel=3) + + +def value(key: str) -> float: + """ + Value in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + value : float + Value in `physical_constants` corresponding to `key` + + Examples + -------- + >>> from scipy import constants + >>> constants.value('elementary charge') + 1.602176634e-19 + + """ + _check_obsolete(key) + return physical_constants[key][0] + + +def unit(key: str) -> str: + """ + Unit in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + unit : Python string + Unit in `physical_constants` corresponding to `key` + + Examples + -------- + >>> from scipy import constants + >>> constants.unit('proton mass') + 'kg' + + """ + _check_obsolete(key) + return physical_constants[key][1] + + +def precision(key: str) -> float: + """ + Relative precision in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + prec : float + Relative precision in `physical_constants` corresponding to `key` + + Examples + -------- + >>> from scipy import constants + >>> constants.precision('proton mass') + 5.1e-37 + + """ + _check_obsolete(key) + return physical_constants[key][2] / physical_constants[key][0] + + +def find(sub: str | None = None, disp: bool = False) -> Any: + """ + Return list of physical_constant keys containing a given string. + + Parameters + ---------- + sub : str + Sub-string to search keys for. By default, return all keys. + disp : bool + If True, print the keys that are found and return None. + Otherwise, return the list of keys without printing anything. + + Returns + ------- + keys : list or None + If `disp` is False, the list of keys is returned. + Otherwise, None is returned. + + Examples + -------- + >>> from scipy.constants import find, physical_constants + + Which keys in the ``physical_constants`` dictionary contain 'boltzmann'? + + >>> find('boltzmann') + ['Boltzmann constant', + 'Boltzmann constant in Hz/K', + 'Boltzmann constant in eV/K', + 'Boltzmann constant in inverse meter per kelvin', + 'Stefan-Boltzmann constant'] + + Get the constant called 'Boltzmann constant in Hz/K': + + >>> physical_constants['Boltzmann constant in Hz/K'] + (20836619120.0, 'Hz K^-1', 0.0) + + Find constants with 'radius' in the key: + + >>> find('radius') + ['Bohr radius', + 'classical electron radius', + 'deuteron rms charge radius', + 'proton rms charge radius'] + >>> physical_constants['classical electron radius'] + (2.8179403262e-15, 'm', 1.3e-24) + + """ + if sub is None: + result = list(_current_constants.keys()) + else: + result = [key for key in _current_constants + if sub.lower() in key.lower()] + + result.sort() + if disp: + for key in result: + print(key) + return + else: + return result + + +c = value('speed of light in vacuum') +mu0 = value('vacuum mag. 
permeability') +epsilon0 = value('vacuum electric permittivity') + +# Table is lacking some digits for exact values: calculate from definition +exact_values = { + 'joule-kilogram relationship': (1 / (c * c), 'kg', 0.0), + 'kilogram-joule relationship': (c * c, 'J', 0.0), + 'hertz-inverse meter relationship': (1 / c, 'm^-1', 0.0), +} + +# sanity check +for key in exact_values: + val = physical_constants[key][0] + if abs(exact_values[key][0] - val) / val > 1e-9: + raise ValueError("Constants.codata: exact values too far off.") + if exact_values[key][2] == 0 and physical_constants[key][2] != 0: + raise ValueError("Constants.codata: value not exact") + +physical_constants.update(exact_values) + +_tested_keys = ['natural unit of velocity', + 'natural unit of action', + 'natural unit of action in eV s', + 'natural unit of mass', + 'natural unit of energy', + 'natural unit of energy in MeV', + 'natural unit of mom.um', + 'natural unit of mom.um in MeV/c', + 'natural unit of length', + 'natural unit of time'] + +# finally, insert aliases for values +for k, v in list(_aliases.items()): + if v in _current_constants or v in _tested_keys: + physical_constants[k] = physical_constants[v] + else: + del _aliases[k] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/_constants.py b/env-llmeval/lib/python3.10/site-packages/scipy/constants/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..fa379828ddd62bedc92f2e0e81b51ce550ca90fd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/constants/_constants.py @@ -0,0 +1,362 @@ +""" +Collection of physical constants and conversion factors. + +Most constants are in SI units, so you can do +print '10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots' + +The list is not meant to be comprehensive, but just convenient for everyday use. +""" + +from __future__ import annotations + +import math as _math +from typing import TYPE_CHECKING, Any + +from ._codata import value as _cd +import numpy as _np + +if TYPE_CHECKING: + import numpy.typing as npt + +""" +BasSw 2006 +physical constants: imported from CODATA +unit conversion: see e.g., NIST special publication 811 +Use at own risk: double-check values before calculating your Mars orbit-insertion burn. +Some constants exist in a few variants, which are marked with suffixes. +The ones without any suffix should be the most common ones. 
+""" + +__all__ = [ + 'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G', + 'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg', + 'Stefan_Boltzmann', 'Wien', 'acre', 'alpha', + 'angstrom', 'arcmin', 'arcminute', 'arcsec', + 'arcsecond', 'astronomical_unit', 'atm', + 'atmosphere', 'atomic_mass', 'atto', 'au', 'bar', + 'barrel', 'bbl', 'blob', 'c', 'calorie', + 'calorie_IT', 'calorie_th', 'carat', 'centi', + 'convert_temperature', 'day', 'deci', 'degree', + 'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e', + 'eV', 'electron_mass', 'electron_volt', + 'elementary_charge', 'epsilon_0', 'erg', + 'exa', 'exbi', 'femto', 'fermi', 'fine_structure', + 'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp', + 'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp', + 'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio', + 'grain', 'gram', 'gravitational_constant', 'h', 'hbar', + 'hectare', 'hecto', 'horsepower', 'hour', 'hp', + 'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force', + 'kmh', 'knot', 'lambda2nu', 'lb', 'lbf', + 'light_year', 'liter', 'litre', 'long_ton', 'm_e', + 'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega', + 'metric_ton', 'micro', 'micron', 'mil', 'mile', + 'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano', + 'nautical_mile', 'neutron_mass', 'nu2lambda', + 'ounce', 'oz', 'parsec', 'pebi', 'peta', + 'pi', 'pico', 'point', 'pound', 'pound_force', + 'proton_mass', 'psi', 'pt', 'quecto', 'quetta', 'ronna', 'ronto', + 'short_ton', 'sigma', 'slinch', 'slug', 'speed_of_light', + 'speed_of_sound', 'stone', 'survey_foot', + 'survey_mile', 'tebi', 'tera', 'ton_TNT', + 'torr', 'troy_ounce', 'troy_pound', 'u', + 'week', 'yard', 'year', 'yobi', 'yocto', + 'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta' +] + + +# mathematical constants +pi = _math.pi +golden = golden_ratio = (1 + _math.sqrt(5)) / 2 + +# SI prefixes +quetta = 1e30 +ronna = 1e27 +yotta = 1e24 +zetta = 1e21 +exa = 1e18 +peta = 1e15 +tera = 1e12 +giga = 1e9 +mega = 1e6 +kilo = 1e3 +hecto = 1e2 +deka = 1e1 +deci = 1e-1 +centi = 1e-2 +milli = 1e-3 +micro = 1e-6 +nano = 1e-9 +pico = 1e-12 +femto = 1e-15 +atto = 1e-18 +zepto = 1e-21 +yocto = 1e-24 +ronto = 1e-27 +quecto = 1e-30 + +# binary prefixes +kibi = 2**10 +mebi = 2**20 +gibi = 2**30 +tebi = 2**40 +pebi = 2**50 +exbi = 2**60 +zebi = 2**70 +yobi = 2**80 + +# physical constants +c = speed_of_light = _cd('speed of light in vacuum') +mu_0 = _cd('vacuum mag. 
permeability') +epsilon_0 = _cd('vacuum electric permittivity') +h = Planck = _cd('Planck constant') +hbar = h / (2 * pi) +G = gravitational_constant = _cd('Newtonian constant of gravitation') +g = _cd('standard acceleration of gravity') +e = elementary_charge = _cd('elementary charge') +R = gas_constant = _cd('molar gas constant') +alpha = fine_structure = _cd('fine-structure constant') +N_A = Avogadro = _cd('Avogadro constant') +k = Boltzmann = _cd('Boltzmann constant') +sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant') +Wien = _cd('Wien wavelength displacement law constant') +Rydberg = _cd('Rydberg constant') + +# mass in kg +gram = 1e-3 +metric_ton = 1e3 +grain = 64.79891e-6 +lb = pound = 7000 * grain # avoirdupois +blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0) +slug = blob / 12 # lbf*s**2/foot (added in 1.0.0) +oz = ounce = pound / 16 +stone = 14 * pound +long_ton = 2240 * pound +short_ton = 2000 * pound + +troy_ounce = 480 * grain # only for metals / gems +troy_pound = 12 * troy_ounce +carat = 200e-6 + +m_e = electron_mass = _cd('electron mass') +m_p = proton_mass = _cd('proton mass') +m_n = neutron_mass = _cd('neutron mass') +m_u = u = atomic_mass = _cd('atomic mass constant') + +# angle in rad +degree = pi / 180 +arcmin = arcminute = degree / 60 +arcsec = arcsecond = arcmin / 60 + +# time in second +minute = 60.0 +hour = 60 * minute +day = 24 * hour +week = 7 * day +year = 365 * day +Julian_year = 365.25 * day + +# length in meter +inch = 0.0254 +foot = 12 * inch +yard = 3 * foot +mile = 1760 * yard +mil = inch / 1000 +pt = point = inch / 72 # typography +survey_foot = 1200.0 / 3937 +survey_mile = 5280 * survey_foot +nautical_mile = 1852.0 +fermi = 1e-15 +angstrom = 1e-10 +micron = 1e-6 +au = astronomical_unit = 149597870700.0 +light_year = Julian_year * c +parsec = au / arcsec + +# pressure in pascal +atm = atmosphere = _cd('standard atmosphere') +bar = 1e5 +torr = mmHg = atm / 760 +psi = pound * g / (inch * inch) + +# area in meter**2 +hectare = 1e4 +acre = 43560 * foot**2 + +# volume in meter**3 +litre = liter = 1e-3 +gallon = gallon_US = 231 * inch**3 # US +# pint = gallon_US / 8 +fluid_ounce = fluid_ounce_US = gallon_US / 128 +bbl = barrel = 42 * gallon_US # for oil + +gallon_imp = 4.54609e-3 # UK +fluid_ounce_imp = gallon_imp / 160 + +# speed in meter per second +kmh = 1e3 / hour +mph = mile / hour +# approx value of mach at 15 degrees in 1 atm. Is this a common value? +mach = speed_of_sound = 340.5 +knot = nautical_mile / hour + +# temperature in kelvin +zero_Celsius = 273.15 +degree_Fahrenheit = 1/1.8 # only for differences + +# energy in joule +eV = electron_volt = elementary_charge # * 1 Volt +calorie = calorie_th = 4.184 +calorie_IT = 4.1868 +erg = 1e-7 +Btu_th = pound * degree_Fahrenheit * calorie_th / gram +Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram +ton_TNT = 1e9 * calorie_th +# Wh = watt_hour + +# power in watt +hp = horsepower = 550 * foot * pound * g + +# force in newton +dyn = dyne = 1e-5 +lbf = pound_force = pound * g +kgf = kilogram_force = g # * 1 kg + +# functions for conversions that are not linear + + +def convert_temperature( + val: npt.ArrayLike, + old_scale: str, + new_scale: str, +) -> Any: + """ + Convert from a temperature scale to another one among Celsius, Kelvin, + Fahrenheit, and Rankine scales. + + Parameters + ---------- + val : array_like + Value(s) of the temperature(s) to be converted expressed in the + original scale. 
+ old_scale : str + Specifies as a string the original scale from which the temperature + value(s) will be converted. Supported scales are Celsius ('Celsius', + 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), + Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine + ('Rankine', 'rankine', 'R', 'r'). + new_scale : str + Specifies as a string the new scale to which the temperature + value(s) will be converted. Supported scales are Celsius ('Celsius', + 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), + Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine + ('Rankine', 'rankine', 'R', 'r'). + + Returns + ------- + res : float or array of floats + Value(s) of the converted temperature(s) expressed in the new scale. + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy.constants import convert_temperature + >>> import numpy as np + >>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin') + array([ 233.15, 313.15]) + + """ + # Convert from `old_scale` to Kelvin + if old_scale.lower() in ['celsius', 'c']: + tempo = _np.asanyarray(val) + zero_Celsius + elif old_scale.lower() in ['kelvin', 'k']: + tempo = _np.asanyarray(val) + elif old_scale.lower() in ['fahrenheit', 'f']: + tempo = (_np.asanyarray(val) - 32) * 5 / 9 + zero_Celsius + elif old_scale.lower() in ['rankine', 'r']: + tempo = _np.asanyarray(val) * 5 / 9 + else: + raise NotImplementedError("%s scale is unsupported: supported scales " + "are Celsius, Kelvin, Fahrenheit, and " + "Rankine" % old_scale) + # and from Kelvin to `new_scale`. + if new_scale.lower() in ['celsius', 'c']: + res = tempo - zero_Celsius + elif new_scale.lower() in ['kelvin', 'k']: + res = tempo + elif new_scale.lower() in ['fahrenheit', 'f']: + res = (tempo - zero_Celsius) * 9 / 5 + 32 + elif new_scale.lower() in ['rankine', 'r']: + res = tempo * 9 / 5 + else: + raise NotImplementedError("'%s' scale is unsupported: supported " + "scales are 'Celsius', 'Kelvin', " + "'Fahrenheit', and 'Rankine'" % new_scale) + + return res + + +# optics + + +def lambda2nu(lambda_: npt.ArrayLike) -> Any: + """ + Convert wavelength to optical frequency + + Parameters + ---------- + lambda_ : array_like + Wavelength(s) to be converted. + + Returns + ------- + nu : float or array of floats + Equivalent optical frequency. + + Notes + ----- + Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the + (vacuum) speed of light in meters/second. + + Examples + -------- + >>> from scipy.constants import lambda2nu, speed_of_light + >>> import numpy as np + >>> lambda2nu(np.array((1, speed_of_light))) + array([ 2.99792458e+08, 1.00000000e+00]) + + """ + return c / _np.asanyarray(lambda_) + + +def nu2lambda(nu: npt.ArrayLike) -> Any: + """ + Convert optical frequency to wavelength. + + Parameters + ---------- + nu : array_like + Optical frequency to be converted. + + Returns + ------- + lambda : float or array of floats + Equivalent wavelength(s). + + Notes + ----- + Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the + (vacuum) speed of light in meters/second. 
+ + Examples + -------- + >>> from scipy.constants import nu2lambda, speed_of_light + >>> import numpy as np + >>> nu2lambda(np.array((1, speed_of_light))) + array([ 2.99792458e+08, 1.00000000e+00]) + + """ + return c / _np.asanyarray(nu) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/codata.py b/env-llmeval/lib/python3.10/site-packages/scipy/constants/codata.py new file mode 100644 index 0000000000000000000000000000000000000000..72177f20545d673d5bbb179c705f72cdbb1afcc4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/constants/codata.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.constants` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'physical_constants', 'value', 'unit', 'precision', 'find', + 'ConstantWarning', 'txt2002', 'txt2006', 'txt2010', 'txt2014', + 'txt2018', 'parse_constants_2002to2014', + 'parse_constants_2018toXXXX', 'k', 'c', 'mu0', 'epsilon0', + 'exact_values', 'key', 'val', 'v' + +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="constants", module="codata", + private_modules=["_codata"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/constants.py b/env-llmeval/lib/python3.10/site-packages/scipy/constants/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..855901ba802881090b99b7e8972de741331c7ab9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/constants/constants.py @@ -0,0 +1,53 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.constants` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G', + 'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg', + 'Stefan_Boltzmann', 'Wien', 'acre', 'alpha', + 'angstrom', 'arcmin', 'arcminute', 'arcsec', + 'arcsecond', 'astronomical_unit', 'atm', + 'atmosphere', 'atomic_mass', 'atto', 'au', 'bar', + 'barrel', 'bbl', 'blob', 'c', 'calorie', + 'calorie_IT', 'calorie_th', 'carat', 'centi', + 'convert_temperature', 'day', 'deci', 'degree', + 'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e', + 'eV', 'electron_mass', 'electron_volt', + 'elementary_charge', 'epsilon_0', 'erg', + 'exa', 'exbi', 'femto', 'fermi', 'fine_structure', + 'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp', + 'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp', + 'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio', + 'grain', 'gram', 'gravitational_constant', 'h', 'hbar', + 'hectare', 'hecto', 'horsepower', 'hour', 'hp', + 'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force', + 'kmh', 'knot', 'lambda2nu', 'lb', 'lbf', + 'light_year', 'liter', 'litre', 'long_ton', 'm_e', + 'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega', + 'metric_ton', 'micro', 'micron', 'mil', 'mile', + 'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano', + 'nautical_mile', 'neutron_mass', 'nu2lambda', + 'ounce', 'oz', 'parsec', 'pebi', 'peta', + 'pi', 'pico', 'point', 'pound', 'pound_force', + 'proton_mass', 'psi', 'pt', 'short_ton', + 'sigma', 'slinch', 'slug', 'speed_of_light', + 'speed_of_sound', 'stone', 'survey_foot', + 'survey_mile', 'tebi', 'tera', 'ton_TNT', + 'torr', 'troy_ounce', 'troy_pound', 'u', + 'week', 'yard', 'year', 'yobi', 'yocto', + 'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="constants", module="constants", + private_modules=["_constants"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2140b7d704d192b930acc353fefb3e45db9c91e0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d11adbb8ff55c63df0d9165db569fa8819c91006 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2128e8060eae03d8582ce2efb023136402e2752c Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/test_codata.py b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/test_codata.py new file mode 100644 index 0000000000000000000000000000000000000000..ec9b69aa20351832eebe725d73c5e6aacaefdf1c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/test_codata.py @@ -0,0 +1,57 @@ +from scipy.constants import find, value, ConstantWarning, c, speed_of_light +from numpy.testing import (assert_equal, assert_, assert_almost_equal, + suppress_warnings) +import scipy.constants._codata as _cd + + +def test_find(): + keys = find('weak mixing', disp=False) + assert_equal(keys, ['weak mixing angle']) + + keys = find('qwertyuiop', disp=False) + assert_equal(keys, []) + + keys = find('natural unit', disp=False) + assert_equal(keys, sorted(['natural unit of velocity', + 'natural unit of action', + 'natural unit of action in eV s', + 'natural unit of mass', + 'natural unit of energy', + 'natural unit of energy in MeV', + 'natural unit of momentum', + 'natural unit of momentum in MeV/c', + 'natural unit of length', + 'natural unit of time'])) + + +def test_basic_table_parse(): + c_s = 'speed of light in vacuum' + assert_equal(value(c_s), c) + assert_equal(value(c_s), speed_of_light) + + +def test_basic_lookup(): + assert_equal('%d %s' % (_cd.c, _cd.unit('speed of light in vacuum')), + '299792458 m s^-1') + + +def test_find_all(): + assert_(len(find(disp=False)) > 300) + + +def test_find_single(): + assert_equal(find('Wien freq', disp=False)[0], + 'Wien frequency displacement law constant') + + +def test_2002_vs_2006(): + assert_almost_equal(value('magn. flux quantum'), + value('mag. flux quantum')) + + +def test_exact_values(): + # Check that updating stored values with exact ones worked. 
+ with suppress_warnings() as sup: + sup.filter(ConstantWarning) + for key in _cd.exact_values: + assert_((_cd.exact_values[key][0] - value(key)) / value(key) == 0) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/test_constants.py b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/test_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..8d7461d978fa1f2ff0267429d32ca58193b3b73e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/constants/tests/test_constants.py @@ -0,0 +1,35 @@ +from numpy.testing import assert_equal, assert_allclose +import scipy.constants as sc + + +def test_convert_temperature(): + assert_equal(sc.convert_temperature(32, 'f', 'Celsius'), 0) + assert_equal(sc.convert_temperature([0, 0], 'celsius', 'Kelvin'), + [273.15, 273.15]) + assert_equal(sc.convert_temperature([0, 0], 'kelvin', 'c'), + [-273.15, -273.15]) + assert_equal(sc.convert_temperature([32, 32], 'f', 'k'), [273.15, 273.15]) + assert_equal(sc.convert_temperature([273.15, 273.15], 'kelvin', 'F'), + [32, 32]) + assert_equal(sc.convert_temperature([0, 0], 'C', 'fahrenheit'), [32, 32]) + assert_allclose(sc.convert_temperature([0, 0], 'c', 'r'), [491.67, 491.67], + rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([491.67, 491.67], 'Rankine', 'C'), + [0., 0.], rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([491.67, 491.67], 'r', 'F'), + [32., 32.], rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([32, 32], 'fahrenheit', 'R'), + [491.67, 491.67], rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([273.15, 273.15], 'K', 'R'), + [491.67, 491.67], rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'), + [273.15, 0.], rtol=0., atol=1e-13) + + +def test_lambda_to_nu(): + assert_equal(sc.lambda2nu([sc.speed_of_light, 1]), [1, sc.speed_of_light]) + + +def test_nu_to_lambda(): + assert_equal(sc.nu2lambda([sc.speed_of_light, 1]), [1, sc.speed_of_light]) + diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abc43e71b15052f0f363b18ea265c3f181aebcf0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb39aaafea25808a48292bbd655badcc9397c76 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..594593b9679917bdec9a1aedc551d5102e18d8c4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..e9d16d991081499c3c7c26dc2059711994b786eb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..338e0a2bb592f7d9dc5613cbbe8f0a0000b41fe9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..967d0cc7ab2d37ae722a6c1a64baf4ae0b8e7b2e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a9528571fc2e3750b03d67dae63a372108a5cf1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fb185cbd249b2236a19f679bbf0a4eb45f0e474 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a247321dc8bb307d99602d018e8c2872d1ba202 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34e9c2e92a7fc999151432f38b25c025a921a883 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2731656418122ffc4631b6e302e652d20239d944 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..b969e02548f31b6e82692ff6607a85a32d34952f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2505756290fcb93864ba1b6bef2512ee7cddfe7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..500b60d76880c20de1b3a3ef6eb2a35af4ba95a2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d071589212c38f70c2778742004597d4738b43f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..975d2968e440df91ba716b0801d0531f36fdc48a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a4a0822edba3087d230ec706c81665bbcb7fe1e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79ef9749d75533c46835edba74a690887d4a69fc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..623daf38597177a8bc4d14a40329a8d7089beaf7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25ede8d9f6af7e469d6cdf3121903791383562a3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b80fc3429abc5de93bf0204650721eee0da583b5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99870ea796bd16e31a67dbd0ff1228e38d078e5a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c14cfe76ecb868439684da4afc617c53c6f35ffe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d757c54eadcd5fd6fcf5da98a32f2ce0c1ee7b5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5613c791c18066c7f3f0273bbaa2d45302dfecb6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c88fc8bad504469fa1d0f30e98ce3e7353e96f4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4968a90a9bc00ec1c83c7acaf68d64e16039612e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82767e9210cbc4866b1c463b8625964d18e1b2dd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..838c71a5b1abc0ee89611e98dd62fcbf09412089 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04e4e0846c01a2ebf353453e631fd62a797e29d0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06f1c4a8c69f674966032a3619ca0f2b2a3af62f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b1aa2c80323c707e68984ffd816f5a6a13adf7d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..460eacfe5c0bfcb2d8b69b89314bacb9500cc905 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7b779ed959f1b5ee9ba83ee17f1a0603c4afd5bd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py new file mode 100644 index 0000000000000000000000000000000000000000..9bc400640117d9b917ec93aeae25d8d35906dadc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py @@ -0,0 +1,209 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose + +from scipy.integrate import quad_vec + +from multiprocessing.dummy import Pool + + +quadrature_params = pytest.mark.parametrize( + 'quadrature', [None, "gk15", "gk21", "trapezoid"]) + + +@quadrature_params +def test_quad_vec_simple(quadrature): + n = np.arange(10) + def f(x): + return x ** n + for epsabs in [0.1, 1e-3, 1e-6]: + if quadrature == 'trapezoid' and epsabs < 1e-4: + # slow: skip + continue + + kwargs = dict(epsabs=epsabs, quadrature=quadrature) + + exact = 2**(n+1)/(n + 1) + + res, err = quad_vec(f, 0, 2, norm='max', **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + res, err = quad_vec(f, 0, 2, norm='2', **kwargs) + assert np.linalg.norm(res - exact) < epsabs + + res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + res, err, *rest = quad_vec(f, 0, 2, norm='max', + epsrel=1e-8, + full_output=True, + limit=10000, + **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + +@quadrature_params +def test_quad_vec_simple_inf(quadrature): + def f(x): + return 1 / (1 + np.float64(x) ** 2) + + for epsabs in [0.1, 1e-3, 1e-6]: + if quadrature == 'trapezoid' and epsabs < 1e-4: + # slow: skip + continue + + kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature) + + res, err = quad_vec(f, 0, np.inf, **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, 0, -np.inf, **kwargs) + assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, 0, **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, 0, **kwargs) + assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, np.inf, **kwargs) + assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, -np.inf, **kwargs) + assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, np.inf, **kwargs) + assert_allclose(res, 0, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, -np.inf, **kwargs) + assert_allclose(res, 0, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + def f(x): + return np.sin(x + 2) / (1 + x ** 2) + exact = np.pi / np.e * np.sin(2) + epsabs = 1e-5 + + res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs, + quadrature=quadrature, full_output=True) + assert info.status == 1 + assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err)) + + +def test_quad_vec_args(): + def f(x, a): + return x * (x + a) * np.arange(3) + a = 2 + exact = np.array([0, 4/3, 8/3]) + + res, err = quad_vec(f, 0, 1, args=(a,)) + assert_allclose(res, exact, 
rtol=0, atol=1e-4) + + +def _lorenzian(x): + return 1 / (1 + x**2) + + +def test_quad_vec_pool(): + f = _lorenzian + res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4) + assert_allclose(res, np.pi, rtol=0, atol=1e-4) + + with Pool(10) as pool: + def f(x): + return 1 / (1 + x ** 2) + res, _ = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map) + assert_allclose(res, np.pi, rtol=0, atol=1e-4) + + +def _func_with_args(x, a): + return x * (x + a) * np.arange(3) + + +@pytest.mark.parametrize('extra_args', [2, (2,)]) +@pytest.mark.parametrize('workers', [1, 10]) +def test_quad_vec_pool_args(extra_args, workers): + f = _func_with_args + exact = np.array([0, 4/3, 8/3]) + + res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + with Pool(workers) as pool: + res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + +@quadrature_params +def test_num_eval(quadrature): + def f(x): + count[0] += 1 + return x**5 + + count = [0] + res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature) + assert res[2].neval == count[0] + + +def test_info(): + def f(x): + return np.ones((3, 2, 1)) + + res, err, info = quad_vec(f, 0, 1, norm='max', full_output=True) + + assert info.success is True + assert info.status == 0 + assert info.message == 'Target precision reached.' + assert info.neval > 0 + assert info.intervals.shape[1] == 2 + assert info.integrals.shape == (info.intervals.shape[0], 3, 2, 1) + assert info.errors.shape == (info.intervals.shape[0],) + + +def test_nan_inf(): + def f_nan(x): + return np.nan + + def f_inf(x): + return np.inf if x < 0.1 else 1/x + + res, err, info = quad_vec(f_nan, 0, 1, full_output=True) + assert info.status == 3 + + res, err, info = quad_vec(f_inf, 0, 1, full_output=True) + assert info.status == 3 + + +@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0), + (-np.inf, np.inf), (np.inf, -np.inf)]) +def test_points(a, b): + # Check that initial interval splitting is done according to + # `points`, by checking that consecutive sets of 15 point (for + # gk15) function evaluations lie between `points` + + points = (0, 0.25, 0.5, 0.75, 1.0) + points += tuple(-x for x in points) + + quadrature_points = 15 + interval_sets = [] + count = 0 + + def f(x): + nonlocal count + + if count % quadrature_points == 0: + interval_sets.append(set()) + + count += 1 + interval_sets[-1].add(float(x)) + return 0.0 + + quad_vec(f, a, b, points=points, quadrature='gk15', limit=0) + + # Check that all point sets lie in a single `points` interval + for p in interval_sets: + j = np.searchsorted(sorted(points), tuple(p)) + assert np.all(j == j[0]) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py new file mode 100644 index 0000000000000000000000000000000000000000..f34d45d94fd754bc8d2c90609ac308f6d3e4706b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py @@ -0,0 +1,218 @@ +import itertools +import numpy as np +from numpy.testing import assert_allclose +from scipy.integrate import ode + + +def _band_count(a): + """Returns ml and mu, the lower and upper band sizes of a.""" + nrows, ncols = a.shape + ml = 0 + for k in range(-nrows+1, 0): + if np.diag(a, k).any(): + ml = -k + break + mu = 0 + for k in range(nrows-1, 0, -1): 
+ if np.diag(a, k).any(): + mu = k + break + return ml, mu + + +def _linear_func(t, y, a): + """Linear system dy/dt = a * y""" + return a.dot(y) + + +def _linear_jac(t, y, a): + """Jacobian of a * y is a.""" + return a + + +def _linear_banded_jac(t, y, a): + """Banded Jacobian.""" + ml, mu = _band_count(a) + bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)] + bjac.append(np.diag(a)) + for k in range(-1, -ml-1, -1): + bjac.append(np.r_[np.diag(a, k), [0] * (-k)]) + return bjac + + +def _solve_linear_sys(a, y0, tend=1, dt=0.1, + solver=None, method='bdf', use_jac=True, + with_jacobian=False, banded=False): + """Use scipy.integrate.ode to solve a linear system of ODEs. + + a : square ndarray + Matrix of the linear system to be solved. + y0 : ndarray + Initial condition + tend : float + Stop time. + dt : float + Step size of the output. + solver : str + If not None, this must be "vode", "lsoda" or "zvode". + method : str + Either "bdf" or "adams". + use_jac : bool + Determines if the jacobian function is passed to ode(). + with_jacobian : bool + Passed to ode.set_integrator(). + banded : bool + Determines whether a banded or full jacobian is used. + If `banded` is True, `lband` and `uband` are determined by the + values in `a`. + """ + if banded: + lband, uband = _band_count(a) + else: + lband = None + uband = None + + if use_jac: + if banded: + r = ode(_linear_func, _linear_banded_jac) + else: + r = ode(_linear_func, _linear_jac) + else: + r = ode(_linear_func) + + if solver is None: + if np.iscomplexobj(a): + solver = "zvode" + else: + solver = "vode" + + r.set_integrator(solver, + with_jacobian=with_jacobian, + method=method, + lband=lband, uband=uband, + rtol=1e-9, atol=1e-10, + ) + t0 = 0 + r.set_initial_value(y0, t0) + r.set_f_params(a) + r.set_jac_params(a) + + t = [t0] + y = [y0] + while r.successful() and r.t < tend: + r.integrate(r.t + dt) + t.append(r.t) + y.append(r.y) + + t = np.array(t) + y = np.array(y) + return t, y + + +def _analytical_solution(a, y0, t): + """ + Analytical solution to the linear differential equations dy/dt = a*y. + + The solution is only valid if `a` is diagonalizable. + + Returns a 2-D array with shape (len(t), len(y0)). + """ + lam, v = np.linalg.eig(a) + c = np.linalg.solve(v, y0) + e = c * np.exp(lam * t.reshape(-1, 1)) + sol = e.dot(v.T) + return sol + + +def test_banded_ode_solvers(): + # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class + # with a system that has a banded Jacobian matrix. 
+ + t_exact = np.linspace(0, 1.0, 5) + + # --- Real arrays for testing the "lsoda" and "vode" solvers --- + + # lband = 2, uband = 1: + a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0], + [0.2, -0.5, 0.9, 0.0, 0.0], + [0.1, 0.1, -0.4, 0.1, 0.0], + [0.0, 0.3, -0.1, -0.9, -0.3], + [0.0, 0.0, 0.1, 0.1, -0.7]]) + + # lband = 0, uband = 1: + a_real_upper = np.triu(a_real) + + # lband = 2, uband = 0: + a_real_lower = np.tril(a_real) + + # lband = 0, uband = 0: + a_real_diag = np.triu(a_real_lower) + + real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag] + real_solutions = [] + + for a in real_matrices: + y0 = np.arange(1, a.shape[0] + 1) + y_exact = _analytical_solution(a, y0, t_exact) + real_solutions.append((y0, t_exact, y_exact)) + + def check_real(idx, solver, meth, use_jac, with_jac, banded): + a = real_matrices[idx] + y0, t_exact, y_exact = real_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(real_matrices)): + p = [['vode', 'lsoda'], # solver + ['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for solver, meth, use_jac, with_jac, banded in itertools.product(*p): + check_real(idx, solver, meth, use_jac, with_jac, banded) + + # --- Complex arrays for testing the "zvode" solver --- + + # complex, lband = 2, uband = 1: + a_complex = a_real - 0.5j * a_real + + # complex, lband = 0, uband = 0: + a_complex_diag = np.diag(np.diag(a_complex)) + + complex_matrices = [a_complex, a_complex_diag] + complex_solutions = [] + + for a in complex_matrices: + y0 = np.arange(1, a.shape[0] + 1) + 1j + y_exact = _analytical_solution(a, y0, t_exact) + complex_solutions.append((y0, t_exact, y_exact)) + + def check_complex(idx, solver, meth, use_jac, with_jac, banded): + a = complex_matrices[idx] + y0, t_exact, y_exact = complex_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(complex_matrices)): + p = [['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for meth, use_jac, with_jac, banded in itertools.product(*p): + check_complex(idx, "zvode", meth, use_jac, with_jac, banded) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py new file mode 100644 index 0000000000000000000000000000000000000000..edaf80bec586831d255c6df48e2b953f40a563fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py @@ -0,0 +1,711 @@ +import sys + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +import numpy as np +from numpy.testing import (assert_, assert_array_equal, assert_allclose, + assert_equal) +from pytest import raises as assert_raises + +from scipy.sparse import coo_matrix +from scipy.special import erf +from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac, + estimate_bc_jac, compute_jac_indices, + construct_global_jac, solve_bvp) + + +def exp_fun(x, y): + return np.vstack((y[1], y[0])) + + +def exp_fun_jac(x, y): + df_dy = np.empty((2, 2, 
x.shape[0])) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = 1 + df_dy[1, 1] = 0 + return df_dy + + +def exp_bc(ya, yb): + return np.hstack((ya[0] - 1, yb[0])) + + +def exp_bc_complex(ya, yb): + return np.hstack((ya[0] - 1 - 1j, yb[0])) + + +def exp_bc_jac(ya, yb): + dbc_dya = np.array([ + [1, 0], + [0, 0] + ]) + dbc_dyb = np.array([ + [0, 0], + [1, 0] + ]) + return dbc_dya, dbc_dyb + + +def exp_sol(x): + return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2)) + + +def sl_fun(x, y, p): + return np.vstack((y[1], -p[0]**2 * y[0])) + + +def sl_fun_jac(x, y, p): + n, m = y.shape + df_dy = np.empty((n, 2, m)) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = -p[0]**2 + df_dy[1, 1] = 0 + + df_dp = np.empty((n, 1, m)) + df_dp[0, 0] = 0 + df_dp[1, 0] = -2 * p[0] * y[0] + + return df_dy, df_dp + + +def sl_bc(ya, yb, p): + return np.hstack((ya[0], yb[0], ya[1] - p[0])) + + +def sl_bc_jac(ya, yb, p): + dbc_dya = np.zeros((3, 2)) + dbc_dya[0, 0] = 1 + dbc_dya[2, 1] = 1 + + dbc_dyb = np.zeros((3, 2)) + dbc_dyb[1, 0] = 1 + + dbc_dp = np.zeros((3, 1)) + dbc_dp[2, 0] = -1 + + return dbc_dya, dbc_dyb, dbc_dp + + +def sl_sol(x, p): + return np.sin(p[0] * x) + + +def emden_fun(x, y): + return np.vstack((y[1], -y[0]**5)) + + +def emden_fun_jac(x, y): + df_dy = np.empty((2, 2, x.shape[0])) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = -5 * y[0]**4 + df_dy[1, 1] = 0 + return df_dy + + +def emden_bc(ya, yb): + return np.array([ya[1], yb[0] - (3/4)**0.5]) + + +def emden_bc_jac(ya, yb): + dbc_dya = np.array([ + [0, 1], + [0, 0] + ]) + dbc_dyb = np.array([ + [0, 0], + [1, 0] + ]) + return dbc_dya, dbc_dyb + + +def emden_sol(x): + return (1 + x**2/3)**-0.5 + + +def undefined_fun(x, y): + return np.zeros_like(y) + + +def undefined_bc(ya, yb): + return np.array([ya[0], yb[0] - 1]) + + +def big_fun(x, y): + f = np.zeros_like(y) + f[::2] = y[1::2] + return f + + +def big_bc(ya, yb): + return np.hstack((ya[::2], yb[::2] - 1)) + + +def big_sol(x, n): + y = np.ones((2 * n, x.size)) + y[::2] = x + return x + + +def big_fun_with_parameters(x, y, p): + """ Big version of sl_fun, with two parameters. + + The two differential equations represented by sl_fun are broadcast to the + number of rows of y, rotating between the parameters p[0] and p[1]. + Here are the differential equations: + + dy[0]/dt = y[1] + dy[1]/dt = -p[0]**2 * y[0] + dy[2]/dt = y[3] + dy[3]/dt = -p[1]**2 * y[2] + dy[4]/dt = y[5] + dy[5]/dt = -p[0]**2 * y[4] + dy[6]/dt = y[7] + dy[7]/dt = -p[1]**2 * y[6] + . + . + . 
+ + """ + f = np.zeros_like(y) + f[::2] = y[1::2] + f[1::4] = -p[0]**2 * y[::4] + f[3::4] = -p[1]**2 * y[2::4] + return f + + +def big_fun_with_parameters_jac(x, y, p): + # big version of sl_fun_jac, with two parameters + n, m = y.shape + df_dy = np.zeros((n, n, m)) + df_dy[range(0, n, 2), range(1, n, 2)] = 1 + df_dy[range(1, n, 4), range(0, n, 4)] = -p[0]**2 + df_dy[range(3, n, 4), range(2, n, 4)] = -p[1]**2 + + df_dp = np.zeros((n, 2, m)) + df_dp[range(1, n, 4), 0] = -2 * p[0] * y[range(0, n, 4)] + df_dp[range(3, n, 4), 1] = -2 * p[1] * y[range(2, n, 4)] + + return df_dy, df_dp + + +def big_bc_with_parameters(ya, yb, p): + # big version of sl_bc, with two parameters + return np.hstack((ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1])) + + +def big_bc_with_parameters_jac(ya, yb, p): + # big version of sl_bc_jac, with two parameters + n = ya.shape[0] + dbc_dya = np.zeros((n + 2, n)) + dbc_dyb = np.zeros((n + 2, n)) + + dbc_dya[range(n // 2), range(0, n, 2)] = 1 + dbc_dyb[range(n // 2, n), range(0, n, 2)] = 1 + + dbc_dp = np.zeros((n + 2, 2)) + dbc_dp[n, 0] = -1 + dbc_dya[n, 1] = 1 + dbc_dp[n + 1, 1] = -1 + dbc_dya[n + 1, 3] = 1 + + return dbc_dya, dbc_dyb, dbc_dp + + +def big_sol_with_parameters(x, p): + # big version of sl_sol, with two parameters + return np.vstack((np.sin(p[0] * x), np.sin(p[1] * x))) + + +def shock_fun(x, y): + eps = 1e-3 + return np.vstack(( + y[1], + -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) + + np.pi * x * np.sin(np.pi * x)) / eps + )) + + +def shock_bc(ya, yb): + return np.array([ya[0] + 2, yb[0]]) + + +def shock_sol(x): + eps = 1e-3 + k = np.sqrt(2 * eps) + return np.cos(np.pi * x) + erf(x / k) / erf(1 / k) + + +def nonlin_bc_fun(x, y): + # laplace eq. + return np.stack([y[1], np.zeros_like(x)]) + + +def nonlin_bc_bc(ya, yb): + phiA, phipA = ya + phiC, phipC = yb + + kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9 + + # Butler-Volmer Kinetics at Anode + hA = 0.0-phiA-0.0 + iA = ioA * (np.exp(f*hA) - np.exp(-f*hA)) + res0 = iA + kappa * phipA + + # Butler-Volmer Kinetics at Cathode + hC = V - phiC - 1.0 + iC = ioC * (np.exp(f*hC) - np.exp(-f*hC)) + res1 = iC - kappa*phipC + + return np.array([res0, res1]) + + +def nonlin_bc_sol(x): + return -0.13426436116763119 - 1.1308709 * x + + +def test_modify_mesh(): + x = np.array([0, 1, 3, 9], dtype=float) + x_new = modify_mesh(x, np.array([0]), np.array([2])) + assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9])) + + x = np.array([-6, -3, 0, 3, 6], dtype=float) + x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3])) + assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6]) + + +def test_compute_fun_jac(): + x = np.linspace(0, 1, 5) + y = np.empty((2, x.shape[0])) + y[0] = 0.01 + y[1] = 0.02 + p = np.array([]) + df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p) + df_dy_an = exp_fun_jac(x, y) + assert_allclose(df_dy, df_dy_an) + assert_(df_dp is None) + + x = np.linspace(0, np.pi, 5) + y = np.empty((2, x.shape[0])) + y[0] = np.sin(x) + y[1] = np.cos(x) + p = np.array([1.0]) + df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p) + df_dy_an, df_dp_an = sl_fun_jac(x, y, p) + assert_allclose(df_dy, df_dy_an) + assert_allclose(df_dp, df_dp_an) + + x = np.linspace(0, 1, 10) + y = np.empty((2, x.shape[0])) + y[0] = (3/4)**0.5 + y[1] = 1e-4 + p = np.array([]) + df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p) + df_dy_an = emden_fun_jac(x, y) + assert_allclose(df_dy, df_dy_an) + assert_(df_dp is None) + + +def test_compute_bc_jac(): + ya = 
np.array([-1.0, 2]) + yb = np.array([0.5, 3]) + p = np.array([]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac( + lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p) + dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_(dbc_dp is None) + + ya = np.array([0.0, 1]) + yb = np.array([0.0, -1]) + p = np.array([0.5]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p) + dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_allclose(dbc_dp, dbc_dp_an) + + ya = np.array([0.5, 100]) + yb = np.array([-1000, 10.5]) + p = np.array([]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac( + lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p) + dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_(dbc_dp is None) + + +def test_compute_jac_indices(): + n = 2 + m = 4 + k = 2 + i, j = compute_jac_indices(n, m, k) + s = coo_matrix((np.ones_like(i), (i, j))).toarray() + s_true = np.array([ + [1, 1, 1, 1, 0, 0, 0, 0, 1, 1], + [1, 1, 1, 1, 0, 0, 0, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + ]) + assert_array_equal(s, s_true) + + +def test_compute_global_jac(): + n = 2 + m = 5 + k = 1 + i_jac, j_jac = compute_jac_indices(2, 5, 1) + x = np.linspace(0, 1, 5) + h = np.diff(x) + y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x))) + p = np.array([3.0]) + + f = sl_fun(x, y, p) + + x_middle = x[:-1] + 0.5 * h + y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1]) + + df_dy, df_dp = sl_fun_jac(x, y, p) + df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p) + dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p) + + J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, + df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp) + J = J.toarray() + + def J_block(h, p): + return np.array([ + [h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h], + [0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12] + ]) + + J_true = np.zeros((m * n + k, m * n + k)) + for i in range(m - 1): + J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0]) + + J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:]) + J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) + + h**2/6 * (y[1, :-1] - y[1, 1:])) + + J_true[8, 0] = 1 + J_true[9, 8] = 1 + J_true[10, 1] = 1 + J_true[10, 10] = -1 + + assert_allclose(J, J_true, rtol=1e-10) + + df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p) + df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p) + J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, + df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp) + J = J.toarray() + assert_allclose(J, J_true, rtol=2e-8, atol=2e-8) + + +def test_parameter_validation(): + x = [0, 1, 0.5] + y = np.zeros((2, 3)) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y) + + x = np.linspace(0, 1, 5) + y = np.zeros((2, 4)) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y) + + def fun(x, y, p): + return exp_fun(x, y) + def bc(ya, yb, p): + return exp_bc(ya, yb) + + y = np.zeros((2, 
x.shape[0])) + assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1]) + + def wrong_shape_fun(x, y): + return np.zeros(3) + + assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y) + + S = np.array([[0, 0]]) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S) + + +def test_no_params(): + x = np.linspace(0, 1, 5) + x_test = np.linspace(0, 1, 100) + y = np.zeros((2, x.shape[0])) + for fun_jac in [None, exp_fun_jac]: + for bc_jac in [None, exp_bc_jac]: + sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_equal(sol.x.size, 5) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5) + + f_test = exp_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res**2, axis=0)**0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_with_params(): + x = np.linspace(0, np.pi, 5) + x_test = np.linspace(0, np.pi, 100) + y = np.ones((2, x.shape[0])) + + for fun_jac in [None, sl_fun_jac]: + for bc_jac in [None, sl_bc_jac]: + sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 10) + + assert_allclose(sol.p, [1], rtol=1e-4) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0], sl_sol(x_test, [1]), + rtol=1e-4, atol=1e-4) + + f_test = sl_fun(x_test, sol_test, [1]) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_singular_term(): + x = np.linspace(0, 1, 10) + x_test = np.linspace(0.05, 1, 100) + y = np.empty((2, 10)) + y[0] = (3/4)**0.5 + y[1] = 1e-4 + S = np.array([[0, 0], [0, -2]]) + + for fun_jac in [None, emden_fun_jac]: + for bc_jac in [None, emden_bc_jac]: + sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_equal(sol.x.size, 10) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5) + + f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_complex(): + # The test is essentially the same as test_no_params, but boundary + # conditions are turned into complex. 
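+    # Because the ODE is linear with real coefficients and the boundary
+    # condition is ya[0] = 1 + 1j, the real and imaginary parts decouple into
+    # two copies of the real problem, so both parts are compared to exp_sol.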
+ x = np.linspace(0, 1, 5) + x_test = np.linspace(0, 1, 100) + y = np.zeros((2, x.shape[0]), dtype=complex) + for fun_jac in [None, exp_fun_jac]: + for bc_jac in [None, exp_bc_jac]: + sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5) + assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5) + + f_test = exp_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), + axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_failures(): + x = np.linspace(0, 1, 2) + y = np.zeros((2, x.size)) + res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5) + assert_equal(res.status, 1) + assert_(not res.success) + + x = np.linspace(0, 1, 5) + y = np.zeros((2, x.size)) + res = solve_bvp(undefined_fun, undefined_bc, x, y) + assert_equal(res.status, 2) + assert_(not res.success) + + +def test_big_problem(): + n = 30 + x = np.linspace(0, 1, 5) + y = np.zeros((2 * n, x.size)) + sol = solve_bvp(big_fun, big_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + sol_test = sol.sol(x) + + assert_allclose(sol_test[0], big_sol(x, n)) + + f_test = big_fun(x, sol_test) + r = sol.sol(x, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_big_problem_with_parameters(): + n = 30 + x = np.linspace(0, np.pi, 5) + x_test = np.linspace(0, np.pi, 100) + y = np.ones((2 * n, x.size)) + + for fun_jac in [None, big_fun_with_parameters_jac]: + for bc_jac in [None, big_bc_with_parameters_jac]: + sol = solve_bvp(big_fun_with_parameters, big_bc_with_parameters, x, + y, p=[0.5, 0.5], fun_jac=fun_jac, bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_allclose(sol.p, [1, 1], rtol=1e-4) + + sol_test = sol.sol(x_test) + + for isol in range(0, n, 4): + assert_allclose(sol_test[isol], + big_sol_with_parameters(x_test, [1, 1])[0], + rtol=1e-4, atol=1e-4) + assert_allclose(sol_test[isol + 2], + big_sol_with_parameters(x_test, [1, 1])[1], + rtol=1e-4, atol=1e-4) + + f_test = big_fun_with_parameters(x_test, sol_test, [1, 1]) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_shock_layer(): + x = np.linspace(-1, 1, 5) + x_test = np.linspace(-1, 1, 100) + y = np.zeros((2, x.size)) + sol = solve_bvp(shock_fun, shock_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 110) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5) + + f_test = shock_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res 
** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_nonlin_bc(): + x = np.linspace(0, 0.1, 5) + x_test = x + y = np.zeros([2, x.size]) + sol = solve_bvp(nonlin_bc_fun, nonlin_bc_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 8) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], nonlin_bc_sol(x_test), rtol=1e-5, atol=1e-5) + + f_test = nonlin_bc_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_verbose(): + # Smoke test that checks the printing does something and does not crash + x = np.linspace(0, 1, 5) + y = np.zeros((2, x.shape[0])) + for verbose in [0, 1, 2]: + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose) + text = sys.stdout.getvalue() + finally: + sys.stdout = old_stdout + + assert_(sol.success) + if verbose == 0: + assert_(not text, text) + if verbose >= 1: + assert_("Solved in" in text, text) + if verbose >= 2: + assert_("Max residual" in text, text) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py new file mode 100644 index 0000000000000000000000000000000000000000..ff228ed1719641b5b7013defef5e74dbfd0e07e5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py @@ -0,0 +1,834 @@ +# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers +""" +Tests for numerical integration. +""" +import numpy as np +from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, + allclose) + +from numpy.testing import ( + assert_, assert_array_almost_equal, + assert_allclose, assert_array_equal, assert_equal, assert_warns) +from pytest import raises as assert_raises +from scipy.integrate import odeint, ode, complex_ode + +#------------------------------------------------------------------------------ +# Test ODE integrators +#------------------------------------------------------------------------------ + + +class TestOdeint: + # Check integrate.odeint + + def _do_problem(self, problem): + t = arange(0.0, problem.stop_t, 0.05) + + # Basic case + z, infodict = odeint(problem.f, problem.z0, t, full_output=True) + assert_(problem.verify(z, t)) + + # Use tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + if hasattr(problem, 'jac'): + # Use Dfun + z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac, + full_output=True) + assert_(problem.verify(z, t)) + + # Use Dfun and tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + Dfun=lambda t, y: problem.jac(y, t), + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + def test_odeint(self): + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem) + + +class TestODEClass: + + ode_class = None # Set in subclass. 
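+    # Subclasses below (TestOde, TestComplexOde) point ode_class at ode or
+    # complex_ode; _do_problem then drives that class on each test problem,
+    # wrapping f and jac because the ode API passes (t, y) while the problem
+    # classes use the odeint (y, t) convention.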
+ + def _do_problem(self, problem, integrator, method='adams'): + + # ode has callback arguments in different order than odeint + def f(t, z): + return problem.f(z, t) + jac = None + if hasattr(problem, 'jac'): + def jac(t, z): + return problem.jac(z, t) + + integrator_params = {} + if problem.lband is not None or problem.uband is not None: + integrator_params['uband'] = problem.uband + integrator_params['lband'] = problem.lband + + ig = self.ode_class(f, jac) + ig.set_integrator(integrator, + atol=problem.atol/10, + rtol=problem.rtol/10, + method=method, + **integrator_params) + + ig.set_initial_value(problem.z0, t=0.0) + z = ig.integrate(problem.stop_t) + + assert_array_equal(z, ig.y) + assert_(ig.successful(), (problem, method)) + assert_(ig.get_return_code() > 0, (problem, method)) + assert_(problem.verify(array([z]), problem.stop_t), (problem, method)) + + +class TestOde(TestODEClass): + + ode_class = ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + self._do_problem(problem, 'vode', 'bdf') + + def test_zvode(self): + # Check the zvode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'zvode', 'adams') + self._do_problem(problem, 'zvode', 'bdf') + + def test_lsoda(self): + # Check the lsoda solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + def test_concurrent_fail(self): + for sol in ('vode', 'zvode', 'lsoda'): + def f(t, y): + return 1.0 + + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_raises(RuntimeError, r.integrate, r.t + 0.1) + + def test_concurrent_ok(self): + def f(t, y): + return 1.0 + + for k in range(3): + for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'): + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.1) + assert_allclose(r2.y, 0.2) + + for sol in ('dopri5', 'dop853'): + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.3) + assert_allclose(r2.y, 0.2) + + +class TestComplexOde(TestODEClass): + + ode_class = complex_ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + else: + self._do_problem(problem, 'vode', 'bdf') + + def test_lsoda(self): + # Check the lsoda solver + for 
problem_cls in PROBLEMS: + problem = problem_cls() + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + +class TestSolout: + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_after_initial_test(self, integrator): + # Check if solout works even if it is set after the initial value. + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_initial_value(y0, t0) + ig.set_solout(solout) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout_after_initial(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_after_initial_test(integrator) + + def _run_solout_break_test(self, integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +class TestComplexSolout: + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_break_test(self, integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + 
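+        # Returning -1 from solout (as above) asks the dopri5/dop853
+        # integrators to stop early, which is why ts[-1] is only required to
+        # land strictly between tend/2 and tend below.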
+ def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +#------------------------------------------------------------------------------ +# Test problems +#------------------------------------------------------------------------------ + + +class ODE: + """ + ODE problem + """ + stiff = False + cmplx = False + stop_t = 1 + z0 = [] + + lband = None + uband = None + + atol = 1e-6 + rtol = 1e-5 + + +class SimpleOscillator(ODE): + r""" + Free vibration of a simple oscillator:: + m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0 + Solution:: + u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m) + """ + stop_t = 1 + 0.09 + z0 = array([1.0, 0.1], float) + + k = 4.0 + m = 1.0 + + def f(self, z, t): + tmp = zeros((2, 2), float) + tmp[0, 1] = 1.0 + tmp[1, 0] = -self.k / self.m + return dot(tmp, z) + + def verify(self, zs, t): + omega = sqrt(self.k / self.m) + u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega + return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol) + + +class ComplexExp(ODE): + r"""The equation :lm:`\dot u = i u`""" + stop_t = 1.23*pi + z0 = exp([1j, 2j, 3j, 4j, 5j]) + cmplx = True + + def f(self, z, t): + return 1j*z + + def jac(self, z, t): + return 1j*eye(5) + + def verify(self, zs, t): + u = self.z0 * exp(1j*t) + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +class Pi(ODE): + r"""Integrate 1/(t + 1j) from t=-10 to t=10""" + stop_t = 20 + z0 = [0] + cmplx = True + + def f(self, z, t): + return array([1./(t - 10 + 1j)]) + + def verify(self, zs, t): + u = -2j * np.arctan(10) + return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol) + + +class CoupledDecay(ODE): + r""" + 3 coupled decays suited for banded treatment + (banded mode makes it necessary when N>>3) + """ + + stiff = True + stop_t = 0.5 + z0 = [5.0, 7.0, 13.0] + lband = 1 + uband = 0 + + lmbd = [0.17, 0.23, 0.29] # fictitious decay constants + + def f(self, z, t): + lmbd = self.lmbd + return np.array([-lmbd[0]*z[0], + -lmbd[1]*z[1] + lmbd[0]*z[0], + -lmbd[2]*z[2] + lmbd[1]*z[1]]) + + def jac(self, z, t): + # The full Jacobian is + # + # [-lmbd[0] 0 0 ] + # [ lmbd[0] -lmbd[1] 0 ] + # [ 0 lmbd[1] -lmbd[2]] + # + # The lower and upper bandwidths are lband=1 and uband=0, resp. 
+ # The representation of this array in packed format is + # + # [-lmbd[0] -lmbd[1] -lmbd[2]] + # [ lmbd[0] lmbd[1] 0 ] + + lmbd = self.lmbd + j = np.zeros((self.lband + self.uband + 1, 3), order='F') + + def set_j(ri, ci, val): + j[self.uband + ri - ci, ci] = val + set_j(0, 0, -lmbd[0]) + set_j(1, 0, lmbd[0]) + set_j(1, 1, -lmbd[1]) + set_j(2, 1, lmbd[1]) + set_j(2, 2, -lmbd[2]) + return j + + def verify(self, zs, t): + # Formulae derived by hand + lmbd = np.array(self.lmbd) + d10 = lmbd[1] - lmbd[0] + d21 = lmbd[2] - lmbd[1] + d20 = lmbd[2] - lmbd[0] + e0 = np.exp(-lmbd[0] * t) + e1 = np.exp(-lmbd[1] * t) + e2 = np.exp(-lmbd[2] * t) + u = np.vstack(( + self.z0[0] * e0, + self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1), + self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) + + lmbd[1] * lmbd[0] * self.z0[0] / d10 * + (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose() + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay] + +#------------------------------------------------------------------------------ + + +def f(t, x): + dxdt = [x[1], -x[0]] + return dxdt + + +def jac(t, x): + j = array([[0.0, 1.0], + [-1.0, 0.0]]) + return j + + +def f1(t, x, omega): + dxdt = [omega*x[1], -omega*x[0]] + return dxdt + + +def jac1(t, x, omega): + j = array([[0.0, omega], + [-omega, 0.0]]) + return j + + +def f2(t, x, omega1, omega2): + dxdt = [omega1*x[1], -omega2*x[0]] + return dxdt + + +def jac2(t, x, omega1, omega2): + j = array([[0.0, omega1], + [-omega2, 0.0]]) + return j + + +def fv(t, x, omega): + dxdt = [omega[0]*x[1], -omega[1]*x[0]] + return dxdt + + +def jacv(t, x, omega): + j = array([[0.0, omega[0]], + [-omega[1], 0.0]]) + return j + + +class ODECheckParameterUse: + """Call an ode-class solver with several cases of parameter use.""" + + # solver_name must be set before tests can be run with this class. + + # Set these in subclasses. + solver_name = '' + solver_uses_jac = False + + def _get_solver(self, f, jac): + solver = ode(f, jac) + if self.solver_uses_jac: + solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7, + with_jacobian=self.solver_uses_jac) + else: + # XXX Shouldn't set_integrator *always* accept the keyword arg + # 'with_jacobian', and perhaps raise an exception if it is set + # to True if the solver can't actually use it? 
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7) + return solver + + def _check_solver(self, solver): + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + solver.integrate(pi) + assert_array_almost_equal(solver.y, [-1.0, 0.0]) + + def test_no_params(self): + solver = self._get_solver(f, jac) + self._check_solver(solver) + + def test_one_scalar_param(self): + solver = self._get_solver(f1, jac1) + omega = 1.0 + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + def test_two_scalar_params(self): + solver = self._get_solver(f2, jac2) + omega1 = 1.0 + omega2 = 1.0 + solver.set_f_params(omega1, omega2) + if self.solver_uses_jac: + solver.set_jac_params(omega1, omega2) + self._check_solver(solver) + + def test_vector_param(self): + solver = self._get_solver(fv, jacv) + omega = [1.0, 1.0] + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + def test_warns_on_failure(self): + # Set nsteps small to ensure failure + solver = self._get_solver(f, jac) + solver.set_integrator(self.solver_name, nsteps=1) + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + assert_warns(UserWarning, solver.integrate, pi) + + +class TestDOPRI5CheckParameterUse(ODECheckParameterUse): + solver_name = 'dopri5' + solver_uses_jac = False + + +class TestDOP853CheckParameterUse(ODECheckParameterUse): + solver_name = 'dop853' + solver_uses_jac = False + + +class TestVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'vode' + solver_uses_jac = True + + +class TestZVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'zvode' + solver_uses_jac = True + + +class TestLSODACheckParameterUse(ODECheckParameterUse): + solver_name = 'lsoda' + solver_uses_jac = True + + +def test_odeint_trivial_time(): + # Test that odeint succeeds when given a single time point + # and full_output=True. This is a regression test for gh-4282. + y0 = 1 + t = [0] + y, info = odeint(lambda y, t: -y, y0, t, full_output=True) + assert_array_equal(y, np.array([[y0]])) + + +def test_odeint_banded_jacobian(): + # Test the use of the `Dfun`, `ml` and `mu` options of odeint. + + def func(y, t, c): + return c.dot(y) + + def jac(y, t, c): + return c + + def jac_transpose(y, t, c): + return c.T.copy(order='C') + + def bjac_rows(y, t, c): + jac = np.vstack((np.r_[0, np.diag(c, 1)], + np.diag(c), + np.r_[np.diag(c, -1), 0], + np.r_[np.diag(c, -2), 0, 0])) + return jac + + def bjac_cols(y, t, c): + return bjac_rows(y, t, c).T.copy(order='C') + + c = array([[-205, 0.01, 0.00, 0.0], + [0.1, -2.50, 0.02, 0.0], + [1e-3, 0.01, -2.0, 0.01], + [0.00, 0.00, 0.1, -1.0]]) + + y0 = np.ones(4) + t = np.array([0, 5, 10, 100]) + + # Use the full Jacobian. + sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac) + + # Use the transposed full Jacobian, with col_deriv=True. + sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac_transpose, col_deriv=True) + + # Use the banded Jacobian. + sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_rows, ml=2, mu=1) + + # Use the transposed banded Jacobian, with col_deriv=True. 
+ sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_cols, ml=2, mu=1, col_deriv=True) + + assert_allclose(sol1, sol2, err_msg="sol1 != sol2") + assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3") + assert_allclose(sol3, sol4, err_msg="sol3 != sol4") + + # Verify that the number of jacobian evaluations was the same for the + # calls of odeint with a full jacobian and with a banded jacobian. This is + # a regression test--there was a bug in the handling of banded jacobians + # that resulted in an incorrect jacobian matrix being passed to the LSODA + # code. That would cause errors or excessive jacobian evaluations. + assert_array_equal(info1['nje'], info2['nje']) + assert_array_equal(info3['nje'], info4['nje']) + + # Test the use of tfirst + sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,), + full_output=True, atol=1e-13, rtol=1e-11, + mxstep=10000, + Dfun=lambda t, y, c: jac(y, t, c), tfirst=True) + # The code should execute the exact same sequence of floating point + # calculations, so these should be exactly equal. We'll be safe and use + # a small tolerance. + assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty") + + +def test_odeint_errors(): + def sys1d(x, t): + return -100*x + + def bad1(x, t): + return 1.0/0 + + def bad2(x, t): + return "foo" + + def bad_jac1(x, t): + return 1.0/0 + + def bad_jac2(x, t): + return [["foo"]] + + def sys2d(x, t): + return [-100*x[0], -0.1*x[1]] + + def sys2d_bad_jac(x, t): + return [[1.0/0, 0], [0, -0.1]] + + assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1]) + assert_raises(ValueError, odeint, bad2, 1.0, [0, 1]) + + assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1) + assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2) + + assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1], + Dfun=sys2d_bad_jac) + + +def test_odeint_bad_shapes(): + # Tests of some errors that can occur with odeint. + + def badrhs(x, t): + return [1, -1] + + def sys1(x, t): + return -100*x + + def badjac(x, t): + return [[0, 0, 0]] + + # y0 must be at most 1-d. + bad_y0 = [[0, 0], [0, 0]] + assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1]) + + # t must be at most 1-d. + bad_t = [[0, 1], [2, 3]] + assert_raises(ValueError, odeint, sys1, [10.0], bad_t) + + # y0 is 10, but badrhs(x, t) returns [1, -1]. + assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1]) + + # shape of array returned by badjac(x, t) is not correct. + assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac) + + +def test_repeated_t_values(): + """Regression test for gh-8217.""" + + def func(x, t): + return -0.25*x + + t = np.zeros(10) + sol = odeint(func, [1.], t) + assert_array_equal(sol, np.ones((len(t), 1))) + + tau = 4*np.log(2) + t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau] + sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12) + expected_sol = np.array([[1.0, 2.0]]*9 + + [[0.5, 1.0], + [0.25, 0.5], + [0.25, 0.5], + [0.125, 0.25]]) + assert_allclose(sol, expected_sol) + + # Edge case: empty t sequence. + sol = odeint(func, [1.], []) + assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1))) + + # t values are not monotonic. 
+ assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0]) + assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3]) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py new file mode 100644 index 0000000000000000000000000000000000000000..7d28ccc93f4444f3f2e0b71da01c573d4f903dbc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py @@ -0,0 +1,74 @@ +import numpy as np +from numpy.testing import assert_equal, assert_allclose +from scipy.integrate import odeint +import scipy.integrate._test_odeint_banded as banded5x5 + + +def rhs(y, t): + dydt = np.zeros_like(y) + banded5x5.banded5x5(t, y, dydt) + return dydt + + +def jac(y, t): + n = len(y) + jac = np.zeros((n, n), order='F') + banded5x5.banded5x5_jac(t, y, 1, 1, jac) + return jac + + +def bjac(y, t): + n = len(y) + bjac = np.zeros((4, n), order='F') + banded5x5.banded5x5_bjac(t, y, 1, 1, bjac) + return bjac + + +JACTYPE_FULL = 1 +JACTYPE_BANDED = 4 + + +def check_odeint(jactype): + if jactype == JACTYPE_FULL: + ml = None + mu = None + jacobian = jac + elif jactype == JACTYPE_BANDED: + ml = 2 + mu = 1 + jacobian = bjac + else: + raise ValueError(f"invalid jactype: {jactype!r}") + + y0 = np.arange(1.0, 6.0) + # These tolerances must match the tolerances used in banded5x5.f. + rtol = 1e-11 + atol = 1e-13 + dt = 0.125 + nsteps = 64 + t = dt * np.arange(nsteps+1) + + sol, info = odeint(rhs, y0, t, + Dfun=jacobian, ml=ml, mu=mu, + atol=atol, rtol=rtol, full_output=True) + yfinal = sol[-1] + odeint_nst = info['nst'][-1] + odeint_nfe = info['nfe'][-1] + odeint_nje = info['nje'][-1] + + y1 = y0.copy() + # Pure Fortran solution. y1 is modified in-place. + nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype) + + # It is likely that yfinal and y1 are *exactly* the same, but + # we'll be cautious and use assert_allclose. 
+ assert_allclose(yfinal, y1, rtol=1e-12) + assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje)) + + +def test_odeint_full_jac(): + check_odeint(JACTYPE_FULL) + + +def test_odeint_banded_jac(): + check_odeint(JACTYPE_BANDED) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py new file mode 100644 index 0000000000000000000000000000000000000000..90bf6006cf1f0eb378843e8258ac8459c8b6a496 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py @@ -0,0 +1,677 @@ +import sys +import math +import numpy as np +from numpy import sqrt, cos, sin, arctan, exp, log, pi +from numpy.testing import (assert_, + assert_allclose, assert_array_less, assert_almost_equal) +import pytest + +from scipy.integrate import quad, dblquad, tplquad, nquad +from scipy.special import erf, erfc +from scipy._lib._ccallback import LowLevelCallable + +import ctypes +import ctypes.util +from scipy._lib._ccallback_c import sine_ctypes + +import scipy.integrate._test_multivariate as clib_test + + +def assert_quad(value_and_err, tabled_value, error_tolerance=1.5e-8): + value, err = value_and_err + assert_allclose(value, tabled_value, atol=err, rtol=0) + if error_tolerance is not None: + assert_array_less(err, error_tolerance) + + +def get_clib_test_routine(name, restype, *argtypes): + ptr = getattr(clib_test, name) + return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes)) + + +class TestCtypesQuad: + def setup_method(self): + if sys.platform == 'win32': + files = ['api-ms-win-crt-math-l1-1-0.dll'] + elif sys.platform == 'darwin': + files = ['libm.dylib'] + else: + files = ['libm.so', 'libm.so.6'] + + for file in files: + try: + self.lib = ctypes.CDLL(file) + break + except OSError: + pass + else: + # This test doesn't work on some Linux platforms (Fedora for + # example) that put an ld script in libm.so - see gh-5370 + pytest.skip("Ctypes can't import libm.so") + + restype = ctypes.c_double + argtypes = (ctypes.c_double,) + for name in ['sin', 'cos', 'tan']: + func = getattr(self.lib, name) + func.restype = restype + func.argtypes = argtypes + + def test_typical(self): + assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0]) + assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0]) + assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0]) + + def test_ctypes_sine(self): + quad(LowLevelCallable(sine_ctypes), 0, 1) + + def test_ctypes_variants(self): + sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double, + ctypes.c_double, ctypes.c_void_p) + + sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double, + ctypes.c_int, ctypes.POINTER(ctypes.c_double), + ctypes.c_void_p) + + sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double, + ctypes.c_double) + + sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double, + ctypes.c_int, ctypes.POINTER(ctypes.c_double)) + + sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double, + ctypes.c_int, ctypes.c_double) + + all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4] + legacy_sigs = [sin_2, sin_4] + legacy_only_sigs = [sin_4] + + # LowLevelCallables work for new signatures + for j, func in enumerate(all_sigs): + callback = LowLevelCallable(func) + if func in legacy_only_sigs: + pytest.raises(ValueError, quad, callback, 0, pi) + else: + assert_allclose(quad(callback, 0, pi)[0], 2.0) + + # Plain ctypes items work only for legacy signatures + for j, func in enumerate(legacy_sigs): + if func in 
legacy_sigs: + assert_allclose(quad(func, 0, pi)[0], 2.0) + else: + pytest.raises(ValueError, quad, func, 0, pi) + + +class TestMultivariateCtypesQuad: + def setup_method(self): + restype = ctypes.c_double + argtypes = (ctypes.c_int, ctypes.c_double) + for name in ['_multivariate_typical', '_multivariate_indefinite', + '_multivariate_sin']: + func = get_clib_test_routine(name, restype, *argtypes) + setattr(self, name, func) + + def test_typical(self): + # 1) Typical function with two extra arguments: + assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)), + 0.30614353532540296487) + + def test_indefinite(self): + # 2) Infinite integration limits --- Euler's constant + assert_quad(quad(self._multivariate_indefinite, 0, np.inf), + 0.577215664901532860606512) + + def test_threadsafety(self): + # Ensure multivariate ctypes are threadsafe + def threadsafety(y): + return y + quad(self._multivariate_sin, 0, 1)[0] + assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602) + + +class TestQuad: + def test_typical(self): + # 1) Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487) + + def test_indefinite(self): + # 2) Infinite integration limits --- Euler's constant + def myfunc(x): # Euler's constant integrand + return -exp(-x)*log(x) + assert_quad(quad(myfunc, 0, np.inf), 0.577215664901532860606512) + + def test_singular(self): + # 3) Singular points in region of integration. + def myfunc(x): + if 0 < x < 2.5: + return sin(x) + elif 2.5 <= x <= 5.0: + return exp(-x) + else: + return 0.0 + + assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]), + 1 - cos(2.5) + exp(-2.5) - exp(-5.0)) + + def test_sine_weighted_finite(self): + # 4) Sine weighted integral (finite limits) + def myfunc(x, a): + return exp(a*(x-1)) + + ome = 2.0**3.4 + assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome), + (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2)) + + def test_sine_weighted_infinite(self): + # 5) Sine weighted integral (infinite limits) + def myfunc(x, a): + return exp(-x*a) + + a = 4.0 + ome = 3.0 + assert_quad(quad(myfunc, 0, np.inf, args=a, weight='sin', wvar=ome), + ome/(a**2 + ome**2)) + + def test_cosine_weighted_infinite(self): + # 6) Cosine weighted integral (negative infinite limits) + def myfunc(x, a): + return exp(x*a) + + a = 2.5 + ome = 2.3 + assert_quad(quad(myfunc, -np.inf, 0, args=a, weight='cos', wvar=ome), + a/(a**2 + ome**2)) + + def test_algebraic_log_weight(self): + # 6) Algebraic-logarithmic weight. 
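+        # weight='alg' with wvar=(alpha, beta) applies the weight
+        # (x - lower)**alpha * (upper - x)**beta, so wvar=(-0.5, -0.5) on
+        # [-1, 1] weights the integrand by 1/sqrt(1 - x**2).  The reference
+        # value is the closed form of the integral of
+        # 1/((c + x)*sqrt(1 - x**2)) over [-1, 1], namely pi/sqrt(c**2 - 1)
+        # with c = 1 + 2**(-a).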
+ def myfunc(x, a): + return 1/(1+x+2**(-a)) + + a = 1.5 + assert_quad(quad(myfunc, -1, 1, args=a, weight='alg', + wvar=(-0.5, -0.5)), + pi/sqrt((1+2**(-a))**2 - 1)) + + def test_cauchypv_weight(self): + # 7) Cauchy prinicpal value weighting w(x) = 1/(x-c) + def myfunc(x, a): + return 2.0**(-a)/((x-1)**2+4.0**(-a)) + + a = 0.4 + tabledValue = ((2.0**(-0.4)*log(1.5) - + 2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) - + arctan(2.0**(a+2)) - + arctan(2.0**a)) / + (4.0**(-a) + 1)) + assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0), + tabledValue, error_tolerance=1.9e-8) + + def test_b_less_than_a(self): + def f(x, p, q): + return p * np.exp(-q*x) + + val_1, err_1 = quad(f, 0, np.inf, args=(2, 3)) + val_2, err_2 = quad(f, np.inf, 0, args=(2, 3)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_2(self): + def f(x, s): + return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s) + + val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,)) + val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_3(self): + def f(x): + return 1.0 + + val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0)) + val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_full_output(self): + def f(x): + return 1.0 + + res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True) + res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True) + err = max(res_1[1], res_2[1]) + assert_allclose(res_1[0], -res_2[0], atol=err) + + def test_double_integral(self): + # 8) Double Integral test + def simpfunc(y, x): # Note order of arguments. + return x+y + + a, b = 1.0, 2.0 + assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x), + 5/6.0 * (b**3.0-a**3.0)) + + def test_double_integral2(self): + def func(x0, x1, t0, t1): + return x0 + x1 + t0 + t1 + def g(x): + return x + def h(x): + return 2 * x + args = 1, 2 + assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5) + + def test_double_integral3(self): + def func(x0, x1): + return x0 + x1 + 1 + 2 + assert_quad(dblquad(func, 1, 2, 1, 2),6.) + + @pytest.mark.parametrize( + "x_lower, x_upper, y_lower, y_upper, expected", + [ + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, 0] for all n. + (-np.inf, 0, -np.inf, 0, np.pi / 4), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, -1] for each n (one at a time). + (-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)), + (-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, -1] for all n. + (-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, 1] for each n (one at a time). + (-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, 1] for all n. + (-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain Dx = [-inf, -1] and Dy = [-inf, 1]. 
+ (-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain Dx = [-inf, 1] and Dy = [-inf, -1]. + (-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [0, inf] for all n. + (0, np.inf, 0, np.inf, np.pi / 4), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [1, inf] for each n (one at a time). + (1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)), + (0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [1, inf] for all n. + (1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-1, inf] for each n (one at a time). + (-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)), + (0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-1, inf] for all n. + (-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain Dx = [-1, inf] and Dy = [1, inf]. + (-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain Dx = [1, inf] and Dy = [-1, inf]. + (1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, inf] for all n. + (-np.inf, np.inf, -np.inf, np.inf, np.pi) + ] + ) + def test_double_integral_improper( + self, x_lower, x_upper, y_lower, y_upper, expected + ): + # The Gaussian Integral. + def f(x, y): + return np.exp(-x ** 2 - y ** 2) + + assert_quad( + dblquad(f, x_lower, x_upper, y_lower, y_upper), + expected, + error_tolerance=3e-8 + ) + + def test_triple_integral(self): + # 9) Triple Integral test + def simpfunc(z, y, x, t): # Note order of arguments. + return (x+y+z)*t + + a, b = 1.0, 2.0 + assert_quad(tplquad(simpfunc, a, b, + lambda x: x, lambda x: 2*x, + lambda x, y: x - y, lambda x, y: x + y, + (2.,)), + 2*8/3.0 * (b**4.0 - a**4.0)) + + @pytest.mark.parametrize( + "x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected", + [ + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 0] for all n. + (-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for each n (one at a time). + (-np.inf, -1, -np.inf, 0, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (-np.inf, 0, -np.inf, -1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (-np.inf, 0, -np.inf, 0, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for each n (two at a time). + (-np.inf, -1, -np.inf, -1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (-np.inf, -1, -np.inf, 0, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (-np.inf, 0, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for all n. 
+ (-np.inf, -1, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1]. + (-np.inf, -1, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1]. + (-np.inf, -1, -np.inf, -1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1]. + (-np.inf, -1, -np.inf, 1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1]. + (-np.inf, 1, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1]. + (-np.inf, 1, -np.inf, 1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1]. + (-np.inf, 1, -np.inf, -1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for each n (one at a time). + (-np.inf, 1, -np.inf, 0, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 0, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for each n (two at a time). + (-np.inf, 1, -np.inf, 1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-np.inf, 1, -np.inf, 0, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-np.inf, 0, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for all n. + (-np.inf, 1, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [0, inf] for all n. + (0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for each n (one at a time). + (1, np.inf, 0, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (0, np.inf, 1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (0, np.inf, 0, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for each n (two at a time). + (1, np.inf, 1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (1, np.inf, 0, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (0, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for all n. 
+ (1, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for each n (one at a time). + (-1, np.inf, 0, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (0, np.inf, -1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (0, np.inf, 0, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for each n (two at a time). + (-1, np.inf, -1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-1, np.inf, 0, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (0, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for all n. + (-1, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [1, inf] and Dy = Dz = [-1, inf]. + (1, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [1, inf] and Dz = [-1, inf]. + (1, np.inf, 1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [1, inf] and Dy = [-1, inf]. + (1, np.inf, -1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-1, inf] and Dy = Dz = [1, inf]. + (-1, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-1, inf] and Dz = [1, inf]. + (-1, np.inf, -1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-1, inf] and Dy = [1, inf]. + (-1, np.inf, 1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, inf] for all n. + (-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, + np.pi ** (3 / 2)), + ], + ) + def test_triple_integral_improper( + self, + x_lower, + x_upper, + y_lower, + y_upper, + z_lower, + z_upper, + expected + ): + # The Gaussian Integral. + def f(x, y, z): + return np.exp(-x ** 2 - y ** 2 - z ** 2) + + assert_quad( + tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper), + expected, + error_tolerance=6e-8 + ) + + def test_complex(self): + def tfunc(x): + return np.exp(1j*x) + + assert np.allclose( + quad(tfunc, 0, np.pi/2, complex_func=True)[0], + 1+1j) + + # We consider a divergent case in order to force quadpack + # to return an error message. The output is compared + # against what is returned by explicit integration + # of the parts. 
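+        # With complex_func=True the real and imaginary parts are integrated
+        # as two separate real quadratures; full_output then gathers their
+        # extra outputs under the 'real' and 'imag' keys, which the
+        # assertions below unpack and compare against the explicit
+        # real/imaginary integrations.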
+ kwargs = {'a': 0, 'b': np.inf, 'full_output': True, + 'weight': 'cos', 'wvar': 1} + res_c = quad(tfunc, complex_func=True, **kwargs) + res_r = quad(lambda x: np.real(np.exp(1j*x)), + complex_func=False, + **kwargs) + res_i = quad(lambda x: np.imag(np.exp(1j*x)), + complex_func=False, + **kwargs) + + np.testing.assert_equal(res_c[0], res_r[0] + 1j*res_i[0]) + np.testing.assert_equal(res_c[1], res_r[1] + 1j*res_i[1]) + + assert len(res_c[2]['real']) == len(res_r[2:]) == 3 + assert res_c[2]['real'][2] == res_r[4] + assert res_c[2]['real'][1] == res_r[3] + assert res_c[2]['real'][0]['lst'] == res_r[2]['lst'] + + assert len(res_c[2]['imag']) == len(res_i[2:]) == 1 + assert res_c[2]['imag'][0]['lst'] == res_i[2]['lst'] + + +class TestNQuad: + def test_fixed_limits(self): + def func1(x0, x1, x2, x3): + val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) + + (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0)) + return val + + def opts_basic(*args): + return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]} + + res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]], + opts=[opts_basic, {}, {}, {}], full_output=True) + assert_quad(res[:-1], 1.5267454070738635) + assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5) + + def test_variable_limits(self): + scale = .1 + + def func2(x0, x1, x2, x3, t0, t1): + val = (x0*x1*x3**2 + np.sin(x2) + 1 + + (1 if x0 + t1*x1 - t0 > 0 else 0)) + return val + + def lim0(x1, x2, x3, t0, t1): + return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1, + scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1] + + def lim1(x2, x3, t0, t1): + return [scale * (t0*x2 + t1*x3) - 1, + scale * (t0*x2 + t1*x3) + 1] + + def lim2(x3, t0, t1): + return [scale * (x3 + t0**2*t1**3) - 1, + scale * (x3 + t0**2*t1**3) + 1] + + def lim3(t0, t1): + return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1] + + def opts0(x1, x2, x3, t0, t1): + return {'points': [t0 - t1*x1]} + + def opts1(x2, x3, t0, t1): + return {} + + def opts2(x3, t0, t1): + return {} + + def opts3(t0, t1): + return {} + + res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0), + opts=[opts0, opts1, opts2, opts3]) + assert_quad(res, 25.066666666666663) + + def test_square_separate_ranges_and_opts(self): + def f(y, x): + return 1.0 + + assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0) + + def test_square_aliased_ranges_and_opts(self): + def f(y, x): + return 1.0 + + r = [-1, 1] + opt = {} + assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0) + + def test_square_separate_fn_ranges_and_opts(self): + def f(y, x): + return 1.0 + + def fn_range0(*args): + return (-1, 1) + + def fn_range1(*args): + return (-1, 1) + + def fn_opt0(*args): + return {} + + def fn_opt1(*args): + return {} + + ranges = [fn_range0, fn_range1] + opts = [fn_opt0, fn_opt1] + assert_quad(nquad(f, ranges, opts=opts), 4.0) + + def test_square_aliased_fn_ranges_and_opts(self): + def f(y, x): + return 1.0 + + def fn_range(*args): + return (-1, 1) + + def fn_opt(*args): + return {} + + ranges = [fn_range, fn_range] + opts = [fn_opt, fn_opt] + assert_quad(nquad(f, ranges, opts=opts), 4.0) + + def test_matching_quad(self): + def func(x): + return x**2 + 1 + + res, reserr = quad(func, 0, 4) + res2, reserr2 = nquad(func, ranges=[[0, 4]]) + assert_almost_equal(res, res2) + assert_almost_equal(reserr, reserr2) + + def test_matching_dblquad(self): + def func2d(x0, x1): + return x0**2 + x1**3 - x0 * x1 + 1 + + res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3) + res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)]) + assert_almost_equal(res, res2) + 
assert_almost_equal(reserr, reserr2) + + def test_matching_tplquad(self): + def func3d(x0, x1, x2, c0, c1): + return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2) + + res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2, + lambda x, y: -np.pi, lambda x, y: np.pi, + args=(2, 3)) + res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3)) + assert_almost_equal(res, res2) + + def test_dict_as_opts(self): + try: + nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001}) + except TypeError: + assert False + diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..9fc835c47deb7c5500fea7be7585977a93fc8ddd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py @@ -0,0 +1,766 @@ +# mypy: disable-error-code="attr-defined" +import pytest +import numpy as np +from numpy import cos, sin, pi +from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose, + assert_, suppress_warnings) +from hypothesis import given +import hypothesis.strategies as st +import hypothesis.extra.numpy as hyp_num + +from scipy.integrate import (quadrature, romberg, romb, newton_cotes, + cumulative_trapezoid, cumtrapz, trapz, trapezoid, + quad, simpson, simps, fixed_quad, AccuracyWarning, + qmc_quad, cumulative_simpson) +from scipy.integrate._quadrature import _cumulative_simpson_unequal_intervals +from scipy import stats, special + + +class TestFixedQuad: + def test_scalar(self): + n = 4 + expected = 1/(2*n) + got, _ = fixed_quad(lambda x: x**(2*n - 1), 0, 1, n=n) + # quadrature exact for this input + assert_allclose(got, expected, rtol=1e-12) + + def test_vector(self): + n = 4 + p = np.arange(1, 2*n) + expected = 1/(p + 1) + got, _ = fixed_quad(lambda x: x**p[:, None], 0, 1, n=n) + assert_allclose(got, expected, rtol=1e-12) + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class TestQuadrature: + def quad(self, x, a, b, args): + raise NotImplementedError + + def test_quadrature(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, (2, 1.8)) + table_val = 0.30614353532540296487 + assert_almost_equal(val, table_val, decimal=7) + + def test_quadrature_rtol(self): + def myfunc(x, n, z): # Bessel function integrand + return 1e90 * cos(n*x-z*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10) + table_val = 1e90 * 0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_quadrature_miniter(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + table_val = 0.30614353532540296487 + for miniter in [5, 52]: + val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter) + assert_almost_equal(val, table_val, decimal=7) + assert_(err < 1.0) + + def test_quadrature_single_args(self): + def myfunc(x, n): + return 1e90 * cos(n*x-1.8*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10) + table_val = 1e90 * 0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_romberg(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + val = romberg(myfunc, 0, pi, args=(2, 1.8)) + table_val = 
0.30614353532540296487 + assert_almost_equal(val, table_val, decimal=7) + + def test_romberg_rtol(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return 1e19*cos(n*x-z*sin(x))/pi + val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10) + table_val = 1e19*0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_romb(self): + assert_equal(romb(np.arange(17)), 128) + + def test_romb_gh_3731(self): + # Check that romb makes maximal use of data points + x = np.arange(2**4+1) + y = np.cos(0.2*x) + val = romb(y) + val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max()) + assert_allclose(val, val2, rtol=1e-8, atol=0) + + # should be equal to romb with 2**k+1 samples + with suppress_warnings() as sup: + sup.filter(AccuracyWarning, "divmax .4. exceeded") + val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4) + assert_allclose(val, val3, rtol=1e-12, atol=0) + + def test_non_dtype(self): + # Check that we work fine with functions returning float + import math + valmath = romberg(math.sin, 0, 1) + expected_val = 0.45969769413185085 + assert_almost_equal(valmath, expected_val, decimal=7) + + def test_newton_cotes(self): + """Test the first few degrees, for evenly spaced points.""" + n = 1 + wts, errcoff = newton_cotes(n, 1) + assert_equal(wts, n*np.array([0.5, 0.5])) + assert_almost_equal(errcoff, -n**3/12.0) + + n = 2 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0) + assert_almost_equal(errcoff, -n**5/2880.0) + + n = 3 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0) + assert_almost_equal(errcoff, -n**5/6480.0) + + n = 4 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0) + assert_almost_equal(errcoff, -n**7/1935360.0) + + def test_newton_cotes2(self): + """Test newton_cotes with points that are not evenly spaced.""" + + x = np.array([0.0, 1.5, 2.0]) + y = x**2 + wts, errcoff = newton_cotes(x) + exact_integral = 8.0/3 + numeric_integral = np.dot(wts, y) + assert_almost_equal(numeric_integral, exact_integral) + + x = np.array([0.0, 1.4, 2.1, 3.0]) + y = x**2 + wts, errcoff = newton_cotes(x) + exact_integral = 9.0 + numeric_integral = np.dot(wts, y) + assert_almost_equal(numeric_integral, exact_integral) + + # ignore the DeprecationWarning emitted by the even kwd + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + def test_simpson(self): + y = np.arange(17) + assert_equal(simpson(y), 128) + assert_equal(simpson(y, dx=0.5), 64) + assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32) + + y = np.arange(4) + x = 2**y + assert_equal(simpson(y, x=x, even='avg'), 13.875) + assert_equal(simpson(y, x=x, even='first'), 13.75) + assert_equal(simpson(y, x=x, even='last'), 14) + + # `even='simpson'` + # integral should be exactly 21 + x = np.linspace(1, 4, 4) + def f(x): + return x**2 + + assert_allclose(simpson(f(x), x=x, even='simpson'), 21.0) + assert_allclose(simpson(f(x), x=x, even='avg'), 21 + 1/6) + + # integral should be exactly 114 + x = np.linspace(1, 7, 4) + assert_allclose(simpson(f(x), dx=2.0, even='simpson'), 114) + assert_allclose(simpson(f(x), dx=2.0, even='avg'), 115 + 1/3) + + # `even='simpson'`, test multi-axis behaviour + a = np.arange(16).reshape(4, 4) + x = np.arange(64.).reshape(4, 4, 4) + y = f(x) + for i in range(3): + r = simpson(y, x=x, even='simpson', axis=i) + it = np.nditer(a, flags=['multi_index']) + for _ in it: + idx 
= list(it.multi_index) + idx.insert(i, slice(None)) + integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3 + assert_allclose(r[it.multi_index], integral) + + # test when integration axis only has two points + x = np.arange(16).reshape(8, 2) + y = f(x) + for even in ['simpson', 'avg', 'first', 'last']: + r = simpson(y, x=x, even=even, axis=-1) + + integral = 0.5 * (y[:, 1] + y[:, 0]) * (x[:, 1] - x[:, 0]) + assert_allclose(r, integral) + + # odd points, test multi-axis behaviour + a = np.arange(25).reshape(5, 5) + x = np.arange(125).reshape(5, 5, 5) + y = f(x) + for i in range(3): + r = simpson(y, x=x, axis=i) + it = np.nditer(a, flags=['multi_index']) + for _ in it: + idx = list(it.multi_index) + idx.insert(i, slice(None)) + integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3 + assert_allclose(r[it.multi_index], integral) + + # Tests for checking base case + x = np.array([3]) + y = np.power(x, 2) + assert_allclose(simpson(y, x=x, axis=0), 0.0) + assert_allclose(simpson(y, x=x, axis=-1), 0.0) + + x = np.array([3, 3, 3, 3]) + y = np.power(x, 2) + assert_allclose(simpson(y, x=x, axis=0), 0.0) + assert_allclose(simpson(y, x=x, axis=-1), 0.0) + + x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]]) + y = np.power(x, 2) + zero_axis = [0.0, 0.0, 0.0, 0.0] + default_axis = [170 + 1/3] * 3 # 8**3 / 3 - 1/3 + assert_allclose(simpson(y, x=x, axis=0), zero_axis) + # the following should be exact for even='simpson' + assert_allclose(simpson(y, x=x, axis=-1), default_axis) + + x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 8, 16, 32]]) + y = np.power(x, 2) + zero_axis = [0.0, 136.0, 1088.0, 8704.0] + default_axis = [170 + 1/3, 170 + 1/3, 32**3 / 3 - 1/3] + assert_allclose(simpson(y, x=x, axis=0), zero_axis) + assert_allclose(simpson(y, x=x, axis=-1), default_axis) + + def test_simpson_deprecations(self): + x = np.linspace(0, 3, 4) + y = x**2 + with pytest.deprecated_call(match="The 'even' keyword is deprecated"): + simpson(y, x=x, even='first') + with pytest.deprecated_call(match="use keyword arguments"): + simpson(y, x) + + @pytest.mark.parametrize('droplast', [False, True]) + def test_simpson_2d_integer_no_x(self, droplast): + # The inputs are 2d integer arrays. The results should be + # identical to the results when the inputs are floating point. + y = np.array([[2, 2, 4, 4, 8, 8, -4, 5], + [4, 4, 2, -4, 10, 22, -2, 10]]) + if droplast: + y = y[:, :-1] + result = simpson(y, axis=-1) + expected = simpson(np.array(y, dtype=np.float64), axis=-1) + assert_equal(result, expected) + + def test_simps(self): + # Basic coverage test for the alias + y = np.arange(5) + x = 2**y + with pytest.deprecated_call(match="simpson"): + assert_allclose( + simpson(y, x=x, dx=0.5), + simps(y, x=x, dx=0.5) + ) + + +@pytest.mark.parametrize('func', [romberg, quadrature]) +def test_deprecate_integrator(func): + message = f"`scipy.integrate.{func.__name__}` is deprecated..." + with pytest.deprecated_call(match=message): + func(np.exp, 0, 1) + + +class TestCumulative_trapezoid: + def test_1d(self): + x = np.linspace(-2, 2, num=5) + y = x + y_int = cumulative_trapezoid(y, x, initial=0) + y_expected = [0., -1.5, -2., -1.5, 0.] 
+ assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, x, initial=None) + assert_allclose(y_int, y_expected[1:]) + + def test_y_nd_x_nd(self): + x = np.arange(3 * 2 * 4).reshape(3, 2, 4) + y = x + y_int = cumulative_trapezoid(y, x, initial=0) + y_expected = np.array([[[0., 0.5, 2., 4.5], + [0., 4.5, 10., 16.5]], + [[0., 8.5, 18., 28.5], + [0., 12.5, 26., 40.5]], + [[0., 16.5, 34., 52.5], + [0., 20.5, 42., 64.5]]]) + + assert_allclose(y_int, y_expected) + + # Try with all axes + shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)] + for axis, shape in zip([0, 1, 2], shapes): + y_int = cumulative_trapezoid(y, x, initial=0, axis=axis) + assert_equal(y_int.shape, (3, 2, 4)) + y_int = cumulative_trapezoid(y, x, initial=None, axis=axis) + assert_equal(y_int.shape, shape) + + def test_y_nd_x_1d(self): + y = np.arange(3 * 2 * 4).reshape(3, 2, 4) + x = np.arange(4)**2 + # Try with all axes + ys_expected = ( + np.array([[[4., 5., 6., 7.], + [8., 9., 10., 11.]], + [[40., 44., 48., 52.], + [56., 60., 64., 68.]]]), + np.array([[[2., 3., 4., 5.]], + [[10., 11., 12., 13.]], + [[18., 19., 20., 21.]]]), + np.array([[[0.5, 5., 17.5], + [4.5, 21., 53.5]], + [[8.5, 37., 89.5], + [12.5, 53., 125.5]], + [[16.5, 69., 161.5], + [20.5, 85., 197.5]]])) + + for axis, y_expected in zip([0, 1, 2], ys_expected): + y_int = cumulative_trapezoid(y, x=x[:y.shape[axis]], axis=axis, + initial=None) + assert_allclose(y_int, y_expected) + + def test_x_none(self): + y = np.linspace(-2, 2, num=5) + + y_int = cumulative_trapezoid(y) + y_expected = [-1.5, -2., -1.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, initial=0) + y_expected = [0, -1.5, -2., -1.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, dx=3) + y_expected = [-4.5, -6., -4.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, dx=3, initial=0) + y_expected = [0, -4.5, -6., -4.5, 0.] 
+ assert_allclose(y_int, y_expected) + + @pytest.mark.parametrize( + "initial", [1, 0.5] + ) + def test_initial_warning(self, initial): + """If initial is not None or 0, a ValueError is raised.""" + y = np.linspace(0, 10, num=10) + with pytest.deprecated_call(match="`initial`"): + res = cumulative_trapezoid(y, initial=initial) + assert_allclose(res, [initial, *np.cumsum(y[1:] + y[:-1])/2]) + + def test_zero_len_y(self): + with pytest.raises(ValueError, match="At least one point is required"): + cumulative_trapezoid(y=[]) + + def test_cumtrapz(self): + # Basic coverage test for the alias + x = np.arange(3 * 2 * 4).reshape(3, 2, 4) + y = x + with pytest.deprecated_call(match="cumulative_trapezoid"): + assert_allclose(cumulative_trapezoid(y, x, dx=0.5, axis=0, initial=0), + cumtrapz(y, x, dx=0.5, axis=0, initial=0), + rtol=1e-14) + + +class TestTrapezoid: + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_allclose(r, 1) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None,:, None] + z[None, None,:] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapezoid(q, x=x[:, None, None], axis=0) + assert_allclose(r, qx) + r = trapezoid(q, x=y[None,:, None], axis=1) + assert_allclose(r, qy) + r = trapezoid(q, x=z[None, None,:], axis=2) + assert_allclose(r, qz) + + # 1-d `x` + r = trapezoid(q, x=x, axis=0) + assert_allclose(r, qx) + r = trapezoid(q, x=y, axis=1) + assert_allclose(r, qy) + r = trapezoid(q, x=z, axis=2) + assert_allclose(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_allclose(trapezoid(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_allclose(trapezoid(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_allclose(trapezoid(y, xm), r) + + def test_trapz_alias(self): + # Basic coverage test for the alias + y = np.arange(4) + x = 2**y + with pytest.deprecated_call(match="trapezoid"): + assert_equal(trapezoid(y, x=x, dx=0.5, axis=0), + trapz(y, x=x, dx=0.5, axis=0)) + + +class TestQMCQuad: + def test_input_validation(self): + message = "`func` must be callable." + with pytest.raises(TypeError, match=message): + qmc_quad("a duck", [0, 0], [1, 1]) + + message = "`func` must evaluate the integrand at points..." + with pytest.raises(ValueError, match=message): + qmc_quad(lambda: 1, [0, 0], [1, 1]) + + def func(x): + assert x.ndim == 1 + return np.sum(x) + message = "Exception encountered when attempting vectorized call..." + with pytest.warns(UserWarning, match=message): + qmc_quad(func, [0, 0], [1, 1]) + + message = "`n_points` must be an integer." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], n_points=1024.5) + + message = "`n_estimates` must be an integer." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], n_estimates=8.5) + + message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine." 
+ with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng="a duck") + + message = "`qrng` must be initialized with dimensionality equal to " + with pytest.raises(ValueError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng=stats.qmc.Sobol(1)) + + message = r"`log` must be boolean \(`True` or `False`\)." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], log=10) + + def basic_test(self, n_points=2**8, n_estimates=8, signs=np.ones(2)): + + ndim = 2 + mean = np.zeros(ndim) + cov = np.eye(ndim) + + def func(x): + return stats.multivariate_normal.pdf(x.T, mean, cov) + + rng = np.random.default_rng(2879434385674690281) + qrng = stats.qmc.Sobol(ndim, seed=rng) + a = np.zeros(ndim) + b = np.ones(ndim) * signs + res = qmc_quad(func, a, b, n_points=n_points, + n_estimates=n_estimates, qrng=qrng) + ref = stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a) + atol = special.stdtrit(n_estimates-1, 0.995) * res.standard_error # 99% CI + assert_allclose(res.integral, ref, atol=atol) + assert np.prod(signs)*res.integral > 0 + + rng = np.random.default_rng(2879434385674690281) + qrng = stats.qmc.Sobol(ndim, seed=rng) + logres = qmc_quad(lambda *args: np.log(func(*args)), a, b, + n_points=n_points, n_estimates=n_estimates, + log=True, qrng=qrng) + assert_allclose(np.exp(logres.integral), res.integral, rtol=1e-14) + assert np.imag(logres.integral) == (np.pi if np.prod(signs) < 0 else 0) + assert_allclose(np.exp(logres.standard_error), + res.standard_error, rtol=1e-14, atol=1e-16) + + @pytest.mark.parametrize("n_points", [2**8, 2**12]) + @pytest.mark.parametrize("n_estimates", [8, 16]) + def test_basic(self, n_points, n_estimates): + self.basic_test(n_points, n_estimates) + + @pytest.mark.parametrize("signs", [[1, 1], [-1, -1], [-1, 1], [1, -1]]) + def test_sign(self, signs): + self.basic_test(signs=signs) + + @pytest.mark.parametrize("log", [False, True]) + def test_zero(self, log): + message = "A lower limit was equal to an upper limit, so" + with pytest.warns(UserWarning, match=message): + res = qmc_quad(lambda x: 1, [0, 0], [0, 1], log=log) + assert res.integral == (-np.inf if log else 0) + assert res.standard_error == 0 + + def test_flexible_input(self): + # check that qrng is not required + # also checks that for 1d problems, a and b can be scalars + def func(x): + return stats.norm.pdf(x, scale=2) + + res = qmc_quad(func, 0, 1) + ref = stats.norm.cdf(1, scale=2) - stats.norm.cdf(0, scale=2) + assert_allclose(res.integral, ref, 1e-2) + + +def cumulative_simpson_nd_reference(y, *, x=None, dx=None, initial=None, axis=-1): + # Use cumulative_trapezoid if length of y < 3 + if y.shape[axis] < 3: + if initial is None: + return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=None) + else: + return initial + cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=0) + + # Ensure that working axis is last axis + y = np.moveaxis(y, axis, -1) + x = np.moveaxis(x, axis, -1) if np.ndim(x) > 1 else x + dx = np.moveaxis(dx, axis, -1) if np.ndim(dx) > 1 else dx + initial = np.moveaxis(initial, axis, -1) if np.ndim(initial) > 1 else initial + + # If `x` is not present, create it from `dx` + n = y.shape[-1] + x = dx * np.arange(n) if dx is not None else x + # Similarly, if `initial` is not present, set it to 0 + initial_was_none = initial is None + initial = 0 if initial_was_none else initial + + # `np.apply_along_axis` accepts only one array, so concatenate arguments + x = np.broadcast_to(x, y.shape) + initial = 
np.broadcast_to(initial, y.shape[:-1] + (1,)) + z = np.concatenate((y, x, initial), axis=-1) + + # Use `np.apply_along_axis` to compute result + def f(z): + return cumulative_simpson(z[:n], x=z[n:2*n], initial=z[2*n:]) + res = np.apply_along_axis(f, -1, z) + + # Remove `initial` and undo axis move as needed + res = res[..., 1:] if initial_was_none else res + res = np.moveaxis(res, -1, axis) + return res + + +class TestCumulativeSimpson: + x0 = np.arange(4) + y0 = x0**2 + + @pytest.mark.parametrize('use_dx', (False, True)) + @pytest.mark.parametrize('use_initial', (False, True)) + def test_1d(self, use_dx, use_initial): + # Test for exact agreement with polynomial of highest + # possible order (3 if `dx` is constant, 2 otherwise). + rng = np.random.default_rng(82456839535679456794) + n = 10 + + # Generate random polynomials and ground truth + # integral of appropriate order + order = 3 if use_dx else 2 + dx = rng.random() + x = (np.sort(rng.random(n)) if order == 2 + else np.arange(n)*dx + rng.random()) + i = np.arange(order + 1)[:, np.newaxis] + c = rng.random(order + 1)[:, np.newaxis] + y = np.sum(c*x**i, axis=0) + Y = np.sum(c*x**(i + 1)/(i + 1), axis=0) + ref = Y if use_initial else (Y-Y[0])[1:] + + # Integrate with `cumulative_simpson` + initial = Y[0] if use_initial else None + kwarg = {'dx': dx} if use_dx else {'x': x} + res = cumulative_simpson(y, **kwarg, initial=initial) + + # Compare result against reference + if not use_dx: + assert_allclose(res, ref, rtol=2e-15) + else: + i0 = 0 if use_initial else 1 + # all terms are "close" + assert_allclose(res, ref, rtol=0.0025) + # only even-interval terms are "exact" + assert_allclose(res[i0::2], ref[i0::2], rtol=2e-15) + + @pytest.mark.parametrize('axis', np.arange(-3, 3)) + @pytest.mark.parametrize('x_ndim', (1, 3)) + @pytest.mark.parametrize('x_len', (1, 2, 7)) + @pytest.mark.parametrize('i_ndim', (None, 0, 3,)) + @pytest.mark.parametrize('dx', (None, True)) + def test_nd(self, axis, x_ndim, x_len, i_ndim, dx): + # Test behavior of `cumulative_simpson` with N-D `y` + rng = np.random.default_rng(82456839535679456794) + + # determine shapes + shape = [5, 6, x_len] + shape[axis], shape[-1] = shape[-1], shape[axis] + shape_len_1 = shape.copy() + shape_len_1[axis] = 1 + i_shape = shape_len_1 if i_ndim == 3 else () + + # initialize arguments + y = rng.random(size=shape) + x, dx = None, None + if dx: + dx = rng.random(size=shape_len_1) if x_ndim > 1 else rng.random() + else: + x = (np.sort(rng.random(size=shape), axis=axis) if x_ndim > 1 + else np.sort(rng.random(size=shape[axis]))) + initial = None if i_ndim is None else rng.random(size=i_shape) + + # compare results + res = cumulative_simpson(y, x=x, dx=dx, initial=initial, axis=axis) + ref = cumulative_simpson_nd_reference(y, x=x, dx=dx, initial=initial, axis=axis) + np.testing.assert_allclose(res, ref, rtol=1e-15) + + @pytest.mark.parametrize(('message', 'kwarg_update'), [ + ("x must be strictly increasing", dict(x=[2, 2, 3, 4])), + ("x must be strictly increasing", dict(x=[x0, [2, 2, 4, 8]], y=[y0, y0])), + ("x must be strictly increasing", dict(x=[x0, x0, x0], y=[y0, y0, y0], axis=0)), + ("At least one point is required", dict(x=[], y=[])), + ("`axis=4` is not valid for `y` with `y.ndim=1`", dict(axis=4)), + ("shape of `x` must be the same as `y` or 1-D", dict(x=np.arange(5))), + ("`initial` must either be a scalar or...", dict(initial=np.arange(5))), + ("`dx` must either be a scalar or...", dict(x=None, dx=np.arange(5))), + ]) + def test_simpson_exceptions(self, message, kwarg_update): 
+ kwargs0 = dict(y=self.y0, x=self.x0, dx=None, initial=None, axis=-1) + with pytest.raises(ValueError, match=message): + cumulative_simpson(**dict(kwargs0, **kwarg_update)) + + def test_special_cases(self): + # Test special cases not checked elsewhere + rng = np.random.default_rng(82456839535679456794) + y = rng.random(size=10) + res = cumulative_simpson(y, dx=0) + assert_equal(res, 0) + + # Should add tests of: + # - all elements of `x` identical + # These should work as they do for `simpson` + + def _get_theoretical_diff_between_simps_and_cum_simps(self, y, x): + """`cumulative_simpson` and `simpson` can be tested against other to verify + they give consistent results. `simpson` will iteratively be called with + successively higher upper limits of integration. This function calculates + the theoretical correction required to `simpson` at even intervals to match + with `cumulative_simpson`. + """ + d = np.diff(x, axis=-1) + sub_integrals_h1 = _cumulative_simpson_unequal_intervals(y, d) + sub_integrals_h2 = _cumulative_simpson_unequal_intervals( + y[..., ::-1], d[..., ::-1] + )[..., ::-1] + + # Concatenate to build difference array + zeros_shape = (*y.shape[:-1], 1) + theoretical_difference = np.concatenate( + [ + np.zeros(zeros_shape), + (sub_integrals_h1[..., 1:] - sub_integrals_h2[..., :-1]), + np.zeros(zeros_shape), + ], + axis=-1, + ) + # Differences only expected at even intervals. Odd intervals will + # match exactly so there is no correction + theoretical_difference[..., 1::2] = 0.0 + # Note: the first interval will not match from this correction as + # `simpson` uses the trapezoidal rule + return theoretical_difference + + @given( + y=hyp_num.arrays( + np.float64, + hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10), + elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7) + ) + ) + def test_cumulative_simpson_against_simpson_with_default_dx( + self, y + ): + """Theoretically, the output of `cumulative_simpson` will be identical + to `simpson` at all even indices and in the last index. The first index + will not match as `simpson` uses the trapezoidal rule when there are only two + data points. Odd indices after the first index are shown to match with + a mathematically-derived correction.""" + def simpson_reference(y): + return np.stack( + [simpson(y[..., :i], dx=1.0) for i in range(2, y.shape[-1]+1)], axis=-1, + ) + + res = cumulative_simpson(y, dx=1.0) + ref = simpson_reference(y) + theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps( + y, x=np.arange(y.shape[-1]) + ) + np.testing.assert_allclose( + res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:] + ) + + + @given( + y=hyp_num.arrays( + np.float64, + hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10), + elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7) + ) + ) + def test_cumulative_simpson_against_simpson( + self, y + ): + """Theoretically, the output of `cumulative_simpson` will be identical + to `simpson` at all even indices and in the last index. The first index + will not match as `simpson` uses the trapezoidal rule when there are only two + data points. 
Odd indices after the first index are shown to match with + a mathematically-derived correction.""" + interval = 10/(y.shape[-1] - 1) + x = np.linspace(0, 10, num=y.shape[-1]) + x[1:] = x[1:] + 0.2*interval*np.random.uniform(-1, 1, len(x) - 1) + + def simpson_reference(y, x): + return np.stack( + [simpson(y[..., :i], x=x[..., :i]) for i in range(2, y.shape[-1]+1)], + axis=-1, + ) + + res = cumulative_simpson(y, x=x) + ref = simpson_reference(y, x) + theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps( + y, x + ) + np.testing.assert_allclose( + res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:] + ) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py new file mode 100644 index 0000000000000000000000000000000000000000..6e675968eedc0959f3bc3a98e907b73fd3e25ddf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py @@ -0,0 +1,943 @@ +# mypy: disable-error-code="attr-defined" +import pytest + +import numpy as np +from numpy.testing import assert_allclose, assert_equal + +import scipy._lib._elementwise_iterative_method as eim +from scipy import special, stats +from scipy.integrate import quad_vec +from scipy.integrate._tanhsinh import _tanhsinh, _pair_cache, _nsum +from scipy.stats._discrete_distns import _gen_harmonic_gt1 + +class TestTanhSinh: + + # Test problems from [1] Section 6 + def f1(self, t): + return t * np.log(1 + t) + + f1.ref = 0.25 + f1.b = 1 + + def f2(self, t): + return t ** 2 * np.arctan(t) + + f2.ref = (np.pi - 2 + 2 * np.log(2)) / 12 + f2.b = 1 + + def f3(self, t): + return np.exp(t) * np.cos(t) + + f3.ref = (np.exp(np.pi / 2) - 1) / 2 + f3.b = np.pi / 2 + + def f4(self, t): + a = np.sqrt(2 + t ** 2) + return np.arctan(a) / ((1 + t ** 2) * a) + + f4.ref = 5 * np.pi ** 2 / 96 + f4.b = 1 + + def f5(self, t): + return np.sqrt(t) * np.log(t) + + f5.ref = -4 / 9 + f5.b = 1 + + def f6(self, t): + return np.sqrt(1 - t ** 2) + + f6.ref = np.pi / 4 + f6.b = 1 + + def f7(self, t): + return np.sqrt(t) / np.sqrt(1 - t ** 2) + + f7.ref = 2 * np.sqrt(np.pi) * special.gamma(3 / 4) / special.gamma(1 / 4) + f7.b = 1 + + def f8(self, t): + return np.log(t) ** 2 + + f8.ref = 2 + f8.b = 1 + + def f9(self, t): + return np.log(np.cos(t)) + + f9.ref = -np.pi * np.log(2) / 2 + f9.b = np.pi / 2 + + def f10(self, t): + return np.sqrt(np.tan(t)) + + f10.ref = np.pi * np.sqrt(2) / 2 + f10.b = np.pi / 2 + + def f11(self, t): + return 1 / (1 + t ** 2) + + f11.ref = np.pi / 2 + f11.b = np.inf + + def f12(self, t): + return np.exp(-t) / np.sqrt(t) + + f12.ref = np.sqrt(np.pi) + f12.b = np.inf + + def f13(self, t): + return np.exp(-t ** 2 / 2) + + f13.ref = np.sqrt(np.pi / 2) + f13.b = np.inf + + def f14(self, t): + return np.exp(-t) * np.cos(t) + + f14.ref = 0.5 + f14.b = np.inf + + def f15(self, t): + return np.sin(t) / t + + f15.ref = np.pi / 2 + f15.b = np.inf + + def error(self, res, ref, log=False): + err = abs(res - ref) + + if not log: + return err + + with np.errstate(divide='ignore'): + return np.log10(err) + + def test_input_validation(self): + f = self.f1 + + message = '`f` must be callable.' + with pytest.raises(ValueError, match=message): + _tanhsinh(42, 0, f.b) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, log=2) + + message = '...must be real numbers.' 
+ with pytest.raises(ValueError, match=message): + _tanhsinh(f, 1+1j, f.b) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol='ekki') + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=pytest) + + message = '...must be non-negative and finite.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol=np.inf) + + message = '...may not be positive infinity.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=np.inf, log=True) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol=np.inf, log=True) + + message = '...must be integers.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxlevel=object()) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxfun=1+1j) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, minlevel="migratory coconut") + + message = '...must be non-negative.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxlevel=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxfun=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, minlevel=-1) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, preserve_shape=2) + + message = '...must be callable.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, callback='elderberry') + + @pytest.mark.parametrize("limits, ref", [ + [(0, np.inf), 0.5], # b infinite + [(-np.inf, 0), 0.5], # a infinite + [(-np.inf, np.inf), 1], # a and b infinite + [(np.inf, -np.inf), -1], # flipped limits + [(1, -1), stats.norm.cdf(-1) - stats.norm.cdf(1)], # flipped limits + ]) + def test_integral_transforms(self, limits, ref): + # Check that the integral transforms are behaving for both normal and + # log integration + dist = stats.norm() + + res = _tanhsinh(dist.pdf, *limits) + assert_allclose(res.integral, ref) + + logres = _tanhsinh(dist.logpdf, *limits, log=True) + assert_allclose(np.exp(logres.integral), ref) + # Transformation should not make the result complex unnecessarily + assert (np.issubdtype(logres.integral.dtype, np.floating) if ref > 0 + else np.issubdtype(logres.integral.dtype, np.complexfloating)) + + assert_allclose(np.exp(logres.error), res.error, atol=1e-16) + + # 15 skipped intentionally; it's very difficult numerically + @pytest.mark.parametrize('f_number', range(1, 15)) + def test_basic(self, f_number): + f = getattr(self, f"f{f_number}") + rtol = 2e-8 + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert_allclose(res.integral, f.ref, rtol=rtol) + if f_number not in {14}: # mildly underestimates error here + true_error = abs(self.error(res.integral, f.ref)/res.integral) + assert true_error < res.error + + if f_number in {7, 10, 12}: # succeeds, but doesn't know it + return + + assert res.success + assert res.status == 0 + + @pytest.mark.parametrize('ref', (0.5, [0.4, 0.6])) + @pytest.mark.parametrize('case', stats._distr_params.distcont) + def test_accuracy(self, ref, case): + distname, params = case + if distname in {'dgamma', 'dweibull', 'laplace', 'kstwo'}: + # should split up interval at first-derivative discontinuity + pytest.skip('tanh-sinh is not great for non-smooth integrands') + dist = getattr(stats, distname)(*params) + x = dist.interval(ref) + res = _tanhsinh(dist.pdf, *x) + assert_allclose(res.integral, ref) + + 
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + rng = np.random.default_rng(82456839535679456794) + a = rng.random(shape) + b = rng.random(shape) + p = rng.random(shape) + n = np.prod(shape) + + def f(x, p): + f.ncall += 1 + f.feval += 1 if (x.size == n or x.ndim <=1) else x.shape[-1] + return x**p + f.ncall = 0 + f.feval = 0 + + @np.vectorize + def _tanhsinh_single(a, b, p): + return _tanhsinh(lambda x: x**p, a, b) + + res = _tanhsinh(f, a, b, args=(p,)) + refs = _tanhsinh_single(a, b, p).ravel() + + attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert np.issubdtype(res.maxlevel.dtype, np.integer) + assert_equal(np.max(res.nfev), f.feval) + # maxlevel = 2 -> 3 function calls (2 initialization, 1 work) + assert np.max(res.maxlevel) >= 2 + assert_equal(np.max(res.maxlevel), f.ncall) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. + def f(xs, js): + f.nit += 1 + funcs = [lambda x: np.exp(-x**2), # converges + lambda x: np.exp(x), # reaches maxiter due to order=2 + lambda x: np.full_like(x, np.nan)[()]] # stops due to NaN + res = [funcs[j](x) for x, j in zip(xs, js.ravel())] + return res + f.nit = 0 + + args = (np.arange(3, dtype=np.int64),) + res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, args=args) + ref_flags = np.array([0, -2, -3]) + assert_equal(res.status, ref_flags) + + def test_flags_preserve_shape(self): + # Same test as above but using `preserve_shape` option to simplify. 
+ def f(x): + return [np.exp(-x[0]**2), # converges + np.exp(x[1]), # reaches maxiter due to order=2 + np.full_like(x[2], np.nan)[()]] # stops due to NaN + + res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, preserve_shape=True) + ref_flags = np.array([0, -2, -3]) + assert_equal(res.status, ref_flags) + + def test_preserve_shape(self): + # Test `preserve_shape` option + def f(x): + return np.asarray([[x, np.sin(10 * x)], + [np.cos(30 * x), x * np.sin(100 * x)]]) + + ref = quad_vec(f, 0, 1) + res = _tanhsinh(f, 0, 1, preserve_shape=True) + assert_allclose(res.integral, ref[0]) + + def test_convergence(self): + # demonstrate that number of accurate digits doubles each iteration + f = self.f1 + last_logerr = 0 + for i in range(4): + res = _tanhsinh(f, 0, f.b, minlevel=0, maxlevel=i) + logerr = self.error(res.integral, f.ref, log=True) + assert (logerr < last_logerr * 2 or logerr < -15.5) + last_logerr = logerr + + def test_options_and_result_attributes(self): + # demonstrate that options are behaving as advertised and status + # messages are as intended + def f(x): + f.calls += 1 + f.feval += np.size(x) + return self.f2(x) + f.ref = self.f2.ref + f.b = self.f2.b + default_rtol = 1e-12 + default_atol = f.ref * default_rtol # effective default absolute tol + + # Test default options + f.feval, f.calls = 0, 0 + ref = _tanhsinh(f, 0, f.b) + assert self.error(ref.integral, f.ref) < ref.error < default_atol + assert ref.nfev == f.feval + ref.calls = f.calls # reference number of function calls + assert ref.success + assert ref.status == 0 + + # Test `maxlevel` equal to required max level + # We should get all the same results + f.feval, f.calls = 0, 0 + maxlevel = ref.maxlevel + res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel) + res.calls = f.calls + assert res == ref + + # Now reduce the maximum level. We won't meet tolerances. + f.feval, f.calls = 0, 0 + maxlevel -= 1 + assert maxlevel >= 2 # can't compare errors otherwise + res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel) + assert self.error(res.integral, f.ref) < res.error > default_atol + assert res.nfev == f.feval < ref.nfev + assert f.calls == ref.calls - 1 + assert not res.success + assert res.status == eim._ECONVERR + + # `maxfun` is currently not enforced + + # # Test `maxfun` equal to required number of function evaluations + # # We should get all the same results + # f.feval, f.calls = 0, 0 + # maxfun = ref.nfev + # res = _tanhsinh(f, 0, f.b, maxfun = maxfun) + # assert res == ref + # + # # Now reduce `maxfun`. We won't meet tolerances. 
+ # f.feval, f.calls = 0, 0 + # maxfun -= 1 + # res = _tanhsinh(f, 0, f.b, maxfun=maxfun) + # assert self.error(res.integral, f.ref) < res.error > default_atol + # assert res.nfev == f.feval < ref.nfev + # assert f.calls == ref.calls - 1 + # assert not res.success + # assert res.status == 2 + + # Take this result to be the new reference + ref = res + ref.calls = f.calls + + # Test `atol` + f.feval, f.calls = 0, 0 + # With this tolerance, we should get the exact same result as ref + atol = np.nextafter(ref.error, np.inf) + res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol) + assert res.integral == ref.integral + assert res.error == ref.error + assert res.nfev == f.feval == ref.nfev + assert f.calls == ref.calls + # Except the result is considered to be successful + assert res.success + assert res.status == 0 + + f.feval, f.calls = 0, 0 + # With a tighter tolerance, we should get a more accurate result + atol = np.nextafter(ref.error, -np.inf) + res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol) + assert self.error(res.integral, f.ref) < res.error < atol + assert res.nfev == f.feval > ref.nfev + assert f.calls > ref.calls + assert res.success + assert res.status == 0 + + # Test `rtol` + f.feval, f.calls = 0, 0 + # With this tolerance, we should get the exact same result as ref + rtol = np.nextafter(ref.error/ref.integral, np.inf) + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert res.integral == ref.integral + assert res.error == ref.error + assert res.nfev == f.feval == ref.nfev + assert f.calls == ref.calls + # Except the result is considered to be successful + assert res.success + assert res.status == 0 + + f.feval, f.calls = 0, 0 + # With a tighter tolerance, we should get a more accurate result + rtol = np.nextafter(ref.error/ref.integral, -np.inf) + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert self.error(res.integral, f.ref)/f.ref < res.error/res.integral < rtol + assert res.nfev == f.feval > ref.nfev + assert f.calls > ref.calls + assert res.success + assert res.status == 0 + + @pytest.mark.parametrize('rtol', [1e-4, 1e-14]) + def test_log(self, rtol): + # Test equivalence of log-integration and regular integration + dist = stats.norm() + + test_tols = dict(atol=1e-18, rtol=1e-15) + + # Positive integrand (real log-integrand) + res = _tanhsinh(dist.logpdf, -1, 2, log=True, rtol=np.log(rtol)) + ref = _tanhsinh(dist.pdf, -1, 2, rtol=rtol) + assert_allclose(np.exp(res.integral), ref.integral, **test_tols) + assert_allclose(np.exp(res.error), ref.error, **test_tols) + assert res.nfev == ref.nfev + + # Real integrand (complex log-integrand) + def f(x): + return -dist.logpdf(x)*dist.pdf(x) + + def logf(x): + return np.log(dist.logpdf(x) + 0j) + dist.logpdf(x) + np.pi * 1j + + res = _tanhsinh(logf, -np.inf, np.inf, log=True) + ref = _tanhsinh(f, -np.inf, np.inf) + # In gh-19173, we saw `invalid` warnings on one CI platform. + # Silencing `all` because I can't reproduce locally and don't want + # to risk the need to run CI again. 
+ with np.errstate(all='ignore'): + assert_allclose(np.exp(res.integral), ref.integral, **test_tols) + assert_allclose(np.exp(res.error), ref.error, **test_tols) + assert res.nfev == ref.nfev + + def test_complex(self): + # Test integration of complex integrand + # Finite limits + def f(x): + return np.exp(1j * x) + + res = _tanhsinh(f, 0, np.pi/4) + ref = np.sqrt(2)/2 + (1-np.sqrt(2)/2)*1j + assert_allclose(res.integral, ref) + + # Infinite limits + dist1 = stats.norm(scale=1) + dist2 = stats.norm(scale=2) + def f(x): + return dist1.pdf(x) + 1j*dist2.pdf(x) + + res = _tanhsinh(f, np.inf, -np.inf) + assert_allclose(res.integral, -(1+1j)) + + @pytest.mark.parametrize("maxlevel", range(4)) + def test_minlevel(self, maxlevel): + # Verify that minlevel does not change the values at which the + # integrand is evaluated or the integral/error estimates, only the + # number of function calls + def f(x): + f.calls += 1 + f.feval += np.size(x) + f.x = np.concatenate((f.x, x.ravel())) + return self.f2(x) + f.feval, f.calls, f.x = 0, 0, np.array([]) + + ref = _tanhsinh(f, 0, self.f2.b, minlevel=0, maxlevel=maxlevel) + ref_x = np.sort(f.x) + + for minlevel in range(0, maxlevel + 1): + f.feval, f.calls, f.x = 0, 0, np.array([]) + options = dict(minlevel=minlevel, maxlevel=maxlevel) + res = _tanhsinh(f, 0, self.f2.b, **options) + # Should be very close; all that has changed is the order of values + assert_allclose(res.integral, ref.integral, rtol=4e-16) + # Difference in absolute errors << magnitude of integral + assert_allclose(res.error, ref.error, atol=4e-16 * ref.integral) + assert res.nfev == f.feval == len(f.x) + assert f.calls == maxlevel - minlevel + 1 + 1 # 1 validation call + assert res.status == ref.status + assert_equal(ref_x, np.sort(f.x)) + + def test_improper_integrals(self): + # Test handling of infinite limits of integration (mixed with finite limits) + def f(x): + x[np.isinf(x)] = np.nan + return np.exp(-x**2) + a = [-np.inf, 0, -np.inf, np.inf, -20, -np.inf, -20] + b = [np.inf, np.inf, 0, -np.inf, 20, 20, np.inf] + ref = np.sqrt(np.pi) + res = _tanhsinh(f, a, b) + assert_allclose(res.integral, [ref, ref/2, ref/2, -ref, ref, ref, ref]) + + @pytest.mark.parametrize("limits", ((0, 3), ([-np.inf, 0], [3, 3]))) + @pytest.mark.parametrize("dtype", (np.float32, np.float64)) + def test_dtype(self, limits, dtype): + # Test that dtypes are preserved + a, b = np.asarray(limits, dtype=dtype)[()] + + def f(x): + assert x.dtype == dtype + return np.exp(x) + + rtol = 1e-12 if dtype == np.float64 else 1e-5 + res = _tanhsinh(f, a, b, rtol=rtol) + assert res.integral.dtype == dtype + assert res.error.dtype == dtype + assert np.all(res.success) + assert_allclose(res.integral, np.exp(b)-np.exp(a), rtol=rtol) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + a, b = -np.inf, np.inf + def f(x): + return np.exp(-x*x) + + minlevel, maxlevel = 0, 2 + maxiter = maxlevel - minlevel + 1 + kwargs = dict(minlevel=minlevel, maxlevel=maxlevel, rtol=1e-15) + res = _tanhsinh(f, a, b, **kwargs) + assert not res.success + assert res.maxlevel == maxlevel + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'integral') + assert res.status == 1 + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + + del kwargs['maxlevel'] + res2 = _tanhsinh(f, a, b, **kwargs, callback=callback) + # terminating with callback is identical to terminating due to maxiter 
+        # (except for `status`)
+        for key in res.keys():
+            if key == 'status':
+                assert callback.res[key] == 1
+                assert res[key] == -2
+                assert res2[key] == -4
+            else:
+                assert res2[key] == callback.res[key] == res[key]
+
+    def test_jumpstart(self):
+        # The intermediate results at each level i should be the same as the
+        # final results when jumpstarting at level i; i.e. minlevel=maxlevel=i
+        a, b = -np.inf, np.inf
+        def f(x):
+            return np.exp(-x*x)
+
+        def callback(res):
+            callback.integrals.append(res.integral)
+            callback.errors.append(res.error)
+        callback.integrals = []
+        callback.errors = []
+
+        maxlevel = 4
+        _tanhsinh(f, a, b, minlevel=0, maxlevel=maxlevel, callback=callback)
+
+        integrals = []
+        errors = []
+        for i in range(maxlevel + 1):
+            res = _tanhsinh(f, a, b, minlevel=i, maxlevel=i)
+            integrals.append(res.integral)
+            errors.append(res.error)
+
+        assert_allclose(callback.integrals[1:], integrals, rtol=1e-15)
+        assert_allclose(callback.errors[1:], errors, rtol=1e-15, atol=1e-16)
+
+    def test_special_cases(self):
+        # Test edge cases and other special cases
+
+        # Test that integers are not passed to `f`
+        # (otherwise this would overflow)
+        def f(x):
+            assert np.issubdtype(x.dtype, np.floating)
+            return x ** 99
+
+        res = _tanhsinh(f, 0, 1)
+        assert res.success
+        assert_allclose(res.integral, 1/100)
+
+        # Test levels 0 and 1; error is NaN
+        res = _tanhsinh(f, 0, 1, maxlevel=0)
+        assert res.integral > 0
+        assert_equal(res.error, np.nan)
+        res = _tanhsinh(f, 0, 1, maxlevel=1)
+        assert res.integral > 0
+        assert_equal(res.error, np.nan)
+
+        # Test equal left and right integration limits
+        res = _tanhsinh(f, 1, 1)
+        assert res.success
+        assert res.maxlevel == -1
+        assert_allclose(res.integral, 0)
+
+        # Test scalar `args` (not in tuple)
+        def f(x, c):
+            return x**c
+
+        res = _tanhsinh(f, 0, 1, args=99)
+        assert_allclose(res.integral, 1/100)
+
+        # Test NaNs
+        a = [np.nan, 0, 0, 0]
+        b = [1, np.nan, 1, 1]
+        c = [1, 1, np.nan, 1]
+        res = _tanhsinh(f, a, b, args=(c,))
+        assert_allclose(res.integral, [np.nan, np.nan, np.nan, 0.5])
+        assert_allclose(res.error[:3], np.nan)
+        assert_equal(res.status, [-3, -3, -3, 0])
+        assert_equal(res.success, [False, False, False, True])
+        assert_equal(res.nfev[:3], 1)
+
+        # Test complex integral followed by real integral
+        # Previously, h0 was of the result dtype. If the `dtype` were complex,
+        # this could lead to complex cached abscissae/weights. If these get
+        # cast to real dtype for a subsequent real integral, we would get a
+        # ComplexWarning. Check that this is avoided.
+ _pair_cache.xjc = np.empty(0) + _pair_cache.wj = np.empty(0) + _pair_cache.indices = [0] + _pair_cache.h0 = None + res = _tanhsinh(lambda x: x*1j, 0, 1) + assert_allclose(res.integral, 0.5*1j) + res = _tanhsinh(lambda x: x, 0, 1) + assert_allclose(res.integral, 0.5) + + # Test zero-size + shape = (0, 3) + res = _tanhsinh(lambda x: x, 0, np.zeros(shape)) + attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel'] + for attr in attrs: + assert_equal(res[attr].shape, shape) + + +class TestNSum: + rng = np.random.default_rng(5895448232066142650) + p = rng.uniform(1, 10, size=10) + + def f1(self, k): + # Integers are never passed to `f1`; if they were, we'd get + # integer to negative integer power error + return k**(-2) + + f1.ref = np.pi**2/6 + f1.a = 1 + f1.b = np.inf + f1.args = tuple() + + def f2(self, k, p): + return 1 / k**p + + f2.ref = special.zeta(p, 1) + f2.a = 1 + f2.b = np.inf + f2.args = (p,) + + def f3(self, k, p): + return 1 / k**p + + f3.a = 1 + f3.b = rng.integers(5, 15, size=(3, 1)) + f3.ref = _gen_harmonic_gt1(f3.b, p) + f3.args = (p,) + + def test_input_validation(self): + f = self.f1 + + message = '`f` must be callable.' + with pytest.raises(ValueError, match=message): + _nsum(42, f.a, f.b) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, log=2) + + message = '...must be real numbers.' + with pytest.raises(ValueError, match=message): + _nsum(f, 1+1j, f.b) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, None) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, step=object()) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, atol='ekki') + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, rtol=pytest) + + with np.errstate(all='ignore'): + res = _nsum(f, [np.nan, -np.inf, np.inf], 1) + assert np.all((res.status == -1) & np.isnan(res.sum) + & np.isnan(res.error) & ~res.success & res.nfev == 1) + res = _nsum(f, 10, [np.nan, 1]) + assert np.all((res.status == -1) & np.isnan(res.sum) + & np.isnan(res.error) & ~res.success & res.nfev == 1) + res = _nsum(f, 1, 10, step=[np.nan, -np.inf, np.inf, -1, 0]) + assert np.all((res.status == -1) & np.isnan(res.sum) + & np.isnan(res.error) & ~res.success & res.nfev == 1) + + message = '...must be non-negative and finite.' + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, rtol=-1) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, atol=np.inf) + + message = '...may not be positive infinity.' + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, rtol=np.inf, log=True) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, atol=np.inf, log=True) + + message = '...must be a non-negative integer.' 
+ with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, maxterms=3.5) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, maxterms=-2) + + @pytest.mark.parametrize('f_number', range(1, 4)) + def test_basic(self, f_number): + f = getattr(self, f"f{f_number}") + res = _nsum(f, f.a, f.b, args=f.args) + assert_allclose(res.sum, f.ref) + assert_equal(res.status, 0) + assert_equal(res.success, True) + + with np.errstate(divide='ignore'): + logres = _nsum(lambda *args: np.log(f(*args)), + f.a, f.b, log=True, args=f.args) + assert_allclose(np.exp(logres.sum), res.sum) + assert_allclose(np.exp(logres.error), res.error) + assert_equal(logres.status, 0) + assert_equal(logres.success, True) + + @pytest.mark.parametrize('maxterms', [0, 1, 10, 20, 100]) + def test_integral(self, maxterms): + # test precise behavior of integral approximation + f = self.f1 + + def logf(x): + return -2*np.log(x) + + def F(x): + return -1 / x + + a = np.asarray([1, 5])[:, np.newaxis] + b = np.asarray([20, 100, np.inf])[:, np.newaxis, np.newaxis] + step = np.asarray([0.5, 1, 2]).reshape((-1, 1, 1, 1)) + nsteps = np.floor((b - a)/step) + b_original = b + b = a + nsteps*step + + k = a + maxterms*step + # partial sum + direct = f(a + np.arange(maxterms)*step).sum(axis=-1, keepdims=True) + integral = (F(b) - F(k))/step # integral approximation of remainder + low = direct + integral + f(b) # theoretical lower bound + high = direct + integral + f(k) # theoretical upper bound + ref_sum = (low + high)/2 # _nsum uses average of the two + ref_err = (high - low)/2 # error (assuming perfect quadrature) + + # correct reference values where number of terms < maxterms + a, b, step = np.broadcast_arrays(a, b, step) + for i in np.ndindex(a.shape): + ai, bi, stepi = a[i], b[i], step[i] + if (bi - ai)/stepi + 1 <= maxterms: + direct = f(np.arange(ai, bi+stepi, stepi)).sum() + ref_sum[i] = direct + ref_err[i] = direct * np.finfo(direct).eps + + rtol = 1e-12 + res = _nsum(f, a, b_original, step=step, maxterms=maxterms, rtol=rtol) + assert_allclose(res.sum, ref_sum, rtol=10*rtol) + assert_allclose(res.error, ref_err, rtol=100*rtol) + assert_equal(res.status, 0) + assert_equal(res.success, True) + + i = ((b_original - a)/step + 1 <= maxterms) + assert_allclose(res.sum[i], ref_sum[i], rtol=1e-15) + assert_allclose(res.error[i], ref_err[i], rtol=1e-15) + + logres = _nsum(logf, a, b_original, step=step, log=True, + rtol=np.log(rtol), maxterms=maxterms) + assert_allclose(np.exp(logres.sum), res.sum) + assert_allclose(np.exp(logres.error), res.error) + assert_equal(logres.status, 0) + assert_equal(logres.success, True) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + rng = np.random.default_rng(82456839535679456794) + a = rng.integers(1, 10, size=shape) + # when the sum can be computed directly or `maxterms` is large enough + # to meet `atol`, there are slight differences (for good reason) + # between vectorized call and looping. 
+ b = np.inf + p = rng.random(shape) + 1 + n = np.prod(shape) + + def f(x, p): + f.feval += 1 if (x.size == n or x.ndim <= 1) else x.shape[-1] + return 1 / x ** p + + f.feval = 0 + + @np.vectorize + def _nsum_single(a, b, p, maxterms): + return _nsum(lambda x: 1 / x**p, a, b, maxterms=maxterms) + + res = _nsum(f, a, b, maxterms=1000, args=(p,)) + refs = _nsum_single(a, b, p, maxterms=1000).ravel() + + attrs = ['sum', 'error', 'success', 'status', 'nfev'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert_equal(np.max(res.nfev), f.feval) + + def test_status(self): + f = self.f2 + + p = [2, 2, 0.9, 1.1] + a = [0, 0, 1, 1] + b = [10, np.inf, np.inf, np.inf] + ref = special.zeta(p, 1) + + with np.errstate(divide='ignore'): # intentionally dividing by zero + res = _nsum(f, a, b, args=(p,)) + + assert_equal(res.success, [False, False, False, True]) + assert_equal(res.status, [-3, -3, -2, 0]) + assert_allclose(res.sum[res.success], ref[res.success]) + + def test_nfev(self): + def f(x): + f.nfev += np.size(x) + return 1 / x**2 + + f.nfev = 0 + res = _nsum(f, 1, 10) + assert_equal(res.nfev, f.nfev) + + f.nfev = 0 + res = _nsum(f, 1, np.inf, atol=1e-6) + assert_equal(res.nfev, f.nfev) + + def test_inclusive(self): + # There was an edge case off-by one bug when `_direct` was called with + # `inclusive=True`. Check that this is resolved. + res = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf, maxterms=500, atol=0.1) + ref = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf) + assert np.all(res.sum > (ref.sum - res.error)) + assert np.all(res.sum < (ref.sum + res.error)) + + def test_special_case(self): + # test equal lower/upper limit + f = self.f1 + a = b = 2 + res = _nsum(f, a, b) + assert_equal(res.sum, f(a)) + + # Test scalar `args` (not in tuple) + res = _nsum(self.f2, 1, np.inf, args=2) + assert_allclose(res.sum, self.f1.ref) # f1.ref is correct w/ args=2 + + # Test 0 size input + a = np.empty((3, 1, 1)) # arbitrary broadcastable shapes + b = np.empty((0, 1)) # could use Hypothesis + p = np.empty(4) # but it's overkill + shape = np.broadcast_shapes(a.shape, b.shape, p.shape) + res = _nsum(self.f2, a, b, args=(p,)) + assert res.sum.shape == shape + assert res.status.shape == shape + assert res.nfev.shape == shape + + # Test maxterms=0 + def f(x): + with np.errstate(divide='ignore'): + return 1 / x + + res = _nsum(f, 0, 10, maxterms=0) + assert np.isnan(res.sum) + assert np.isnan(res.error) + assert res.status == -2 + + res = _nsum(f, 0, 10, maxterms=1) + assert np.isnan(res.sum) + assert np.isnan(res.error) + assert res.status == -3 + + # Test NaNs + # should skip both direct and integral methods if there are NaNs + a = [np.nan, 1, 1, 1] + b = [np.inf, np.nan, np.inf, np.inf] + p = [2, 2, np.nan, 2] + res = _nsum(self.f2, a, b, args=(p,)) + assert_allclose(res.sum, [np.nan, np.nan, np.nan, self.f1.ref]) + assert_allclose(res.error[:3], np.nan) + assert_equal(res.status, [-1, -1, -3, 0]) + assert_equal(res.success, [False, False, False, True]) + # Ideally res.nfev[2] would be 1, but `tanhsinh` has some function evals + assert_equal(res.nfev[:2], 1) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_dtype(self, dtype): + def f(k): + assert k.dtype == dtype + return 1 / k ** 
np.asarray(2, dtype=dtype)[()] + + a = np.asarray(1, dtype=dtype) + b = np.asarray([10, np.inf], dtype=dtype) + res = _nsum(f, a, b) + assert res.sum.dtype == dtype + assert res.error.dtype == dtype + + rtol = 1e-12 if dtype == np.float64 else 1e-6 + ref = _gen_harmonic_gt1(b, 2) + assert_allclose(res.sum, ref, rtol=rtol) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfbeb794d784ed4ff2c6508d473320ecb0c79200 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..138d7e1bb34d4d94f561fd87e21ac83f416d1b84 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_inputs.txt b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_inputs.txt new file mode 100644 index 0000000000000000000000000000000000000000..6c3cff3b12cec4ad050b31cc5d5c327f32784447 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_inputs.txt @@ -0,0 +1,21 @@ +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 0 1 1 1 +1 1 0 0 0 1 1 +1 0 1 0 1 0 1 +0 0 0 1 0 0 0 +1 0 1 0 1 0 1 +1 1 0 0 0 1 1 +1 1 1 0 1 1 1 +1 0 1 1 1 0 1 +0 0 0 1 0 0 0 +1 0 0 1 0 0 1 +1 1 1 1 1 1 1 +1 0 0 1 0 0 1 +0 0 0 1 0 0 0 +1 0 1 1 1 0 1 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_results.txt b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_results.txt new file mode 100644 index 0000000000000000000000000000000000000000..c239b0369c9df3e06df9a2fbf048faec2f84941f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_results.txt @@ -0,0 +1,294 @@ +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +2 2 2 2 2 2 2 +3 3 3 3 3 3 3 +4 4 4 4 4 4 4 +5 5 5 5 5 5 5 +6 6 6 6 6 6 6 +7 7 7 7 7 7 7 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 2 3 4 5 6 7 +8 9 10 11 12 13 14 +15 16 17 18 19 20 21 +22 23 24 25 26 27 28 +29 30 31 32 33 34 35 +36 37 38 39 40 41 42 +43 44 45 46 47 48 49 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 2 3 4 5 6 7 +8 1 2 3 4 5 6 +9 8 1 2 3 4 5 +10 9 8 1 2 3 4 +11 10 9 8 1 2 3 +12 11 10 9 8 1 2 +13 12 11 10 9 8 1 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 
1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 2 1 2 1 2 1 +2 1 2 1 2 1 2 +1 2 1 2 1 2 1 +2 1 2 1 2 1 2 +1 2 1 2 1 2 1 +2 1 2 1 2 1 2 +1 2 1 2 1 2 1 +1 2 3 4 5 6 7 +2 3 4 5 6 7 8 +3 4 5 6 7 8 9 +4 5 6 7 8 9 10 +5 6 7 8 9 10 11 +6 7 8 9 10 11 12 +7 8 9 10 11 12 13 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +1 0 3 0 2 0 4 +0 0 0 2 0 0 0 +5 0 2 0 6 0 7 +2 2 0 0 0 7 7 +2 2 2 0 7 7 7 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +3 0 1 0 4 0 2 +0 0 0 1 0 0 0 +5 0 6 0 1 0 7 +5 5 0 0 0 1 1 +5 5 5 0 1 1 1 +1 1 1 0 2 2 2 +3 3 0 0 0 4 4 +5 0 6 0 7 0 8 +0 0 0 9 0 0 0 +10 0 11 0 12 0 13 +14 14 0 0 0 15 15 +16 16 16 0 17 17 17 +1 1 1 0 2 3 3 +1 1 0 0 0 3 3 +1 0 4 0 3 0 3 +0 0 0 3 0 0 0 +3 0 3 0 5 0 6 +3 3 0 0 0 6 6 +3 3 7 0 6 6 6 +1 2 3 0 4 5 6 +7 8 0 0 0 9 10 +11 0 12 0 13 0 14 +0 0 0 15 0 0 0 +16 0 17 0 18 0 19 +20 21 0 0 0 22 23 +24 25 26 0 27 28 29 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +1 0 3 0 2 0 2 +0 0 0 2 0 0 0 +2 0 2 0 4 0 5 +2 2 0 0 0 5 5 +2 2 2 0 5 5 5 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +1 0 3 0 4 0 2 +0 0 0 5 0 0 0 +6 0 7 0 8 0 9 +6 6 0 0 0 9 9 +6 6 6 0 9 9 9 +1 2 3 0 4 5 6 +7 1 0 0 0 4 5 +8 0 1 0 9 0 4 +0 0 0 1 0 0 0 +10 0 11 0 1 0 12 +13 10 0 0 0 1 14 +15 13 10 0 16 17 1 +1 2 3 0 4 5 6 +1 2 0 0 0 5 6 +1 0 7 0 8 0 6 +0 0 0 9 0 0 0 +10 0 11 0 12 0 13 +10 14 0 0 0 15 13 +10 14 16 0 17 15 13 +1 1 1 0 1 1 1 +1 1 0 0 0 1 1 +1 0 1 0 1 0 1 +0 0 0 1 0 0 0 +1 0 1 0 1 0 1 +1 1 0 0 0 1 1 +1 1 1 0 1 1 1 +1 1 2 0 3 3 3 +1 1 0 0 0 3 3 +1 0 1 0 4 0 3 +0 0 0 1 0 0 0 +5 0 6 0 1 0 1 +5 5 0 0 0 1 1 +5 5 5 0 7 1 1 +1 2 1 0 1 3 1 +2 1 0 0 0 1 3 +1 0 1 0 1 0 1 +0 0 0 1 0 0 0 +1 0 1 0 1 0 1 +4 1 0 0 0 1 5 +1 4 1 0 1 5 1 +1 2 3 0 4 5 6 +2 3 0 0 0 6 7 +3 0 8 0 6 0 9 +0 0 0 6 0 0 0 +10 0 6 0 11 0 12 +13 6 0 0 0 12 14 +6 15 16 0 12 14 17 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +1 0 1 0 3 0 2 +0 0 0 1 0 0 0 +4 0 5 0 1 0 1 +4 4 0 0 0 1 1 +4 4 4 0 1 1 1 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +4 0 0 5 0 0 5 +5 5 5 5 5 5 5 +5 0 0 5 0 0 6 +0 0 0 7 0 0 0 +8 0 7 7 7 0 9 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +4 0 0 4 0 0 5 +4 4 4 4 4 4 4 +6 0 0 4 0 0 4 +0 0 0 7 0 0 0 +8 0 7 7 7 0 9 +1 0 2 2 2 0 3 +0 0 0 4 0 0 0 +5 0 0 6 0 0 7 +8 8 8 8 8 8 8 +9 0 0 10 0 0 11 +0 0 0 12 0 0 0 +13 0 14 14 14 0 15 +1 0 2 3 3 0 4 +0 0 0 3 0 0 0 +5 0 0 3 0 0 6 +5 5 3 3 3 6 6 +5 0 0 3 0 0 6 +0 0 0 3 0 0 0 +7 0 3 3 8 0 9 +1 0 2 3 4 0 5 +0 0 0 6 0 0 0 +7 0 0 8 0 0 9 +10 11 12 13 14 15 16 +17 0 0 18 0 0 19 +0 0 0 20 0 0 0 +21 0 22 23 24 0 25 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +2 0 0 2 0 0 2 +2 2 2 2 2 2 2 +2 0 0 2 0 0 2 +0 0 0 2 0 0 0 +4 0 2 2 2 0 5 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +2 0 0 2 0 0 2 +2 2 2 2 2 2 2 +2 0 0 2 0 0 2 +0 0 0 2 0 0 0 +4 0 2 2 2 0 5 +1 0 2 3 4 0 5 +0 0 0 2 0 0 0 +6 0 0 7 0 0 8 +9 6 10 11 7 12 13 +14 0 0 10 0 0 12 +0 0 0 15 0 0 0 +16 0 17 18 15 0 19 +1 0 2 3 4 0 5 +0 0 0 3 0 0 0 +6 0 0 3 0 0 7 +6 8 9 3 10 11 7 +6 0 0 3 0 0 7 +0 0 0 3 0 0 0 +12 0 13 3 14 0 15 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +2 0 0 2 0 0 2 +2 2 2 2 2 2 2 +2 0 0 2 0 0 2 +0 0 0 2 0 0 0 +4 0 2 2 2 0 5 +1 0 2 2 3 0 4 +0 0 0 2 0 0 0 +5 0 0 2 0 0 6 +5 5 2 2 2 6 6 +5 0 0 2 0 0 6 +0 0 0 2 0 0 0 +7 0 8 2 2 0 9 +1 0 2 3 2 0 4 +0 0 0 2 0 0 0 +5 0 0 6 0 0 7 +8 5 6 9 6 7 10 +5 0 0 6 0 0 7 +0 0 0 11 0 0 0 +12 0 11 13 11 0 14 +1 0 2 3 4 0 5 +0 0 0 4 0 0 0 +6 0 0 7 0 0 8 +9 10 7 11 12 8 13 +10 0 0 12 0 0 14 +0 0 0 15 0 0 0 +16 0 15 17 18 0 19 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +2 0 0 2 0 0 2 +2 2 2 2 2 2 2 +2 0 0 2 0 0 2 +0 0 0 2 0 0 0 +4 0 2 2 2 0 5 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_strels.txt 
b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_strels.txt new file mode 100644 index 0000000000000000000000000000000000000000..35ae8121364d4fb3292c11f2a72333f456fa9c0a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_strels.txt @@ -0,0 +1,42 @@ +0 0 1 +1 1 1 +1 0 0 +1 0 0 +1 1 1 +0 0 1 +0 0 0 +1 1 1 +0 0 0 +0 1 1 +0 1 0 +1 1 0 +0 0 0 +0 0 0 +0 0 0 +0 1 1 +1 1 1 +1 1 0 +0 1 0 +1 1 1 +0 1 0 +1 0 0 +0 1 0 +0 0 1 +0 1 0 +0 1 0 +0 1 0 +1 1 1 +1 1 1 +1 1 1 +1 1 0 +0 1 0 +0 1 1 +1 0 1 +0 1 0 +1 0 1 +0 0 1 +0 1 0 +1 0 0 +1 1 0 +1 1 1 +0 1 1 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py new file mode 100644 index 0000000000000000000000000000000000000000..ed52ed8477056176e1f5aacbf681b12b0153fee6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py @@ -0,0 +1,102 @@ +import numpy as np +from numpy.testing import assert_allclose + +from scipy import ndimage +from scipy.ndimage import _ctest +from scipy.ndimage import _cytest +from scipy._lib._ccallback import LowLevelCallable + +FILTER1D_FUNCTIONS = [ + lambda filter_size: _ctest.filter1d(filter_size), + lambda filter_size: _cytest.filter1d(filter_size, with_signature=False), + lambda filter_size: LowLevelCallable( + _cytest.filter1d(filter_size, with_signature=True) + ), + lambda filter_size: LowLevelCallable.from_cython( + _cytest, "_filter1d", + _cytest.filter1d_capsule(filter_size), + ), +] + +FILTER2D_FUNCTIONS = [ + lambda weights: _ctest.filter2d(weights), + lambda weights: _cytest.filter2d(weights, with_signature=False), + lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)), + lambda weights: LowLevelCallable.from_cython(_cytest, + "_filter2d", + _cytest.filter2d_capsule(weights),), +] + +TRANSFORM_FUNCTIONS = [ + lambda shift: _ctest.transform(shift), + lambda shift: _cytest.transform(shift, with_signature=False), + lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)), + lambda shift: LowLevelCallable.from_cython(_cytest, + "_transform", + _cytest.transform_capsule(shift),), +] + + +def test_generic_filter(): + def filter2d(footprint_elements, weights): + return (weights*footprint_elements).sum() + + def check(j): + func = FILTER2D_FUNCTIONS[j] + + im = np.ones((20, 20)) + im[:10,:10] = 0 + footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + footprint_size = np.count_nonzero(footprint) + weights = np.ones(footprint_size)/footprint_size + + res = ndimage.generic_filter(im, func(weights), + footprint=footprint) + std = ndimage.generic_filter(im, filter2d, footprint=footprint, + extra_arguments=(weights,)) + assert_allclose(res, std, err_msg=f"#{j} failed") + + for j, func in enumerate(FILTER2D_FUNCTIONS): + check(j) + + +def test_generic_filter1d(): + def filter1d(input_line, output_line, filter_size): + for i in range(output_line.size): + output_line[i] = 0 + for j in range(filter_size): + output_line[i] += input_line[i+j] + output_line /= filter_size + + def check(j): + func = FILTER1D_FUNCTIONS[j] + + im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1)) + filter_size = 3 + + res = ndimage.generic_filter1d(im, func(filter_size), + filter_size) + std = ndimage.generic_filter1d(im, filter1d, filter_size, + extra_arguments=(filter_size,)) + assert_allclose(res, std, err_msg=f"#{j} failed") + + for j, func in enumerate(FILTER1D_FUNCTIONS): + check(j) + 
+ +def test_geometric_transform(): + def transform(output_coordinates, shift): + return output_coordinates[0] - shift, output_coordinates[1] - shift + + def check(j): + func = TRANSFORM_FUNCTIONS[j] + + im = np.arange(12).reshape(4, 3).astype(np.float64) + shift = 0.5 + + res = ndimage.geometric_transform(im, func(shift)) + std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,)) + assert_allclose(res, std, err_msg=f"#{j} failed") + + for j, func in enumerate(TRANSFORM_FUNCTIONS): + check(j) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py new file mode 100644 index 0000000000000000000000000000000000000000..135e9a72c94103cc378d87ac9a78e44342bfb55b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py @@ -0,0 +1,1409 @@ +import os.path + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + suppress_warnings, +) +from pytest import raises as assert_raises + +import scipy.ndimage as ndimage + + +from . import types + + +class Test_measurements_stats: + """ndimage._measurements._stats() is a utility used by other functions.""" + + def test_a(self): + x = [0, 1, 2, 6] + labels = [0, 0, 1, 1] + index = [0, 1] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums = ndimage._measurements._stats( + x, labels=labels, index=index) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + + def test_b(self): + # Same data as test_a, but different labels. The label 9 exceeds the + # length of 'labels', so this test will follow a different code path. 
+ x = [0, 1, 2, 6] + labels = [0, 0, 9, 9] + index = [0, 9] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums = ndimage._measurements._stats( + x, labels=labels, index=index) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + + def test_a_centered(self): + x = [0, 1, 2, 6] + labels = [0, 0, 1, 1] + index = [0, 1] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, centers = ndimage._measurements._stats( + x, labels=labels, index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + def test_b_centered(self): + x = [0, 1, 2, 6] + labels = [0, 0, 9, 9] + index = [0, 9] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, centers = ndimage._measurements._stats( + x, labels=labels, index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + def test_nonint_labels(self): + x = [0, 1, 2, 6] + labels = [0.0, 0.0, 9.0, 9.0] + index = [0.0, 9.0] + for shp in [(4,), (2, 2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, centers = ndimage._measurements._stats( + x, labels=labels, index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + +class Test_measurements_select: + """ndimage._measurements._select() is a utility used by other functions.""" + + def test_basic(self): + x = [0, 1, 6, 2] + cases = [ + ([0, 0, 1, 1], [0, 1]), # "Small" integer labels + ([0, 0, 9, 9], [0, 9]), # A label larger than len(labels) + ([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]), # Non-integer labels + ] + for labels, index in cases: + result = ndimage._measurements._select( + x, labels=labels, index=index) + assert_(len(result) == 0) + result = ndimage._measurements._select( + x, labels=labels, index=index, find_max=True) + assert_(len(result) == 1) + assert_array_equal(result[0], [1, 6]) + result = ndimage._measurements._select( + x, labels=labels, index=index, find_min=True) + assert_(len(result) == 1) + assert_array_equal(result[0], [0, 2]) + result = ndimage._measurements._select( + x, labels=labels, index=index, find_min=True, + find_min_positions=True) + assert_(len(result) == 2) + assert_array_equal(result[0], [0, 2]) + assert_array_equal(result[1], [0, 3]) + assert_equal(result[1].dtype.kind, 'i') + result = ndimage._measurements._select( + x, labels=labels, index=index, find_max=True, + find_max_positions=True) + assert_(len(result) == 2) + assert_array_equal(result[0], [1, 6]) + assert_array_equal(result[1], [1, 2]) + assert_equal(result[1].dtype.kind, 'i') + + +def test_label01(): + data = np.ones([]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, 1) + assert_equal(n, 1) + + +def test_label02(): + data = np.zeros([]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, 0) + assert_equal(n, 0) + + +def test_label03(): + data = np.ones([1]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [1]) + assert_equal(n, 1) + + +def test_label04(): + data = np.zeros([1]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [0]) + assert_equal(n, 0) + + +def test_label05(): + data = np.ones([5]) + out, n = ndimage.label(data) + 
assert_array_almost_equal(out, [1, 1, 1, 1, 1]) + assert_equal(n, 1) + + +def test_label06(): + data = np.array([1, 0, 1, 1, 0, 1]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3]) + assert_equal(n, 3) + + +def test_label07(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + assert_equal(n, 0) + + +def test_label08(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + assert_equal(n, 4) + + +def test_label09(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]]) + struct = ndimage.generate_binary_structure(2, 2) + out, n = ndimage.label(data, struct) + assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [2, 2, 0, 0, 0, 0], + [2, 2, 0, 0, 0, 0], + [0, 0, 0, 3, 3, 0]]) + assert_equal(n, 3) + + +def test_label10(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0]]) + struct = ndimage.generate_binary_structure(2, 2) + out, n = ndimage.label(data, struct) + assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0]]) + assert_equal(n, 1) + + +def test_label11(): + for type in types: + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]], type) + out, n = ndimage.label(data) + expected = [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]] + assert_array_almost_equal(out, expected) + assert_equal(n, 4) + + +def test_label11_inplace(): + for type in types: + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]], type) + n = ndimage.label(data, output=data) + expected = [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]] + assert_array_almost_equal(data, expected) + assert_equal(n, 4) + + +def test_label12(): + for type in types: + data = np.array([[0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 0, 1, 1, 0]], type) + out, n = ndimage.label(data) + expected = [[0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 0, 1, 1, 0]] + assert_array_almost_equal(out, expected) + assert_equal(n, 1) + + +def test_label13(): + for type in types: + data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], + [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], + type) + out, n = ndimage.label(data) + expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], + [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] + 
assert_array_almost_equal(out, expected) + assert_equal(n, 1) + + +def test_label_output_typed(): + data = np.ones([5]) + for t in types: + output = np.zeros([5], dtype=t) + n = ndimage.label(data, output=output) + assert_array_almost_equal(output, 1) + assert_equal(n, 1) + + +def test_label_output_dtype(): + data = np.ones([5]) + for t in types: + output, n = ndimage.label(data, output=t) + assert_array_almost_equal(output, 1) + assert output.dtype == t + + +def test_label_output_wrong_size(): + data = np.ones([5]) + for t in types: + output = np.zeros([10], t) + assert_raises((RuntimeError, ValueError), + ndimage.label, data, output=output) + + +def test_label_structuring_elements(): + data = np.loadtxt(os.path.join(os.path.dirname( + __file__), "data", "label_inputs.txt")) + strels = np.loadtxt(os.path.join( + os.path.dirname(__file__), "data", "label_strels.txt")) + results = np.loadtxt(os.path.join( + os.path.dirname(__file__), "data", "label_results.txt")) + data = data.reshape((-1, 7, 7)) + strels = strels.reshape((-1, 3, 3)) + results = results.reshape((-1, 7, 7)) + r = 0 + for i in range(data.shape[0]): + d = data[i, :, :] + for j in range(strels.shape[0]): + s = strels[j, :, :] + assert_equal(ndimage.label(d, s)[0], results[r, :, :]) + r += 1 + + +def test_ticket_742(): + def SE(img, thresh=.7, size=4): + mask = img > thresh + rank = len(mask.shape) + la, co = ndimage.label(mask, + ndimage.generate_binary_structure(rank, rank)) + _ = ndimage.find_objects(la) + + if np.dtype(np.intp) != np.dtype('i'): + shape = (3, 1240, 1240) + a = np.random.rand(np.prod(shape)).reshape(shape) + # shouldn't crash + SE(a) + + +def test_gh_issue_3025(): + """Github issue #3025 - improper merging of labels""" + d = np.zeros((60, 320)) + d[:, :257] = 1 + d[:, 260:] = 1 + d[36, 257] = 1 + d[35, 258] = 1 + d[35, 259] = 1 + assert ndimage.label(d, np.ones((3, 3)))[1] == 1 + + +def test_label_default_dtype(): + test_array = np.random.rand(10, 10) + label, no_features = ndimage.label(test_array > 0.5) + assert_(label.dtype in (np.int32, np.int64)) + # Shouldn't raise an exception + ndimage.find_objects(label) + + +def test_find_objects01(): + data = np.ones([], dtype=int) + out = ndimage.find_objects(data) + assert_(out == [()]) + + +def test_find_objects02(): + data = np.zeros([], dtype=int) + out = ndimage.find_objects(data) + assert_(out == []) + + +def test_find_objects03(): + data = np.ones([1], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None),)]) + + +def test_find_objects04(): + data = np.zeros([1], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, []) + + +def test_find_objects05(): + data = np.ones([5], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 5, None),)]) + + +def test_find_objects06(): + data = np.array([1, 0, 2, 2, 0, 3]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None),), + (slice(2, 4, None),), + (slice(5, 6, None),)]) + + +def test_find_objects07(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, []) + + +def test_find_objects08(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), + (slice(1, 3, None), slice(2, 5, None)), + 
(slice(3, 5, None), slice(0, 2, None)), + (slice(5, 6, None), slice(3, 5, None))]) + + +def test_find_objects09(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), + (slice(1, 3, None), slice(2, 5, None)), + None, + (slice(5, 6, None), slice(3, 5, None))]) + + +def test_value_indices01(): + "Test dictionary keys and entries" + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + vi = ndimage.value_indices(data, ignore_value=0) + true_keys = [1, 2, 4] + assert_equal(list(vi.keys()), true_keys) + + truevi = {} + for k in true_keys: + truevi[k] = np.where(data == k) + + vi = ndimage.value_indices(data, ignore_value=0) + assert_equal(vi, truevi) + + +def test_value_indices02(): + "Test input checking" + data = np.zeros((5, 4), dtype=np.float32) + msg = "Parameter 'arr' must be an integer array" + with assert_raises(ValueError, match=msg): + ndimage.value_indices(data) + + +def test_value_indices03(): + "Test different input array shapes, from 1-D to 4-D" + for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]: + a = np.array((12*[1]+12*[2]+12*[3]), dtype=np.int32).reshape(shape) + trueKeys = np.unique(a) + vi = ndimage.value_indices(a) + assert_equal(list(vi.keys()), list(trueKeys)) + for k in trueKeys: + trueNdx = np.where(a == k) + assert_equal(vi[k], trueNdx) + + +def test_sum01(): + for type in types: + input = np.array([], type) + output = ndimage.sum(input) + assert_equal(output, 0.0) + + +def test_sum02(): + for type in types: + input = np.zeros([0, 4], type) + output = ndimage.sum(input) + assert_equal(output, 0.0) + + +def test_sum03(): + for type in types: + input = np.ones([], type) + output = ndimage.sum(input) + assert_almost_equal(output, 1.0) + + +def test_sum04(): + for type in types: + input = np.array([1, 2], type) + output = ndimage.sum(input) + assert_almost_equal(output, 3.0) + + +def test_sum05(): + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input) + assert_almost_equal(output, 10.0) + + +def test_sum06(): + labels = np.array([], bool) + for type in types: + input = np.array([], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 0.0) + + +def test_sum07(): + labels = np.ones([0, 4], bool) + for type in types: + input = np.zeros([0, 4], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 0.0) + + +def test_sum08(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([1, 2], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 1.0) + + +def test_sum09(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input, labels=labels) + assert_almost_equal(output, 4.0) + + +def test_sum10(): + labels = np.array([1, 0], bool) + input = np.array([[1, 2], [3, 4]], bool) + output = ndimage.sum(input, labels=labels) + assert_almost_equal(output, 2.0) + + +def test_sum11(): + labels = np.array([1, 2], np.int8) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input, labels=labels, + index=2) + assert_almost_equal(output, 6.0) + + +def test_sum12(): + labels = np.array([[1, 2], [2, 4]], np.int8) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output 
= ndimage.sum(input, labels=labels, index=[4, 8, 2]) + assert_array_almost_equal(output, [4.0, 0.0, 5.0]) + + +def test_sum_labels(): + labels = np.array([[1, 2], [2, 4]], np.int8) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2]) + output_labels = ndimage.sum_labels( + input, labels=labels, index=[4, 8, 2]) + + assert (output_sum == output_labels).all() + assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0]) + + +def test_mean01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels) + assert_almost_equal(output, 2.0) + + +def test_mean02(): + labels = np.array([1, 0], bool) + input = np.array([[1, 2], [3, 4]], bool) + output = ndimage.mean(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_mean03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels, + index=2) + assert_almost_equal(output, 3.0) + + +def test_mean04(): + labels = np.array([[1, 2], [2, 4]], np.int8) + with np.errstate(all='ignore'): + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels, + index=[4, 8, 2]) + assert_array_almost_equal(output[[0, 2]], [4.0, 2.5]) + assert_(np.isnan(output[1])) + + +def test_minimum01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_minimum02(): + labels = np.array([1, 0], bool) + input = np.array([[2, 2], [2, 4]], bool) + output = ndimage.minimum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_minimum03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum(input, labels=labels, + index=2) + assert_almost_equal(output, 2.0) + + +def test_minimum04(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum(input, labels=labels, + index=[2, 3, 8]) + assert_array_almost_equal(output, [2.0, 4.0, 0.0]) + + +def test_maximum01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels) + assert_almost_equal(output, 3.0) + + +def test_maximum02(): + labels = np.array([1, 0], bool) + input = np.array([[2, 2], [2, 4]], bool) + output = ndimage.maximum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_maximum03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels, + index=2) + assert_almost_equal(output, 4.0) + + +def test_maximum04(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels, + index=[2, 3, 8]) + assert_array_almost_equal(output, [3.0, 4.0, 0.0]) + + +def test_maximum05(): + # Regression test for ticket #501 (Trac) + x = np.array([-3, -2, -1]) + assert_equal(ndimage.maximum(x), -1) + + +def test_median01(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + labels = np.array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + output = ndimage.median(a, labels=labels, index=[1, 2, 3]) + 
assert_array_almost_equal(output, [2.5, 4.0, 6.0]) + + +def test_median02(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + output = ndimage.median(a) + assert_almost_equal(output, 1.0) + + +def test_median03(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + labels = np.array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + output = ndimage.median(a, labels=labels) + assert_almost_equal(output, 3.0) + + +def test_median_gh12836_bool(): + # test boolean addition fix on example from gh-12836 + a = np.asarray([1, 1], dtype=bool) + output = ndimage.median(a, labels=np.ones((2,)), index=[1]) + assert_array_almost_equal(output, [1.0]) + + +def test_median_no_int_overflow(): + # test integer overflow fix on example from gh-12836 + a = np.asarray([65, 70], dtype=np.int8) + output = ndimage.median(a, labels=np.ones((2,)), index=[1]) + assert_array_almost_equal(output, [67.5]) + + +def test_variance01(): + with np.errstate(all='ignore'): + for type in types: + input = np.array([], type) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice") + output = ndimage.variance(input) + assert_(np.isnan(output)) + + +def test_variance02(): + for type in types: + input = np.array([1], type) + output = ndimage.variance(input) + assert_almost_equal(output, 0.0) + + +def test_variance03(): + for type in types: + input = np.array([1, 3], type) + output = ndimage.variance(input) + assert_almost_equal(output, 1.0) + + +def test_variance04(): + input = np.array([1, 0], bool) + output = ndimage.variance(input) + assert_almost_equal(output, 0.25) + + +def test_variance05(): + labels = [2, 2, 3] + for type in types: + input = np.array([1, 3, 8], type) + output = ndimage.variance(input, labels, 2) + assert_almost_equal(output, 1.0) + + +def test_variance06(): + labels = [2, 2, 3, 3, 4] + with np.errstate(all='ignore'): + for type in types: + input = np.array([1, 3, 8, 10, 8], type) + output = ndimage.variance(input, labels, [2, 3, 4]) + assert_array_almost_equal(output, [1.0, 1.0, 0.0]) + + +def test_standard_deviation01(): + with np.errstate(all='ignore'): + for type in types: + input = np.array([], type) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice") + output = ndimage.standard_deviation(input) + assert_(np.isnan(output)) + + +def test_standard_deviation02(): + for type in types: + input = np.array([1], type) + output = ndimage.standard_deviation(input) + assert_almost_equal(output, 0.0) + + +def test_standard_deviation03(): + for type in types: + input = np.array([1, 3], type) + output = ndimage.standard_deviation(input) + assert_almost_equal(output, np.sqrt(1.0)) + + +def test_standard_deviation04(): + input = np.array([1, 0], bool) + output = ndimage.standard_deviation(input) + assert_almost_equal(output, 0.5) + + +def test_standard_deviation05(): + labels = [2, 2, 3] + for type in types: + input = np.array([1, 3, 8], type) + output = ndimage.standard_deviation(input, labels, 2) + assert_almost_equal(output, 1.0) + + +def test_standard_deviation06(): + labels = [2, 2, 3, 3, 4] + with np.errstate(all='ignore'): + for type in types: + input = np.array([1, 3, 8, 10, 8], type) + output = ndimage.standard_deviation(input, labels, [2, 3, 4]) + assert_array_almost_equal(output, [1.0, 1.0, 0.0]) + + +def test_standard_deviation07(): + labels = [1] + with np.errstate(all='ignore'): + for type in types: + input = np.array([-0.00619519], type) + output = 
ndimage.standard_deviation(input, labels, [1]) + assert_array_almost_equal(output, [0]) + + +def test_minimum_position01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum_position(input, labels=labels) + assert_equal(output, (0, 0)) + + +def test_minimum_position02(): + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input) + assert_equal(output, (1, 2)) + + +def test_minimum_position03(): + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], bool) + output = ndimage.minimum_position(input) + assert_equal(output, (1, 2)) + + +def test_minimum_position04(): + input = np.array([[5, 4, 2, 5], + [3, 7, 1, 2], + [1, 5, 1, 1]], bool) + output = ndimage.minimum_position(input) + assert_equal(output, (0, 0)) + + +def test_minimum_position05(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 2, 3]], type) + output = ndimage.minimum_position(input, labels) + assert_equal(output, (2, 0)) + + +def test_minimum_position06(): + labels = [1, 2, 3, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input, labels, 2) + assert_equal(output, (0, 1)) + + +def test_minimum_position07(): + labels = [1, 2, 3, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input, labels, + [2, 3]) + assert_equal(output[0], (0, 1)) + assert_equal(output[1], (1, 2)) + + +def test_maximum_position01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum_position(input, + labels=labels) + assert_equal(output, (1, 0)) + + +def test_maximum_position02(): + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input) + assert_equal(output, (1, 2)) + + +def test_maximum_position03(): + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], bool) + output = ndimage.maximum_position(input) + assert_equal(output, (0, 0)) + + +def test_maximum_position04(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels) + assert_equal(output, (1, 1)) + + +def test_maximum_position05(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, 1) + assert_equal(output, (0, 0)) + + +def test_maximum_position06(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, + [1, 2]) + assert_equal(output[0], (0, 0)) + assert_equal(output[1], (1, 1)) + + +def test_maximum_position07(): + # Test float labels + labels = np.array([1.0, 2.5, 0.0, 4.5]) + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, + [1.0, 4.5]) + assert_equal(output[0], (0, 0)) + assert_equal(output[1], (0, 3)) + + +def test_extrema01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels) + output2 = ndimage.minimum(input, 
labels=labels) + output3 = ndimage.maximum(input, labels=labels) + output4 = ndimage.minimum_position(input, + labels=labels) + output5 = ndimage.maximum_position(input, + labels=labels) + assert_equal(output1, (output2, output3, output4, output5)) + + +def test_extrema02(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels, + index=2) + output2 = ndimage.minimum(input, labels=labels, + index=2) + output3 = ndimage.maximum(input, labels=labels, + index=2) + output4 = ndimage.minimum_position(input, + labels=labels, index=2) + output5 = ndimage.maximum_position(input, + labels=labels, index=2) + assert_equal(output1, (output2, output3, output4, output5)) + + +def test_extrema03(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels, + index=[2, 3, 8]) + output2 = ndimage.minimum(input, labels=labels, + index=[2, 3, 8]) + output3 = ndimage.maximum(input, labels=labels, + index=[2, 3, 8]) + output4 = ndimage.minimum_position(input, + labels=labels, index=[2, 3, 8]) + output5 = ndimage.maximum_position(input, + labels=labels, index=[2, 3, 8]) + assert_array_almost_equal(output1[0], output2) + assert_array_almost_equal(output1[1], output3) + assert_array_almost_equal(output1[2], output4) + assert_array_almost_equal(output1[3], output5) + + +def test_extrema04(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output1 = ndimage.extrema(input, labels, [1, 2]) + output2 = ndimage.minimum(input, labels, [1, 2]) + output3 = ndimage.maximum(input, labels, [1, 2]) + output4 = ndimage.minimum_position(input, labels, + [1, 2]) + output5 = ndimage.maximum_position(input, labels, + [1, 2]) + assert_array_almost_equal(output1[0], output2) + assert_array_almost_equal(output1[1], output3) + assert_array_almost_equal(output1[2], output4) + assert_array_almost_equal(output1[3], output5) + + +def test_center_of_mass01(): + expected = [0.0, 0.0] + for type in types: + input = np.array([[1, 0], [0, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass02(): + expected = [1, 0] + for type in types: + input = np.array([[0, 0], [1, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass03(): + expected = [0, 1] + for type in types: + input = np.array([[0, 1], [0, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass04(): + expected = [1, 1] + for type in types: + input = np.array([[0, 0], [0, 1]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass05(): + expected = [0.5, 0.5] + for type in types: + input = np.array([[1, 1], [1, 1]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass06(): + expected = [0.5, 0.5] + input = np.array([[1, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass07(): + labels = [1, 0] + expected = [0.5, 0.0] + input = np.array([[1, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input, labels) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass08(): + labels = [1, 2] + 
expected = [0.5, 1.0] + input = np.array([[5, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input, labels, 2) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass09(): + labels = [1, 2] + expected = [(0.5, 0.0), (0.5, 1.0)] + input = np.array([[1, 2], [1, 1]], bool) + output = ndimage.center_of_mass(input, labels, [1, 2]) + assert_array_almost_equal(output, expected) + + +def test_histogram01(): + expected = np.ones(10) + input = np.arange(10) + output = ndimage.histogram(input, 0, 10, 10) + assert_array_almost_equal(output, expected) + + +def test_histogram02(): + labels = [1, 1, 1, 1, 2, 2, 2, 2] + expected = [0, 2, 0, 1, 1] + input = np.array([1, 1, 3, 4, 3, 3, 3, 3]) + output = ndimage.histogram(input, 0, 4, 5, labels, 1) + assert_array_almost_equal(output, expected) + + +def test_histogram03(): + labels = [1, 0, 1, 1, 2, 2, 2, 2] + expected1 = [0, 1, 0, 1, 1] + expected2 = [0, 0, 0, 3, 0] + input = np.array([1, 1, 3, 4, 3, 5, 3, 3]) + output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2)) + + assert_array_almost_equal(output[0], expected1) + assert_array_almost_equal(output[1], expected2) + + +def test_stat_funcs_2d(): + a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]]) + lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]]) + + mean = ndimage.mean(a, labels=lbl, index=[1, 2]) + assert_array_equal(mean, [7.0, 4.0]) + + var = ndimage.variance(a, labels=lbl, index=[1, 2]) + assert_array_equal(var, [2.5, 1.0]) + + std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2]) + assert_array_almost_equal(std, np.sqrt([2.5, 1.0])) + + med = ndimage.median(a, labels=lbl, index=[1, 2]) + assert_array_equal(med, [7.0, 4.0]) + + min = ndimage.minimum(a, labels=lbl, index=[1, 2]) + assert_array_equal(min, [5, 3]) + + max = ndimage.maximum(a, labels=lbl, index=[1, 2]) + assert_array_equal(max, [9, 5]) + + +class TestWatershedIft: + + def test_watershed_ift01(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift02(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, -1, 1, 1, 1, -1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, 1, 1, 1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, 
-1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift03(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 2, 0, 3, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], np.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, -1, 2, -1, 3, -1, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, -1, 2, -1, 3, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift04(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 2, 0, 3, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], + np.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift05(self): + data = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 3, 0, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], + np.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift06(self): + data = np.array([[0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift07(self): + shape = (7, 6) + data = np.zeros(shape, dtype=np.uint8) + data = data.transpose() + data[...] 
= np.array([[0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.uint8) + markers = np.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], np.int8) + out = np.zeros(shape, dtype=np.int16) + out = out.transpose() + ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]], + output=out) + expected = [[-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift08(self): + # Test cost larger than uint8. See gh-10069. + data = np.array([[256, 0], + [0, 0]], np.uint16) + markers = np.array([[1, 0], + [0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[1, 1], + [1, 1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift09(self): + # Test large cost. See gh-19575 + data = np.array([[np.iinfo(np.uint16).max, 0], + [0, 0]], np.uint16) + markers = np.array([[1, 0], + [0, 0]], np.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[1, 1], + [1, 1]] + assert_allclose(out, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py new file mode 100644 index 0000000000000000000000000000000000000000..d0f47d651f32143c1594b1fe833e51f0ec4f5fb7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py @@ -0,0 +1,2395 @@ +import numpy +import numpy as np +from numpy.testing import (assert_, assert_equal, assert_array_equal, + assert_array_almost_equal) +import pytest +from pytest import raises as assert_raises + +from scipy import ndimage + +from . 
import types + + +class TestNdimageMorphology: + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_bf01(self, dtype): + # brute force (bf) distance transform + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out, ft = ndimage.distance_transform_bf(data, 'euclidean', + return_indices=True) + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 2, 4, 2, 1, 0, 0], + [0, 0, 1, 4, 8, 4, 1, 0, 0], + [0, 0, 1, 2, 4, 2, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out * out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 1, 2, 2, 2, 2], + [3, 3, 3, 2, 1, 2, 3, 3, 3], + [4, 4, 4, 4, 6, 4, 4, 4, 4], + [5, 5, 6, 6, 7, 6, 6, 5, 5], + [6, 6, 6, 7, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 1, 2, 4, 6, 7, 7, 8], + [0, 1, 1, 1, 6, 7, 7, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_bf02(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out, ft = ndimage.distance_transform_bf(data, 'cityblock', + return_indices=True) + + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 2, 2, 2, 1, 0, 0], + [0, 0, 1, 2, 3, 2, 1, 0, 0], + [0, 0, 1, 2, 2, 2, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 1, 2, 2, 2, 2], + [3, 3, 3, 3, 1, 3, 3, 3, 3], + [4, 4, 4, 4, 7, 4, 4, 4, 4], + [5, 5, 6, 7, 7, 7, 6, 5, 5], + [6, 6, 6, 7, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 1, 1, 4, 7, 7, 7, 8], + [0, 1, 1, 1, 4, 7, 7, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(expected, ft) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_bf03(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out, ft = ndimage.distance_transform_bf(data, 'chessboard', + return_indices=True) + + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 
0, 0], + [0, 0, 1, 1, 2, 1, 1, 0, 0], + [0, 0, 1, 2, 2, 2, 1, 0, 0], + [0, 0, 1, 1, 2, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 1, 2, 2, 2, 2], + [3, 3, 4, 2, 2, 2, 4, 3, 3], + [4, 4, 5, 6, 6, 6, 5, 4, 4], + [5, 5, 6, 6, 7, 6, 6, 5, 5], + [6, 6, 6, 7, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 5, 6, 6, 7, 8], + [0, 1, 1, 2, 6, 6, 7, 7, 8], + [0, 1, 1, 2, 6, 7, 7, 7, 8], + [0, 1, 2, 2, 6, 6, 7, 7, 8], + [0, 1, 2, 4, 5, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_bf04(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + tdt, tft = ndimage.distance_transform_bf(data, return_indices=1) + dts = [] + fts = [] + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ndimage.distance_transform_bf(data, distances=dt) + dts.append(dt) + ft = ndimage.distance_transform_bf( + data, return_distances=False, return_indices=1) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_bf( + data, return_distances=False, return_indices=True, indices=ft) + fts.append(ft) + dt, ft = ndimage.distance_transform_bf( + data, return_indices=1) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ft = ndimage.distance_transform_bf( + data, distances=dt, return_indices=True) + dts.append(dt) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + dt = ndimage.distance_transform_bf( + data, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_bf( + data, distances=dt, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + for dt in dts: + assert_array_almost_equal(tdt, dt) + for ft in fts: + assert_array_almost_equal(tft, ft) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_bf05(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out, ft = ndimage.distance_transform_bf( + data, 'euclidean', return_indices=True, sampling=[2, 2]) + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 4, 0, 0, 0], + [0, 0, 4, 8, 16, 8, 4, 0, 0], + [0, 0, 4, 16, 32, 16, 4, 0, 0], + [0, 0, 4, 8, 16, 8, 4, 0, 0], + [0, 0, 0, 4, 4, 4, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out * out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 1, 2, 2, 2, 2], + [3, 3, 3, 2, 1, 2, 3, 3, 3], + [4, 4, 4, 4, 6, 4, 4, 4, 4], + [5, 5, 6, 6, 7, 6, 6, 5, 5], 
+ [6, 6, 6, 7, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 1, 2, 4, 6, 7, 7, 8], + [0, 1, 1, 1, 6, 7, 7, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_bf06(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out, ft = ndimage.distance_transform_bf( + data, 'euclidean', return_indices=True, sampling=[2, 1]) + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 4, 1, 0, 0, 0], + [0, 0, 1, 4, 8, 4, 1, 0, 0], + [0, 0, 1, 4, 9, 4, 1, 0, 0], + [0, 0, 1, 4, 8, 4, 1, 0, 0], + [0, 0, 0, 1, 4, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out * out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 2, 2, 2, 2, 2], + [3, 3, 3, 3, 2, 3, 3, 3, 3], + [4, 4, 4, 4, 4, 4, 4, 4, 4], + [5, 5, 5, 5, 6, 5, 5, 5, 5], + [6, 6, 6, 6, 7, 6, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 6, 6, 6, 7, 8], + [0, 1, 1, 1, 6, 7, 7, 7, 8], + [0, 1, 1, 1, 7, 7, 7, 7, 8], + [0, 1, 1, 1, 6, 7, 7, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + def test_distance_transform_bf07(self): + # test input validation per discussion on PR #13302 + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + with assert_raises(RuntimeError): + ndimage.distance_transform_bf( + data, return_distances=False, return_indices=False + ) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_cdt01(self, dtype): + # chamfer type distance (cdt) transform + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out, ft = ndimage.distance_transform_cdt( + data, 'cityblock', return_indices=True) + bf = ndimage.distance_transform_bf(data, 'cityblock') + assert_array_almost_equal(bf, out) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 1, 1, 1, 2, 2, 2], + [3, 3, 2, 1, 1, 1, 2, 3, 3], + [4, 4, 4, 4, 1, 4, 4, 4, 4], + [5, 5, 5, 5, 7, 7, 6, 5, 5], + [6, 6, 6, 6, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 1, 1, 4, 7, 7, 7, 8], + [0, 1, 1, 1, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 
4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_cdt02(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out, ft = ndimage.distance_transform_cdt(data, 'chessboard', + return_indices=True) + bf = ndimage.distance_transform_bf(data, 'chessboard') + assert_array_almost_equal(bf, out) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 1, 1, 1, 2, 2, 2], + [3, 3, 2, 2, 1, 2, 2, 3, 3], + [4, 4, 3, 2, 2, 2, 3, 4, 4], + [5, 5, 4, 6, 7, 6, 4, 5, 5], + [6, 6, 6, 6, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 3, 4, 6, 7, 8], + [0, 1, 1, 2, 2, 6, 6, 7, 8], + [0, 1, 1, 1, 2, 6, 7, 7, 8], + [0, 1, 1, 2, 6, 6, 7, 7, 8], + [0, 1, 2, 2, 5, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_cdt03(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + tdt, tft = ndimage.distance_transform_cdt(data, return_indices=True) + dts = [] + fts = [] + dt = numpy.zeros(data.shape, dtype=numpy.int32) + ndimage.distance_transform_cdt(data, distances=dt) + dts.append(dt) + ft = ndimage.distance_transform_cdt( + data, return_distances=False, return_indices=True) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_cdt( + data, return_distances=False, return_indices=True, indices=ft) + fts.append(ft) + dt, ft = ndimage.distance_transform_cdt( + data, return_indices=True) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.int32) + ft = ndimage.distance_transform_cdt( + data, distances=dt, return_indices=True) + dts.append(dt) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + dt = ndimage.distance_transform_cdt( + data, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.int32) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_cdt(data, distances=dt, + return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + for dt in dts: + assert_array_almost_equal(tdt, dt) + for ft in fts: + assert_array_almost_equal(tft, ft) + + def test_distance_transform_cdt04(self): + # test input validation per discussion on PR #13302 + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + indices_out = numpy.zeros((data.ndim,) + data.shape, dtype=numpy.int32) + with assert_raises(RuntimeError): + ndimage.distance_transform_bf( + data, + return_distances=True, + return_indices=False, + 
indices=indices_out + ) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_cdt05(self, dtype): + # test custom metric type per discussion on issue #17381 + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + metric_arg = np.ones((3, 3)) + actual = ndimage.distance_transform_cdt(data, metric=metric_arg) + assert actual.sum() == -21 + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_edt01(self, dtype): + # euclidean distance transform (edt) + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out, ft = ndimage.distance_transform_edt(data, return_indices=True) + bf = ndimage.distance_transform_bf(data, 'euclidean') + assert_array_almost_equal(bf, out) + + dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype) + dt = dt.astype(numpy.float64) + numpy.multiply(dt, dt, dt) + dt = numpy.add.reduce(dt, axis=0) + numpy.sqrt(dt, dt) + + assert_array_almost_equal(bf, dt) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_edt02(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + tdt, tft = ndimage.distance_transform_edt(data, return_indices=True) + dts = [] + fts = [] + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ndimage.distance_transform_edt(data, distances=dt) + dts.append(dt) + ft = ndimage.distance_transform_edt( + data, return_distances=0, return_indices=True) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_edt( + data, return_distances=False, return_indices=True, indices=ft) + fts.append(ft) + dt, ft = ndimage.distance_transform_edt( + data, return_indices=True) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ft = ndimage.distance_transform_edt( + data, distances=dt, return_indices=True) + dts.append(dt) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + dt = ndimage.distance_transform_edt( + data, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_edt( + data, distances=dt, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + for dt in dts: + assert_array_almost_equal(tdt, dt) + for ft in fts: + assert_array_almost_equal(tft, ft) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_edt03(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + ref = ndimage.distance_transform_bf(data, 'euclidean', 
sampling=[2, 2]) + out = ndimage.distance_transform_edt(data, sampling=[2, 2]) + assert_array_almost_equal(ref, out) + + @pytest.mark.parametrize('dtype', types) + def test_distance_transform_edt4(self, dtype): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype) + ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 1]) + out = ndimage.distance_transform_edt(data, sampling=[2, 1]) + assert_array_almost_equal(ref, out) + + def test_distance_transform_edt5(self): + # Ticket #954 regression test + out = ndimage.distance_transform_edt(False) + assert_array_almost_equal(out, [0.]) + + def test_distance_transform_edt6(self): + # test input validation per discussion on PR #13302 + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + distances_out = numpy.zeros(data.shape, dtype=numpy.float64) + with assert_raises(RuntimeError): + ndimage.distance_transform_bf( + data, + return_indices=True, + return_distances=False, + distances=distances_out + ) + + def test_generate_structure01(self): + struct = ndimage.generate_binary_structure(0, 1) + assert_array_almost_equal(struct, 1) + + def test_generate_structure02(self): + struct = ndimage.generate_binary_structure(1, 1) + assert_array_almost_equal(struct, [1, 1, 1]) + + def test_generate_structure03(self): + struct = ndimage.generate_binary_structure(2, 1) + assert_array_almost_equal(struct, [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]]) + + def test_generate_structure04(self): + struct = ndimage.generate_binary_structure(2, 2) + assert_array_almost_equal(struct, [[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + def test_iterate_structure01(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + out = ndimage.iterate_structure(struct, 2) + assert_array_almost_equal(out, [[0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0]]) + + def test_iterate_structure02(self): + struct = [[0, 1], + [1, 1], + [0, 1]] + out = ndimage.iterate_structure(struct, 2) + assert_array_almost_equal(out, [[0, 0, 1], + [0, 1, 1], + [1, 1, 1], + [0, 1, 1], + [0, 0, 1]]) + + def test_iterate_structure03(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + out = ndimage.iterate_structure(struct, 2, 1) + expected = [[0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0]] + assert_array_almost_equal(out[0], expected) + assert_equal(out[1], [2, 2]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion01(self, dtype): + data = numpy.ones([], dtype) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, 1) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion02(self, dtype): + data = numpy.ones([], dtype) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, 1) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion03(self, dtype): + data = numpy.ones([1], dtype) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion04(self, 
dtype): + data = numpy.ones([1], dtype) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion05(self, dtype): + data = numpy.ones([3], dtype) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [0, 1, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion06(self, dtype): + data = numpy.ones([3], dtype) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [1, 1, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion07(self, dtype): + data = numpy.ones([5], dtype) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [0, 1, 1, 1, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion08(self, dtype): + data = numpy.ones([5], dtype) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [1, 1, 1, 1, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion09(self, dtype): + data = numpy.ones([5], dtype) + data[2] = 0 + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [0, 0, 0, 0, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion10(self, dtype): + data = numpy.ones([5], dtype) + data[2] = 0 + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [1, 0, 0, 0, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion11(self, dtype): + data = numpy.ones([5], dtype) + data[2] = 0 + struct = [1, 0, 1] + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, [1, 0, 1, 0, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion12(self, dtype): + data = numpy.ones([5], dtype) + data[2] = 0 + struct = [1, 0, 1] + out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1) + assert_array_almost_equal(out, [0, 1, 0, 1, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion13(self, dtype): + data = numpy.ones([5], dtype) + data[2] = 0 + struct = [1, 0, 1] + out = ndimage.binary_erosion(data, struct, border_value=1, origin=1) + assert_array_almost_equal(out, [1, 1, 0, 1, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion14(self, dtype): + data = numpy.ones([5], dtype) + data[2] = 0 + struct = [1, 1] + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, [1, 1, 0, 0, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion15(self, dtype): + data = numpy.ones([5], dtype) + data[2] = 0 + struct = [1, 1] + out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1) + assert_array_almost_equal(out, [1, 0, 0, 1, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion16(self, dtype): + data = numpy.ones([1, 1], dtype) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [[1]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion17(self, dtype): + data = numpy.ones([1, 1], dtype) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [[0]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion18(self, dtype): + data = numpy.ones([1, 3], dtype) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [[0, 0, 0]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion19(self, dtype): + data = numpy.ones([1, 3], dtype) + out = 
ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [[1, 1, 1]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion20(self, dtype): + data = numpy.ones([3, 3], dtype) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion21(self, dtype): + data = numpy.ones([3, 3], dtype) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion22(self, dtype): + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion23(self, dtype): + struct = ndimage.generate_binary_structure(2, 2) + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion24(self, dtype): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion25(self, dtype): + struct = [[0, 1, 0], + [1, 0, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 0, 1, 1], + [0, 0, 1, 0, 1, 1, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_erosion26(self, dtype): + struct = [[0, 1, 0], + [1, 0, 1], + [0, 1, 0]] + expected 
= [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 1, 0, 0, 1], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 0, 1, 1], + [0, 0, 1, 0, 1, 1, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_erosion(data, struct, border_value=1, + origin=(-1, -1)) + assert_array_almost_equal(out, expected) + + def test_binary_erosion27(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, border_value=1, + iterations=2) + assert_array_almost_equal(out, expected) + + def test_binary_erosion28(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=2, output=out) + assert_array_almost_equal(out, expected) + + def test_binary_erosion29(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, + border_value=1, iterations=3) + assert_array_almost_equal(out, expected) + + def test_binary_erosion30(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=3, output=out) + assert_array_almost_equal(out, expected) + + # test with output memory overlap + ndimage.binary_erosion(data, struct, border_value=1, + iterations=3, output=data) + assert_array_almost_equal(data, expected) + + def test_binary_erosion31(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 0, 1], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 1]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + 
[0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=1, output=out, origin=(-1, -1)) + assert_array_almost_equal(out, expected) + + def test_binary_erosion32(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, + border_value=1, iterations=2) + assert_array_almost_equal(out, expected) + + def test_binary_erosion33(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + mask = [[1, 1, 1, 1, 1, 0, 0], + [1, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 1, 0, 0, 1], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, + border_value=1, mask=mask, iterations=-1) + assert_array_almost_equal(out, expected) + + def test_binary_erosion34(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + mask = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, + border_value=1, mask=mask) + assert_array_almost_equal(out, expected) + + def test_binary_erosion35(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + mask = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + tmp = [[0, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 0, 1], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 1]] + expected = numpy.logical_and(tmp, mask) + tmp = numpy.logical_and(data, numpy.logical_not(mask)) + expected = numpy.logical_or(expected, tmp) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=1, output=out, + origin=(-1, -1), mask=mask) + assert_array_almost_equal(out, expected) + + def test_binary_erosion36(self): + struct = [[0, 1, 0], + [1, 
0, 1], + [0, 1, 0]] + mask = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + tmp = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 1, 0, 0, 1], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 0, 1, 1], + [0, 0, 1, 0, 1, 1, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + expected = numpy.logical_and(tmp, mask) + tmp = numpy.logical_and(data, numpy.logical_not(mask)) + expected = numpy.logical_or(expected, tmp) + out = ndimage.binary_erosion(data, struct, mask=mask, + border_value=1, origin=(-1, -1)) + assert_array_almost_equal(out, expected) + + def test_binary_erosion37(self): + a = numpy.array([[1, 0, 1], + [0, 1, 0], + [1, 0, 1]], dtype=bool) + b = numpy.zeros_like(a) + out = ndimage.binary_erosion(a, structure=a, output=b, iterations=0, + border_value=True, brute_force=True) + assert_(out is b) + assert_array_equal( + ndimage.binary_erosion(a, structure=a, iterations=0, + border_value=True), + b) + + def test_binary_erosion38(self): + data = numpy.array([[1, 0, 1], + [0, 1, 0], + [1, 0, 1]], dtype=bool) + iterations = 2.0 + with assert_raises(TypeError): + _ = ndimage.binary_erosion(data, iterations=iterations) + + def test_binary_erosion39(self): + iterations = numpy.int32(3) + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=iterations, output=out) + assert_array_almost_equal(out, expected) + + def test_binary_erosion40(self): + iterations = numpy.int64(3) + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=iterations, output=out) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation01(self, dtype): + data = numpy.ones([], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, 1) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation02(self, dtype): + data = numpy.zeros([], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, 0) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation03(self, dtype): + data = numpy.ones([1], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1]) + + 
@pytest.mark.parametrize('dtype', types) + def test_binary_dilation04(self, dtype): + data = numpy.zeros([1], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation05(self, dtype): + data = numpy.ones([3], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1, 1, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation06(self, dtype): + data = numpy.zeros([3], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [0, 0, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation07(self, dtype): + data = numpy.zeros([3], dtype) + data[1] = 1 + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1, 1, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation08(self, dtype): + data = numpy.zeros([5], dtype) + data[1] = 1 + data[3] = 1 + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1, 1, 1, 1, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation09(self, dtype): + data = numpy.zeros([5], dtype) + data[1] = 1 + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1, 1, 1, 0, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation10(self, dtype): + data = numpy.zeros([5], dtype) + data[1] = 1 + out = ndimage.binary_dilation(data, origin=-1) + assert_array_almost_equal(out, [0, 1, 1, 1, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation11(self, dtype): + data = numpy.zeros([5], dtype) + data[1] = 1 + out = ndimage.binary_dilation(data, origin=1) + assert_array_almost_equal(out, [1, 1, 0, 0, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation12(self, dtype): + data = numpy.zeros([5], dtype) + data[1] = 1 + struct = [1, 0, 1] + out = ndimage.binary_dilation(data, struct) + assert_array_almost_equal(out, [1, 0, 1, 0, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation13(self, dtype): + data = numpy.zeros([5], dtype) + data[1] = 1 + struct = [1, 0, 1] + out = ndimage.binary_dilation(data, struct, border_value=1) + assert_array_almost_equal(out, [1, 0, 1, 0, 1]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation14(self, dtype): + data = numpy.zeros([5], dtype) + data[1] = 1 + struct = [1, 0, 1] + out = ndimage.binary_dilation(data, struct, origin=-1) + assert_array_almost_equal(out, [0, 1, 0, 1, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation15(self, dtype): + data = numpy.zeros([5], dtype) + data[1] = 1 + struct = [1, 0, 1] + out = ndimage.binary_dilation(data, struct, + origin=-1, border_value=1) + assert_array_almost_equal(out, [1, 1, 0, 1, 0]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation16(self, dtype): + data = numpy.ones([1, 1], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[1]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation17(self, dtype): + data = numpy.zeros([1, 1], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[0]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation18(self, dtype): + data = numpy.ones([1, 3], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[1, 1, 1]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation19(self, dtype): + data = numpy.ones([3, 3], dtype) + out = 
ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation20(self, dtype): + data = numpy.zeros([3, 3], dtype) + data[1, 1] = 1 + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation21(self, dtype): + struct = ndimage.generate_binary_structure(2, 2) + data = numpy.zeros([3, 3], dtype) + data[1, 1] = 1 + out = ndimage.binary_dilation(data, struct) + assert_array_almost_equal(out, [[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation22(self, dtype): + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation23(self, dtype): + expected = [[1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 0, 0, 0, 0, 1], + [1, 1, 0, 0, 0, 1, 0, 1], + [1, 0, 0, 1, 1, 1, 1, 1], + [1, 0, 1, 1, 1, 1, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 0, 1, 0, 0, 1, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_dilation(data, border_value=1) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation24(self, dtype): + expected = [[1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 0, 0], + [0, 1, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_dilation(data, origin=(1, 1)) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation25(self, dtype): + expected = [[1, 1, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 0, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 1, 0, 0, 1, 0, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_dilation(data, origin=(1, 1), border_value=1) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation26(self, dtype): + struct = ndimage.generate_binary_structure(2, 2) + expected = [[1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 
0, 0], + [1, 1, 1, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_dilation(data, struct) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation27(self, dtype): + struct = [[0, 1], + [1, 1]] + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_dilation(data, struct) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation28(self, dtype): + expected = [[1, 1, 1, 1], + [1, 0, 0, 1], + [1, 0, 0, 1], + [1, 1, 1, 1]] + data = numpy.array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype) + out = ndimage.binary_dilation(data, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_dilation29(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]] + + data = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]], bool) + out = ndimage.binary_dilation(data, struct, iterations=2) + assert_array_almost_equal(out, expected) + + def test_binary_dilation30(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]] + + data = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_dilation(data, struct, iterations=2, output=out) + assert_array_almost_equal(out, expected) + + def test_binary_dilation31(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 0], + [0, 0, 0, 0, 0]] + + data = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]], bool) + out = ndimage.binary_dilation(data, struct, iterations=3) + assert_array_almost_equal(out, expected) + + def test_binary_dilation32(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 0], + [0, 0, 0, 0, 0]] + + data = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_dilation(data, struct, iterations=3, output=out) + assert_array_almost_equal(out, expected) + + def test_binary_dilation33(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 
0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + + out = ndimage.binary_dilation(data, struct, iterations=-1, + mask=mask, border_value=0) + assert_array_almost_equal(out, expected) + + def test_binary_dilation34(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.zeros(mask.shape, bool) + out = ndimage.binary_dilation(data, struct, iterations=-1, + mask=mask, border_value=1) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_dilation35(self, dtype): + tmp = [[1, 1, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 0, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 1, 0, 0, 1, 0, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + mask = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + expected = numpy.logical_and(tmp, mask) + tmp = numpy.logical_and(data, numpy.logical_not(mask)) + expected = numpy.logical_or(expected, tmp) + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_dilation(data, mask=mask, + origin=(1, 1), border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_propagation01(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 
0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + + out = ndimage.binary_propagation(data, struct, + mask=mask, border_value=0) + assert_array_almost_equal(out, expected) + + def test_binary_propagation02(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.zeros(mask.shape, bool) + out = ndimage.binary_propagation(data, struct, + mask=mask, border_value=1) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_opening01(self, dtype): + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 0, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_opening(data) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_opening02(self, dtype): + struct = ndimage.generate_binary_structure(2, 2) + expected = [[1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_opening(data, struct) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_closing01(self, dtype): + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 0, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_closing(data) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('dtype', types) + def test_binary_closing02(self, dtype): + struct = ndimage.generate_binary_structure(2, 2) + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + 
out = ndimage.binary_closing(data, struct) + assert_array_almost_equal(out, expected) + + def test_binary_fill_holes01(self): + expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_fill_holes(data) + assert_array_almost_equal(out, expected) + + def test_binary_fill_holes02(self): + expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_fill_holes(data) + assert_array_almost_equal(out, expected) + + def test_binary_fill_holes03(self): + expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 1, 1], + [0, 1, 1, 1, 0, 1, 1, 1], + [0, 1, 1, 1, 0, 1, 1, 1], + [0, 0, 1, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 0, 1, 1, 1], + [0, 1, 0, 1, 0, 1, 0, 1], + [0, 1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_fill_holes(data) + assert_array_almost_equal(out, expected) + + def test_grey_erosion01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.grey_erosion(array, footprint=footprint) + assert_array_almost_equal([[2, 2, 1, 1, 1], + [2, 3, 1, 3, 1], + [5, 5, 3, 3, 1]], output) + + def test_grey_erosion01_overlap(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + ndimage.grey_erosion(array, footprint=footprint, output=array) + assert_array_almost_equal([[2, 2, 1, 1, 1], + [2, 3, 1, 3, 1], + [5, 5, 3, 3, 1]], array) + + def test_grey_erosion02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + output = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + assert_array_almost_equal([[2, 2, 1, 1, 1], + [2, 3, 1, 3, 1], + [5, 5, 3, 3, 1]], output) + + def test_grey_erosion03(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[1, 1, 1], [1, 1, 1]] + output = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + assert_array_almost_equal([[1, 1, 0, 0, 0], + [1, 2, 0, 2, 0], + [4, 4, 2, 2, 0]], output) + + def test_grey_dilation01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[0, 1, 1], [1, 0, 1]] + output = ndimage.grey_dilation(array, footprint=footprint) + assert_array_almost_equal([[7, 7, 9, 9, 5], + [7, 9, 8, 9, 7], + [8, 8, 8, 7, 7]], output) + + def test_grey_dilation02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], 
+ [5, 8, 3, 7, 1]]) + footprint = [[0, 1, 1], [1, 0, 1]] + structure = [[0, 0, 0], [0, 0, 0]] + output = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + assert_array_almost_equal([[7, 7, 9, 9, 5], + [7, 9, 8, 9, 7], + [8, 8, 8, 7, 7]], output) + + def test_grey_dilation03(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[0, 1, 1], [1, 0, 1]] + structure = [[1, 1, 1], [1, 1, 1]] + output = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + assert_array_almost_equal([[8, 8, 10, 10, 6], + [8, 10, 9, 10, 8], + [9, 9, 9, 8, 8]], output) + + def test_grey_opening01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + tmp = ndimage.grey_erosion(array, footprint=footprint) + expected = ndimage.grey_dilation(tmp, footprint=footprint) + output = ndimage.grey_opening(array, footprint=footprint) + assert_array_almost_equal(expected, output) + + def test_grey_opening02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = ndimage.grey_dilation(tmp, footprint=footprint, + structure=structure) + output = ndimage.grey_opening(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_grey_closing01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + tmp = ndimage.grey_dilation(array, footprint=footprint) + expected = ndimage.grey_erosion(tmp, footprint=footprint) + output = ndimage.grey_closing(array, footprint=footprint) + assert_array_almost_equal(expected, output) + + def test_grey_closing02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + expected = ndimage.grey_erosion(tmp, footprint=footprint, + structure=structure) + output = ndimage.grey_closing(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_morphological_gradient01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp1 = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + tmp2 = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = tmp1 - tmp2 + output = numpy.zeros(array.shape, array.dtype) + ndimage.morphological_gradient(array, footprint=footprint, + structure=structure, output=output) + assert_array_almost_equal(expected, output) + + def test_morphological_gradient02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp1 = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + tmp2 = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = tmp1 - tmp2 + output = ndimage.morphological_gradient(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_morphological_laplace01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 
3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp1 = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + tmp2 = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = tmp1 + tmp2 - 2 * array + output = numpy.zeros(array.shape, array.dtype) + ndimage.morphological_laplace(array, footprint=footprint, + structure=structure, output=output) + assert_array_almost_equal(expected, output) + + def test_morphological_laplace02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp1 = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + tmp2 = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = tmp1 + tmp2 - 2 * array + output = ndimage.morphological_laplace(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_white_tophat01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_opening(array, footprint=footprint, + structure=structure) + expected = array - tmp + output = numpy.zeros(array.shape, array.dtype) + ndimage.white_tophat(array, footprint=footprint, + structure=structure, output=output) + assert_array_almost_equal(expected, output) + + def test_white_tophat02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_opening(array, footprint=footprint, + structure=structure) + expected = array - tmp + output = ndimage.white_tophat(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_white_tophat03(self): + array = numpy.array([[1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_) + structure = numpy.ones((3, 3), dtype=numpy.bool_) + expected = numpy.array([[0, 1, 1, 0, 0, 0, 0], + [1, 0, 0, 1, 1, 1, 0], + [1, 0, 0, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 0, 1], + [0, 1, 1, 0, 1, 0, 1], + [0, 1, 1, 0, 0, 0, 1], + [0, 0, 0, 1, 1, 1, 1]], dtype=numpy.bool_) + + output = ndimage.white_tophat(array, structure=structure) + assert_array_equal(expected, output) + + def test_white_tophat04(self): + array = numpy.eye(5, dtype=numpy.bool_) + structure = numpy.ones((3, 3), dtype=numpy.bool_) + + # Check that type mismatch is properly handled + output = numpy.empty_like(array, dtype=numpy.float64) + ndimage.white_tophat(array, structure=structure, output=output) + + def test_black_tophat01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_closing(array, footprint=footprint, + structure=structure) + expected = tmp - array + output = numpy.zeros(array.shape, array.dtype) + ndimage.black_tophat(array, footprint=footprint, + structure=structure, output=output) + assert_array_almost_equal(expected, output) + + def test_black_tophat02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_closing(array, 
footprint=footprint, + structure=structure) + expected = tmp - array + output = ndimage.black_tophat(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_black_tophat03(self): + array = numpy.array([[1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_) + structure = numpy.ones((3, 3), dtype=numpy.bool_) + expected = numpy.array([[0, 1, 1, 1, 1, 1, 1], + [1, 0, 0, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 0, 1], + [1, 0, 0, 0, 1, 0, 1], + [1, 0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 1, 0]], dtype=numpy.bool_) + + output = ndimage.black_tophat(array, structure=structure) + assert_array_equal(expected, output) + + def test_black_tophat04(self): + array = numpy.eye(5, dtype=numpy.bool_) + structure = numpy.ones((3, 3), dtype=numpy.bool_) + + # Check that type mismatch is properly handled + output = numpy.empty_like(array, dtype=numpy.float64) + ndimage.black_tophat(array, structure=structure, output=output) + + @pytest.mark.parametrize('dtype', types) + def test_hit_or_miss01(self, dtype): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + data = numpy.array([[0, 1, 0, 0, 0], + [1, 1, 1, 0, 0], + [0, 1, 0, 1, 1], + [0, 0, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 1], + [0, 1, 1, 1, 1], + [0, 0, 0, 0, 0]], dtype) + out = numpy.zeros(data.shape, bool) + ndimage.binary_hit_or_miss(data, struct, output=out) + assert_array_almost_equal(expected, out) + + @pytest.mark.parametrize('dtype', types) + def test_hit_or_miss02(self, dtype): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0], + [1, 1, 1, 0, 0, 1, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_hit_or_miss(data, struct) + assert_array_almost_equal(expected, out) + + @pytest.mark.parametrize('dtype', types) + def test_hit_or_miss03(self, dtype): + struct1 = [[0, 0, 0], + [1, 1, 1], + [0, 0, 0]] + struct2 = [[1, 1, 1], + [0, 0, 0], + [1, 1, 1]] + expected = [[0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 1, 0], + [0, 0, 0, 0, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype) + out = ndimage.binary_hit_or_miss(data, struct1, struct2) + assert_array_almost_equal(expected, out) + + +class TestDilateFix: + + def setup_method(self): + # dilation related setup + self.array = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 0, 0, 0]], dtype=numpy.uint8) + + self.sq3x3 = numpy.ones((3, 3)) + dilated3x3 = ndimage.binary_dilation(self.array, structure=self.sq3x3) + self.dilated3x3 = dilated3x3.view(numpy.uint8) + + def test_dilation_square_structure(self): + result = ndimage.grey_dilation(self.array, structure=self.sq3x3) + # +1 accounts for difference between grey and binary 
dilation + assert_array_almost_equal(result, self.dilated3x3 + 1) + + def test_dilation_scalar_size(self): + result = ndimage.grey_dilation(self.array, size=3) + assert_array_almost_equal(result, self.dilated3x3) + + +class TestBinaryOpeningClosing: + + def setup_method(self): + a = numpy.zeros((5, 5), dtype=bool) + a[1:4, 1:4] = True + a[4, 4] = True + self.array = a + self.sq3x3 = numpy.ones((3, 3)) + self.opened_old = ndimage.binary_opening(self.array, self.sq3x3, + 1, None, 0) + self.closed_old = ndimage.binary_closing(self.array, self.sq3x3, + 1, None, 0) + + def test_opening_new_arguments(self): + opened_new = ndimage.binary_opening(self.array, self.sq3x3, 1, None, + 0, None, 0, False) + assert_array_equal(opened_new, self.opened_old) + + def test_closing_new_arguments(self): + closed_new = ndimage.binary_closing(self.array, self.sq3x3, 1, None, + 0, None, 0, False) + assert_array_equal(closed_new, self.closed_old) + + +def test_binary_erosion_noninteger_iterations(): + # regression test for gh-9905, gh-9909: ValueError for + # non integer iterations + data = numpy.ones([1]) + assert_raises(TypeError, ndimage.binary_erosion, data, iterations=0.5) + assert_raises(TypeError, ndimage.binary_erosion, data, iterations=1.5) + + +def test_binary_dilation_noninteger_iterations(): + # regression test for gh-9905, gh-9909: ValueError for + # non integer iterations + data = numpy.ones([1]) + assert_raises(TypeError, ndimage.binary_dilation, data, iterations=0.5) + assert_raises(TypeError, ndimage.binary_dilation, data, iterations=1.5) + + +def test_binary_opening_noninteger_iterations(): + # regression test for gh-9905, gh-9909: ValueError for + # non integer iterations + data = numpy.ones([1]) + assert_raises(TypeError, ndimage.binary_opening, data, iterations=0.5) + assert_raises(TypeError, ndimage.binary_opening, data, iterations=1.5) + + +def test_binary_closing_noninteger_iterations(): + # regression test for gh-9905, gh-9909: ValueError for + # non integer iterations + data = numpy.ones([1]) + assert_raises(TypeError, ndimage.binary_closing, data, iterations=0.5) + assert_raises(TypeError, ndimage.binary_closing, data, iterations=1.5) + + +def test_binary_closing_noninteger_brute_force_passes_when_true(): + # regression test for gh-9905, gh-9909: ValueError for + # non integer iterations + data = numpy.ones([1]) + + assert ndimage.binary_erosion( + data, iterations=2, brute_force=1.5 + ) == ndimage.binary_erosion(data, iterations=2, brute_force=bool(1.5)) + assert ndimage.binary_erosion( + data, iterations=2, brute_force=0.0 + ) == ndimage.binary_erosion(data, iterations=2, brute_force=bool(0.0)) + + +@pytest.mark.parametrize( + 'function', + ['binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing'], +) +@pytest.mark.parametrize('iterations', [1, 5]) +@pytest.mark.parametrize('brute_force', [False, True]) +def test_binary_input_as_output(function, iterations, brute_force): + rstate = numpy.random.RandomState(123) + data = rstate.randint(low=0, high=2, size=100).astype(bool) + ndi_func = getattr(ndimage, function) + + # input data is not modified + data_orig = data.copy() + expected = ndi_func(data, brute_force=brute_force, iterations=iterations) + assert_array_equal(data, data_orig) + + # data should now contain the expected result + ndi_func(data, brute_force=brute_force, iterations=iterations, output=data) + assert_array_equal(expected, data) + + +def test_binary_hit_or_miss_input_as_output(): + rstate = numpy.random.RandomState(123) + data = rstate.randint(low=0, 
high=2, size=100).astype(bool) + + # input data is not modified + data_orig = data.copy() + expected = ndimage.binary_hit_or_miss(data) + assert_array_equal(data, data_orig) + + # data should now contain the expected result + ndimage.binary_hit_or_miss(data, output=data) + assert_array_equal(expected, data) + + +def test_distance_transform_cdt_invalid_metric(): + msg = 'invalid metric provided' + with pytest.raises(ValueError, match=msg): + ndimage.distance_transform_cdt(np.ones((5, 5)), + metric="garbage") diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py new file mode 100644 index 0000000000000000000000000000000000000000..a74e55111f8fac906f58a947db4a214da82a3cae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py @@ -0,0 +1,65 @@ +"""Tests for spline filtering.""" +import numpy as np +import pytest + +from numpy.testing import assert_almost_equal + +from scipy import ndimage + + +def get_spline_knot_values(order): + """Knot values to the right of a B-spline's center.""" + knot_values = {0: [1], + 1: [1], + 2: [6, 1], + 3: [4, 1], + 4: [230, 76, 1], + 5: [66, 26, 1]} + + return knot_values[order] + + +def make_spline_knot_matrix(n, order, mode='mirror'): + """Matrix to invert to find the spline coefficients.""" + knot_values = get_spline_knot_values(order) + + matrix = np.zeros((n, n)) + for diag, knot_value in enumerate(knot_values): + indices = np.arange(diag, n) + if diag == 0: + matrix[indices, indices] = knot_value + else: + matrix[indices, indices - diag] = knot_value + matrix[indices - diag, indices] = knot_value + + knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:]) + + if mode == 'mirror': + start, step = 1, 1 + elif mode == 'reflect': + start, step = 0, 1 + elif mode == 'grid-wrap': + start, step = -1, -1 + else: + raise ValueError(f'unsupported mode {mode}') + + for row in range(len(knot_values) - 1): + for idx, knot_value in enumerate(knot_values[row + 1:]): + matrix[row, start + step*idx] += knot_value + matrix[-row - 1, -start - 1 - step*idx] += knot_value + + return matrix / knot_values_sum + + +@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5]) +@pytest.mark.parametrize('mode', ['mirror', 'grid-wrap', 'reflect']) +def test_spline_filter_vs_matrix_solution(order, mode): + n = 100 + eye = np.eye(n, dtype=float) + spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order, + mode=mode) + spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order, + mode=mode) + matrix = make_spline_knot_matrix(n, order, mode=mode) + assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix)) + assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
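
The white-tophat, black-tophat and morphological-Laplace tests in the hunk above all reduce to the same grey-morphology identities. The sketch below restates those identities in a minimal, standalone form; it reuses the tests' 3x5 sample array but assumes a plain flat 3x3 structuring element (`size=3`) instead of the custom `footprint`/`structure` pairs the tests pass, purely for brevity.

```py
# Minimal sketch of the identities exercised by the tophat/Laplace tests.
# Assumption: a flat 3x3 structuring element (size=3) rather than the custom
# footprint/structure pairs used in the tests above.
import numpy as np
from scipy import ndimage

x = np.array([[3, 2, 5, 1, 4],
              [7, 6, 9, 3, 5],
              [5, 8, 3, 7, 1]])

# white tophat: input minus grey opening
assert np.array_equal(ndimage.white_tophat(x, size=3),
                      x - ndimage.grey_opening(x, size=3))

# black tophat: grey closing minus input
assert np.array_equal(ndimage.black_tophat(x, size=3),
                      ndimage.grey_closing(x, size=3) - x)

# morphological Laplace: dilation + erosion - 2 * input
assert np.array_equal(ndimage.morphological_laplace(x, size=3),
                      ndimage.grey_dilation(x, size=3)
                      + ndimage.grey_erosion(x, size=3) - 2 * x)
```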
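
The spline tests verify that `spline_filter1d` inverts the B-spline knot matrix. The practical consequence is the interpolation property sketched below: prefiltering produces B-spline coefficients, and evaluating the spline from those coefficients back at the original sample positions (here via a zero `ndimage.shift` with `prefilter=False`) reproduces the input. The 1-D test signal and the tolerance are illustrative choices, not values taken from the tests.

```py
# Sketch of the interpolation property behind the knot-matrix check.
# Assumption: the sine signal and the tolerance below are arbitrary examples.
import numpy as np
from scipy import ndimage

x = np.sin(np.linspace(0.0, 3.0, 20))

# Compute cubic B-spline coefficients with the same boundary mode used below.
coeffs = ndimage.spline_filter1d(x, order=3, mode='mirror')

# A zero shift evaluates the spline at the original sample positions;
# prefilter=False tells shift that `coeffs` are already spline coefficients.
recon = ndimage.shift(coeffs, 0.0, order=3, mode='mirror', prefilter=False)

np.testing.assert_allclose(recon, x, atol=1e-9)
```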