diff --git a/.gitattributes b/.gitattributes index 4ba9194928d6f069f000712e40dc6a349e89456b..26ffaf961338e8447cb9b7e35da7334db8eb1615 100644 --- a/.gitattributes +++ b/.gitattributes @@ -99,3 +99,5 @@ llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so llmeval-env/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/LICENSE.rst b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..d2612b11b6fdd5ee2155d6f19aeefd5eb438588a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/LICENSE.rst @@ -0,0 +1,30 @@ +*(This is the OSI approved 3-clause "New BSD License".)* + +Copyright © 2016, wouter bolsterlee + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +* Neither the name of the author nor the names of the contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..25140023c3ca440ba43ef4248360efb9527901cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/METADATA @@ -0,0 +1,48 @@ +Metadata-Version: 2.1 +Name: jsonlines +Version: 4.0.0 +Summary: Library with helpers for the jsonlines file format +Home-page: https://github.com/wbolster/jsonlines +Author: wouter bolsterlee +Author-email: wouter@bolsterl.ee +License: BSD +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Internet :: Log Analysis +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Logging +Classifier: Topic :: Utilities +Requires-Python: >=3.8 +License-File: LICENSE.rst +Requires-Dist: attrs >=19.2.0 + +.. image:: https://pepy.tech/badge/jsonlines + :target: https://pepy.tech/project/jsonlines + +.. image:: https://pepy.tech/badge/jsonlines/month + :target: https://pepy.tech/project/jsonlines + +.. image:: https://anaconda.org/anaconda/anaconda/badges/installer/conda.svg + :target: https://anaconda.org/anaconda/jsonlines + +========= +jsonlines +========= + +``jsonlines`` is a Python library to simplify working with jsonlines_ +and ndjson_ data. + +.. _jsonlines: http://jsonlines.org/ +.. 
_ndjson: http://ndjson.org/ + +* Documentation: https://jsonlines.readthedocs.io/ + +* Python Package Index (PyPI): https://pypi.python.org/pypi/jsonlines/ + +* Source code and issue tracker: https://github.com/wbolster/jsonlines + diff --git a/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f75f38bdfe1bad71254cfa4b29b6f77e21dcb6af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/RECORD @@ -0,0 +1,11 @@ +jsonlines-4.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jsonlines-4.0.0.dist-info/LICENSE.rst,sha256=vKNU5jkrJCH_sfHiNFRcUVQzuSkTYsG7n9EAkiuQ60I,1543 +jsonlines-4.0.0.dist-info/METADATA,sha256=XDMhu0s_WdlpRSAcseysBZnpSInKa5EEMwyEZ-5ZtHE,1565 +jsonlines-4.0.0.dist-info/RECORD,, +jsonlines-4.0.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +jsonlines-4.0.0.dist-info/top_level.txt,sha256=Y-KWmwRS4_Ci-mje2h6XC6xPeGV191NA6XhnbPot6eE,10 +jsonlines/__init__.py,sha256=7R6ohpIk95mz93rtkWQLSK1_1UQWUd9ckaVxgFyfhsA,258 +jsonlines/__pycache__/__init__.cpython-310.pyc,, +jsonlines/__pycache__/jsonlines.cpython-310.pyc,, +jsonlines/jsonlines.py,sha256=PpLVYlWwGiB4UoTVq2hkdp9oJ0ioSO6gW0k8_-P97-w,19895 +jsonlines/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git 
a/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..c9ad36fe960977ebebfaac0b6682d2373ec0ffb5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +jsonlines diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..9dbf3b139f8139e853ba707ba53929d4d2d7bd6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83ec9ad7775e89f6280286ba11eb9d28cafe49c2f777a3e051bcc881de7449fc +size 56875328 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cdf0ed14bf878836019148068b4fc1b62c0022c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7950b838b307f092f4592a14592f76c33ccda08a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73ed9bce140eb93f6a1f2d61255e9a1b8a649ba6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cudalibxt.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cudalibxt.h new file mode 100644 index 0000000000000000000000000000000000000000..94fcf4745fafa04f57678ba5ee64103f8ebd6444 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cudalibxt.h @@ -0,0 +1,97 @@ + /* Copyright 2013,2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. 
Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/*! +* \file cudalibxt.h +* \brief Public header file for the NVIDIA library multi-GPU support structures +*/ + +#ifndef _CUDA_LIB_XT_H_ +#define _CUDA_LIB_XT_H_ +#include + +#define CUDA_XT_DESCRIPTOR_VERSION 0x01000000 // This is added to CUDART_VERSION + +enum cudaXtCopyType_t { + LIB_XT_COPY_HOST_TO_DEVICE, + LIB_XT_COPY_DEVICE_TO_HOST, + LIB_XT_COPY_DEVICE_TO_DEVICE +} ; +typedef enum cudaXtCopyType_t cudaLibXtCopyType; + +enum libFormat_t { + LIB_FORMAT_CUFFT = 0x0, + LIB_FORMAT_UNDEFINED = 0x1 +}; + +typedef enum libFormat_t libFormat; + +#define MAX_CUDA_DESCRIPTOR_GPUS 64 + +struct cudaXtDesc_t{ + int version; //descriptor version + int nGPUs; //number of GPUs + int GPUs[MAX_CUDA_DESCRIPTOR_GPUS]; //array of device IDs + void *data[MAX_CUDA_DESCRIPTOR_GPUS]; //array of pointers to data, one per GPU + size_t size[MAX_CUDA_DESCRIPTOR_GPUS]; //array of data sizes, one per GPU + void *cudaXtState; //opaque CUDA utility structure +}; +typedef struct cudaXtDesc_t cudaXtDesc; + +struct cudaLibXtDesc_t{ + int version; //descriptor version + cudaXtDesc *descriptor; //multi-GPU memory descriptor + libFormat library; //which library recognizes the format + int subFormat; //library specific enumerator of sub formats + void *libDescriptor; //library specific descriptor e.g. 
FFT transform plan object +}; +typedef struct cudaLibXtDesc_t cudaLibXtDesc; + + +#endif + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufft.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufft.h new file mode 100644 index 0000000000000000000000000000000000000000..b68a8b8fcaed8b6333781e83ac26b0e5bc31ecbd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufft.h @@ -0,0 +1,317 @@ + /* Copyright 2005-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/*! 
+* \file cufft.h +* \brief Public header file for the NVIDIA CUDA FFT library (CUFFT) +*/ + +#ifndef _CUFFT_H_ +#define _CUFFT_H_ + + +#include "cuComplex.h" +#include "driver_types.h" +#include "library_types.h" + +#ifndef CUFFTAPI +#ifdef _WIN32 +#define CUFFTAPI __stdcall +#elif __GNUC__ >= 4 +#define CUFFTAPI __attribute__ ((visibility ("default"))) +#else +#define CUFFTAPI +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#define CUFFT_VER_MAJOR 11 +#define CUFFT_VER_MINOR 0 +#define CUFFT_VER_PATCH 2 +#define CUFFT_VER_BUILD 54 + +#define CUFFT_VERSION 11002 + +// CUFFT API function return values +typedef enum cufftResult_t { + CUFFT_SUCCESS = 0x0, + CUFFT_INVALID_PLAN = 0x1, + CUFFT_ALLOC_FAILED = 0x2, + CUFFT_INVALID_TYPE = 0x3, + CUFFT_INVALID_VALUE = 0x4, + CUFFT_INTERNAL_ERROR = 0x5, + CUFFT_EXEC_FAILED = 0x6, + CUFFT_SETUP_FAILED = 0x7, + CUFFT_INVALID_SIZE = 0x8, + CUFFT_UNALIGNED_DATA = 0x9, + CUFFT_INCOMPLETE_PARAMETER_LIST = 0xA, + CUFFT_INVALID_DEVICE = 0xB, + CUFFT_PARSE_ERROR = 0xC, + CUFFT_NO_WORKSPACE = 0xD, + CUFFT_NOT_IMPLEMENTED = 0xE, + CUFFT_LICENSE_ERROR = 0x0F, + CUFFT_NOT_SUPPORTED = 0x10 + +} cufftResult; + +#define MAX_CUFFT_ERROR 0x11 + + +// CUFFT defines and supports the following data types + + +// cufftReal is a single-precision, floating-point real data type. +// cufftDoubleReal is a double-precision, real data type. +typedef float cufftReal; +typedef double cufftDoubleReal; + +// cufftComplex is a single-precision, floating-point complex data type that +// consists of interleaved real and imaginary components. +// cufftDoubleComplex is the double-precision equivalent. 
+typedef cuComplex cufftComplex; +typedef cuDoubleComplex cufftDoubleComplex; + +// CUFFT transform directions +#define CUFFT_FORWARD -1 // Forward FFT +#define CUFFT_INVERSE 1 // Inverse FFT + +// CUFFT supports the following transform types +typedef enum cufftType_t { + CUFFT_R2C = 0x2a, // Real to Complex (interleaved) + CUFFT_C2R = 0x2c, // Complex (interleaved) to Real + CUFFT_C2C = 0x29, // Complex to Complex, interleaved + CUFFT_D2Z = 0x6a, // Double to Double-Complex + CUFFT_Z2D = 0x6c, // Double-Complex to Double + CUFFT_Z2Z = 0x69 // Double-Complex to Double-Complex +} cufftType; + +// CUFFT supports the following data layouts +typedef enum cufftCompatibility_t { + CUFFT_COMPATIBILITY_FFTW_PADDING = 0x01 // The default value +} cufftCompatibility; + +#define CUFFT_COMPATIBILITY_DEFAULT CUFFT_COMPATIBILITY_FFTW_PADDING + +// +// structure definition used by the shim between old and new APIs +// +#define MAX_SHIM_RANK 3 + +// cufftHandle is a handle type used to store and access CUFFT plans. 
+typedef int cufftHandle; + + +cufftResult CUFFTAPI cufftPlan1d(cufftHandle *plan, + int nx, + cufftType type, + int batch); + +cufftResult CUFFTAPI cufftPlan2d(cufftHandle *plan, + int nx, int ny, + cufftType type); + +cufftResult CUFFTAPI cufftPlan3d(cufftHandle *plan, + int nx, int ny, int nz, + cufftType type); + +cufftResult CUFFTAPI cufftPlanMany(cufftHandle *plan, + int rank, + int *n, + int *inembed, int istride, int idist, + int *onembed, int ostride, int odist, + cufftType type, + int batch); + +cufftResult CUFFTAPI cufftMakePlan1d(cufftHandle plan, + int nx, + cufftType type, + int batch, + size_t *workSize); + +cufftResult CUFFTAPI cufftMakePlan2d(cufftHandle plan, + int nx, int ny, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftMakePlan3d(cufftHandle plan, + int nx, int ny, int nz, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftMakePlanMany(cufftHandle plan, + int rank, + int *n, + int *inembed, int istride, int idist, + int *onembed, int ostride, int odist, + cufftType type, + int batch, + size_t *workSize); + +cufftResult CUFFTAPI cufftMakePlanMany64(cufftHandle plan, + int rank, + long long int *n, + long long int *inembed, + long long int istride, + long long int idist, + long long int *onembed, + long long int ostride, long long int odist, + cufftType type, + long long int batch, + size_t * workSize); + +cufftResult CUFFTAPI cufftGetSizeMany64(cufftHandle plan, + int rank, + long long int *n, + long long int *inembed, + long long int istride, long long int idist, + long long int *onembed, + long long int ostride, long long int odist, + cufftType type, + long long int batch, + size_t *workSize); + + + + +cufftResult CUFFTAPI cufftEstimate1d(int nx, + cufftType type, + int batch, + size_t *workSize); + +cufftResult CUFFTAPI cufftEstimate2d(int nx, int ny, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftEstimate3d(int nx, int ny, int nz, + cufftType type, + size_t *workSize); + 
+cufftResult CUFFTAPI cufftEstimateMany(int rank, + int *n, + int *inembed, int istride, int idist, + int *onembed, int ostride, int odist, + cufftType type, + int batch, + size_t *workSize); + +cufftResult CUFFTAPI cufftCreate(cufftHandle * handle); + +cufftResult CUFFTAPI cufftGetSize1d(cufftHandle handle, + int nx, + cufftType type, + int batch, + size_t *workSize ); + +cufftResult CUFFTAPI cufftGetSize2d(cufftHandle handle, + int nx, int ny, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftGetSize3d(cufftHandle handle, + int nx, int ny, int nz, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftGetSizeMany(cufftHandle handle, + int rank, int *n, + int *inembed, int istride, int idist, + int *onembed, int ostride, int odist, + cufftType type, int batch, size_t *workArea); + +cufftResult CUFFTAPI cufftGetSize(cufftHandle handle, size_t *workSize); + +cufftResult CUFFTAPI cufftSetWorkArea(cufftHandle plan, void *workArea); + +cufftResult CUFFTAPI cufftSetAutoAllocation(cufftHandle plan, int autoAllocate); + +cufftResult CUFFTAPI cufftExecC2C(cufftHandle plan, + cufftComplex *idata, + cufftComplex *odata, + int direction); + +cufftResult CUFFTAPI cufftExecR2C(cufftHandle plan, + cufftReal *idata, + cufftComplex *odata); + +cufftResult CUFFTAPI cufftExecC2R(cufftHandle plan, + cufftComplex *idata, + cufftReal *odata); + +cufftResult CUFFTAPI cufftExecZ2Z(cufftHandle plan, + cufftDoubleComplex *idata, + cufftDoubleComplex *odata, + int direction); + +cufftResult CUFFTAPI cufftExecD2Z(cufftHandle plan, + cufftDoubleReal *idata, + cufftDoubleComplex *odata); + +cufftResult CUFFTAPI cufftExecZ2D(cufftHandle plan, + cufftDoubleComplex *idata, + cufftDoubleReal *odata); + + +// utility functions +cufftResult CUFFTAPI cufftSetStream(cufftHandle plan, + cudaStream_t stream); + +cufftResult CUFFTAPI cufftDestroy(cufftHandle plan); + +cufftResult CUFFTAPI cufftGetVersion(int *version); + +cufftResult CUFFTAPI 
cufftGetProperty(libraryPropertyType type, + int *value); + +#ifdef __cplusplus +} +#endif + +#endif /* _CUFFT_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufftXt.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufftXt.h new file mode 100644 index 0000000000000000000000000000000000000000..12e539e9669298e88c641abde9574117d380c6fb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufftXt.h @@ -0,0 +1,268 @@ + + /* Copyright 2005-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/*! +* \file cufftXt.h +* \brief Public header file for the NVIDIA CUDA FFT library (CUFFT) +*/ + +#ifndef _CUFFTXT_H_ +#define _CUFFTXT_H_ +#include "cudalibxt.h" +#include "cufft.h" + + +#ifndef CUFFTAPI +#ifdef _WIN32 +#define CUFFTAPI __stdcall +#else +#define CUFFTAPI +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// +// cufftXtSubFormat identifies the data layout of +// a memory descriptor owned by cufft. 
+// note that multi GPU cufft does not yet support out-of-place transforms +// + +typedef enum cufftXtSubFormat_t { + CUFFT_XT_FORMAT_INPUT = 0x00, //by default input is in linear order across GPUs + CUFFT_XT_FORMAT_OUTPUT = 0x01, //by default output is in scrambled order depending on transform + CUFFT_XT_FORMAT_INPLACE = 0x02, //by default inplace is input order, which is linear across GPUs + CUFFT_XT_FORMAT_INPLACE_SHUFFLED = 0x03, //shuffled output order after execution of the transform + CUFFT_XT_FORMAT_1D_INPUT_SHUFFLED = 0x04, //shuffled input order prior to execution of 1D transforms + CUFFT_XT_FORMAT_DISTRIBUTED_INPUT = 0x05, + CUFFT_XT_FORMAT_DISTRIBUTED_OUTPUT = 0x06, + CUFFT_FORMAT_UNDEFINED = 0x07 +} cufftXtSubFormat; + +// +// cufftXtCopyType specifies the type of copy for cufftXtMemcpy +// +typedef enum cufftXtCopyType_t { + CUFFT_COPY_HOST_TO_DEVICE = 0x00, + CUFFT_COPY_DEVICE_TO_HOST = 0x01, + CUFFT_COPY_DEVICE_TO_DEVICE = 0x02, + CUFFT_COPY_UNDEFINED = 0x03 +} cufftXtCopyType; + +// +// cufftXtQueryType specifies the type of query for cufftXtQueryPlan +// +typedef enum cufftXtQueryType_t { + CUFFT_QUERY_1D_FACTORS = 0x00, + CUFFT_QUERY_UNDEFINED = 0x01 +} cufftXtQueryType; + +typedef struct cufftXt1dFactors_t { + long long int size; + long long int stringCount; + long long int stringLength; + long long int substringLength; + long long int factor1; + long long int factor2; + long long int stringMask; + long long int substringMask; + long long int factor1Mask; + long long int factor2Mask; + int stringShift; + int substringShift; + int factor1Shift; + int factor2Shift; +} cufftXt1dFactors; + +// +// cufftXtWorkAreaPolicy specifies policy for cufftXtSetWorkAreaPolicy +// +typedef enum cufftXtWorkAreaPolicy_t { + CUFFT_WORKAREA_MINIMAL = 0, /* maximum reduction */ + CUFFT_WORKAREA_USER = 1, /* use workSize parameter as limit */ + CUFFT_WORKAREA_PERFORMANCE = 2, /* default - 1x overhead or more, maximum performance */ +} cufftXtWorkAreaPolicy; + +// 
multi-GPU routines +cufftResult CUFFTAPI cufftXtSetGPUs(cufftHandle handle, int nGPUs, int *whichGPUs); + +cufftResult CUFFTAPI cufftXtMalloc(cufftHandle plan, + cudaLibXtDesc ** descriptor, + cufftXtSubFormat format); + +cufftResult CUFFTAPI cufftXtMemcpy(cufftHandle plan, + void *dstPointer, + void *srcPointer, + cufftXtCopyType type); + +cufftResult CUFFTAPI cufftXtFree(cudaLibXtDesc *descriptor); + +cufftResult CUFFTAPI cufftXtSetWorkArea(cufftHandle plan, void **workArea); + +cufftResult CUFFTAPI cufftXtExecDescriptorC2C(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output, + int direction); + +cufftResult CUFFTAPI cufftXtExecDescriptorR2C(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output); + +cufftResult CUFFTAPI cufftXtExecDescriptorC2R(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output); + +cufftResult CUFFTAPI cufftXtExecDescriptorZ2Z(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output, + int direction); + +cufftResult CUFFTAPI cufftXtExecDescriptorD2Z(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output); + +cufftResult CUFFTAPI cufftXtExecDescriptorZ2D(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output); + +// Utility functions + +cufftResult CUFFTAPI cufftXtQueryPlan(cufftHandle plan, void *queryStruct, cufftXtQueryType queryType); + + +// callbacks + + +typedef enum cufftXtCallbackType_t { + CUFFT_CB_LD_COMPLEX = 0x0, + CUFFT_CB_LD_COMPLEX_DOUBLE = 0x1, + CUFFT_CB_LD_REAL = 0x2, + CUFFT_CB_LD_REAL_DOUBLE = 0x3, + CUFFT_CB_ST_COMPLEX = 0x4, + CUFFT_CB_ST_COMPLEX_DOUBLE = 0x5, + CUFFT_CB_ST_REAL = 0x6, + CUFFT_CB_ST_REAL_DOUBLE = 0x7, + CUFFT_CB_UNDEFINED = 0x8 + +} cufftXtCallbackType; + +typedef cufftComplex (*cufftCallbackLoadC)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer); +typedef cufftDoubleComplex (*cufftCallbackLoadZ)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer); +typedef cufftReal (*cufftCallbackLoadR)(void *dataIn, 
size_t offset, void *callerInfo, void *sharedPointer); +typedef cufftDoubleReal(*cufftCallbackLoadD)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer); + +typedef void (*cufftCallbackStoreC)(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPointer); +typedef void (*cufftCallbackStoreZ)(void *dataOut, size_t offset, cufftDoubleComplex element, void *callerInfo, void *sharedPointer); +typedef void (*cufftCallbackStoreR)(void *dataOut, size_t offset, cufftReal element, void *callerInfo, void *sharedPointer); +typedef void (*cufftCallbackStoreD)(void *dataOut, size_t offset, cufftDoubleReal element, void *callerInfo, void *sharedPointer); + + +cufftResult CUFFTAPI cufftXtSetCallback(cufftHandle plan, void **callback_routine, cufftXtCallbackType cbType, void **caller_info); +cufftResult CUFFTAPI cufftXtClearCallback(cufftHandle plan, cufftXtCallbackType cbType); +cufftResult CUFFTAPI cufftXtSetCallbackSharedSize(cufftHandle plan, cufftXtCallbackType cbType, size_t sharedSize); + +cufftResult CUFFTAPI cufftXtMakePlanMany(cufftHandle plan, + int rank, + long long int *n, + long long int *inembed, + long long int istride, + long long int idist, + cudaDataType inputtype, + long long int *onembed, + long long int ostride, + long long int odist, + cudaDataType outputtype, + long long int batch, + size_t *workSize, + cudaDataType executiontype); + +cufftResult CUFFTAPI cufftXtGetSizeMany(cufftHandle plan, + int rank, + long long int *n, + long long int *inembed, + long long int istride, + long long int idist, + cudaDataType inputtype, + long long int *onembed, + long long int ostride, + long long int odist, + cudaDataType outputtype, + long long int batch, + size_t *workSize, + cudaDataType executiontype); + + +cufftResult CUFFTAPI cufftXtExec(cufftHandle plan, + void *input, + void *output, + int direction); + +cufftResult CUFFTAPI cufftXtExecDescriptor(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output, + int 
direction); + +cufftResult CUFFTAPI cufftXtSetWorkAreaPolicy(cufftHandle plan, cufftXtWorkAreaPolicy policy, size_t *workSize); + +cufftResult CUFFTAPI cufftXtSetDistribution(cufftHandle plan, + int rank, + const long long int* lower_input, + const long long int* upper_input, + const long long int* lower_output, + const long long int* upper_output, + const long long int* strides_input, + const long long int* strides_output); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufftw.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufftw.h new file mode 100644 index 0000000000000000000000000000000000000000..6f12b4e1ea68c5a186d73b5d943d2cba0218312f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/include/cufftw.h @@ -0,0 +1,454 @@ + + /* Copyright 2005-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/*! 
+* \file cufftw.h +* \brief Public header file for the NVIDIA CUDA FFTW library (CUFFTW) +*/ + +#ifndef _CUFFTW_H_ +#define _CUFFTW_H_ + + +#include +#include "cufft.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// transform direction +#define FFTW_FORWARD -1 +#define FFTW_INVERSE 1 +#define FFTW_BACKWARD 1 + +// Planner flags + +#define FFTW_ESTIMATE 0x01 +#define FFTW_MEASURE 0x02 +#define FFTW_PATIENT 0x03 +#define FFTW_EXHAUSTIVE 0x04 +#define FFTW_WISDOM_ONLY 0x05 + +//Algorithm restriction flags + +#define FFTW_DESTROY_INPUT 0x08 +#define FFTW_PRESERVE_INPUT 0x0C +#define FFTW_UNALIGNED 0x10 + +// CUFFTW defines and supports the following data types + +// note if complex.h has been included we use the C99 complex types +#if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined (complex) + typedef double _Complex fftw_complex; + typedef float _Complex fftwf_complex; +#else + typedef double fftw_complex[2]; + typedef float fftwf_complex[2]; +#endif + +typedef void *fftw_plan; + +typedef void *fftwf_plan; + +typedef struct { + int n; + int is; + int os; +} fftw_iodim; + +typedef fftw_iodim fftwf_iodim; + +typedef struct { + ptrdiff_t n; + ptrdiff_t is; + ptrdiff_t os; +} fftw_iodim64; + +typedef fftw_iodim64 fftwf_iodim64; + + +// CUFFTW defines and supports the following double precision APIs + + +fftw_plan CUFFTAPI fftw_plan_dft_1d(int n, + fftw_complex *in, + fftw_complex *out, + int sign, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_2d(int n0, + int n1, + fftw_complex *in, + fftw_complex *out, + int sign, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_3d(int n0, + int n1, + int n2, + fftw_complex *in, + fftw_complex *out, + int sign, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft(int rank, + const int *n, + fftw_complex *in, + fftw_complex *out, + int sign, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_r2c_1d(int n, + double *in, + fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI 
fftw_plan_dft_r2c_2d(int n0, + int n1, + double *in, + fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_r2c_3d(int n0, + int n1, + int n2, + double *in, + fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_r2c(int rank, + const int *n, + double *in, + fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_c2r_1d(int n, + fftw_complex *in, + double *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_c2r_2d(int n0, + int n1, + fftw_complex *in, + double *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_c2r_3d(int n0, + int n1, + int n2, + fftw_complex *in, + double *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_c2r(int rank, + const int *n, + fftw_complex *in, + double *out, + unsigned flags); + + +fftw_plan CUFFTAPI fftw_plan_many_dft(int rank, + const int *n, + int batch, + fftw_complex *in, + const int *inembed, int istride, int idist, + fftw_complex *out, + const int *onembed, int ostride, int odist, + int sign, unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_many_dft_r2c(int rank, + const int *n, + int batch, + double *in, + const int *inembed, int istride, int idist, + fftw_complex *out, + const int *onembed, int ostride, int odist, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_many_dft_c2r(int rank, + const int *n, + int batch, + fftw_complex *in, + const int *inembed, int istride, int idist, + double *out, + const int *onembed, int ostride, int odist, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru_dft(int rank, const fftw_iodim *dims, + int batch_rank, const fftw_iodim *batch_dims, + fftw_complex *in, fftw_complex *out, + int sign, unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru_dft_r2c(int rank, const fftw_iodim *dims, + int batch_rank, const fftw_iodim *batch_dims, + double *in, fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru_dft_c2r(int rank, const fftw_iodim *dims, + int batch_rank, const fftw_iodim 
*batch_dims, + fftw_complex *in, double *out, + unsigned flags); + +void CUFFTAPI fftw_execute(const fftw_plan plan); + +void CUFFTAPI fftw_execute_dft(const fftw_plan plan, + fftw_complex *idata, + fftw_complex *odata); + +void CUFFTAPI fftw_execute_dft_r2c(const fftw_plan plan, + double *idata, + fftw_complex *odata); + +void CUFFTAPI fftw_execute_dft_c2r(const fftw_plan plan, + fftw_complex *idata, + double *odata); + + +// CUFFTW defines and supports the following single precision APIs + +fftwf_plan CUFFTAPI fftwf_plan_dft_1d(int n, + fftwf_complex *in, + fftwf_complex *out, + int sign, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_2d(int n0, + int n1, + fftwf_complex *in, + fftwf_complex *out, + int sign, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_3d(int n0, + int n1, + int n2, + fftwf_complex *in, + fftwf_complex *out, + int sign, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft(int rank, + const int *n, + fftwf_complex *in, + fftwf_complex *out, + int sign, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_1d(int n, + float *in, + fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_2d(int n0, + int n1, + float *in, + fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_3d(int n0, + int n1, + int n2, + float *in, + fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_r2c(int rank, + const int *n, + float *in, + fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_1d(int n, + fftwf_complex *in, + float *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_2d(int n0, + int n1, + fftwf_complex *in, + float *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_3d(int n0, + int n1, + int n2, + fftwf_complex *in, + float *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_c2r(int rank, + const int *n, + fftwf_complex *in, + float *out, + unsigned flags); + +fftwf_plan CUFFTAPI 
fftwf_plan_many_dft(int rank, + const int *n, + int batch, + fftwf_complex *in, + const int *inembed, int istride, int idist, + fftwf_complex *out, + const int *onembed, int ostride, int odist, + int sign, unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_many_dft_r2c(int rank, + const int *n, + int batch, + float *in, + const int *inembed, int istride, int idist, + fftwf_complex *out, + const int *onembed, int ostride, int odist, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_many_dft_c2r(int rank, + const int *n, + int batch, + fftwf_complex *in, + const int *inembed, int istride, int idist, + float *out, + const int *onembed, int ostride, int odist, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru_dft(int rank, const fftwf_iodim *dims, + int batch_rank, const fftwf_iodim *batch_dims, + fftwf_complex *in, fftwf_complex *out, + int sign, unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru_dft_r2c(int rank, const fftwf_iodim *dims, + int batch_rank, const fftwf_iodim *batch_dims, + float *in, fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru_dft_c2r(int rank, const fftwf_iodim *dims, + int batch_rank, const fftwf_iodim *batch_dims, + fftwf_complex *in, float *out, + unsigned flags); + +void CUFFTAPI fftwf_execute(const fftw_plan plan); + +void CUFFTAPI fftwf_execute_dft(const fftwf_plan plan, + fftwf_complex *idata, + fftwf_complex *odata); + +void CUFFTAPI fftwf_execute_dft_r2c(const fftwf_plan plan, + float *idata, + fftwf_complex *odata); + +void CUFFTAPI fftwf_execute_dft_c2r(const fftwf_plan plan, + fftwf_complex *idata, + float *odata); + +/// CUFFTW 64-bit Guru Interface +/// dp +fftw_plan CUFFTAPI fftw_plan_guru64_dft(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, fftw_complex* in, fftw_complex* out, int sign, unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru64_dft_r2c(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, double* in, 
fftw_complex* out, unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru64_dft_c2r(int rank, const fftw_iodim64* dims, int batch_rank, const fftw_iodim64* batch_dims, fftw_complex* in, double* out, unsigned flags); + +/// sp +fftwf_plan CUFFTAPI fftwf_plan_guru64_dft(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, fftwf_complex* in, fftwf_complex* out, int sign, unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru64_dft_r2c(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, float* in, fftwf_complex* out, unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru64_dft_c2r(int rank, const fftwf_iodim64* dims, int batch_rank, const fftwf_iodim64* batch_dims, fftwf_complex* in, float* out, unsigned flags); + +#ifdef _WIN32 +#define _CUFFTAPI(T) T CUFFTAPI +#else +#define _CUFFTAPI(T) CUFFTAPI T +#endif + +// CUFFTW defines and supports the following support APIs +_CUFFTAPI(void *) fftw_malloc(size_t n); + +_CUFFTAPI(void *) fftwf_malloc(size_t n); + +void CUFFTAPI fftw_free(void *pointer); + +void CUFFTAPI fftwf_free(void *pointer); + +void CUFFTAPI fftw_export_wisdom_to_file(FILE * output_file); + +void CUFFTAPI fftwf_export_wisdom_to_file(FILE * output_file); + +void CUFFTAPI fftw_import_wisdom_from_file(FILE * input_file); + +void CUFFTAPI fftwf_import_wisdom_from_file(FILE * input_file); + +void CUFFTAPI fftw_print_plan(const fftw_plan plan); + +void CUFFTAPI fftwf_print_plan(const fftwf_plan plan); + +void CUFFTAPI fftw_set_timelimit(double seconds); + +void CUFFTAPI fftwf_set_timelimit(double seconds); + +double CUFFTAPI fftw_cost(const fftw_plan plan); + +double CUFFTAPI fftwf_cost(const fftw_plan plan); + +void CUFFTAPI fftw_flops(const fftw_plan plan, double *add, double *mul, double *fma); + +void CUFFTAPI fftwf_flops(const fftw_plan plan, double *add, double *mul, double *fma); + +void CUFFTAPI fftw_destroy_plan(fftw_plan plan); + +void CUFFTAPI fftwf_destroy_plan(fftwf_plan plan); + 
+void CUFFTAPI fftw_cleanup(void); + +void CUFFTAPI fftwf_cleanup(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _CUFFTW_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..456aba477409939f085583b502ab6d2fcbdb8322 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 new file mode 100644 index 0000000000000000000000000000000000000000..ecf81f1c12f2451b00d40ce4f37c526f7063fc31 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab06d9dfcfaf88ec2bcfb4c16b76ff0bf3b2728370d212e28607f53e1d40eff5 +size 1614344 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fde097a08de3cee28572d749a712826f1c6bb00a Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/include/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dfc76205a4e20511e1373867a06d6cdf6ea3946 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/include/nvJitLink.h b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/include/nvJitLink.h new file mode 100644 index 0000000000000000000000000000000000000000..bc4be77a6eea692dbd06d814d0c21c80d7eaeff2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/include/nvJitLink.h @@ -0,0 +1,522 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ * + * NVIDIA_COPYRIGHT_END + */ + +#ifndef nvJitLink_INCLUDED +#define nvJitLink_INCLUDED + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/** + * + * \defgroup error Error codes + * + */ + +/** \ingroup error + * + * \brief The enumerated type nvJitLinkResult defines API call result codes. + * nvJitLink APIs return nvJitLinkResult codes to indicate the result. + */ + +typedef enum { + NVJITLINK_SUCCESS = 0, + NVJITLINK_ERROR_UNRECOGNIZED_OPTION, + NVJITLINK_ERROR_MISSING_ARCH, // -arch=sm_NN option not specified + NVJITLINK_ERROR_INVALID_INPUT, + NVJITLINK_ERROR_PTX_COMPILE, + NVJITLINK_ERROR_NVVM_COMPILE, + NVJITLINK_ERROR_INTERNAL, + NVJITLINK_ERROR_THREADPOOL, + NVJITLINK_ERROR_UNRECOGNIZED_INPUT, +#ifdef NEW_ERROR_CODES // These error codes will appear in a future CUDA release. + NVJITLINK_ERROR_NULL_INPUT, + NVJITLINK_ERROR_INCOMPATIBLE_OPTIONS, + NVJITLINK_ERROR_INCORRECT_INPUT_TYPE, + NVJITLINK_ERROR_ARCH_MISMATCH, + NVJITLINK_ERROR_OUTDATED_LIBRARY, + NVJITLINK_ERROR_MISSING_FATBIN +#endif +} nvJitLinkResult; + +#ifndef NEW_ERROR_CODES // To avoid breaking compatibility, we map them to existing error codes for now. +#define NVJITLINK_ERROR_NULL_INPUT NVJITLINK_ERROR_INVALID_INPUT +#define NVJITLINK_ERROR_INCOMPATIBLE_OPTIONS NVJITLINK_ERROR_INVALID_INPUT +#define NVJITLINK_ERROR_INCORRECT_INPUT_TYPE NVJITLINK_ERROR_INVALID_INPUT +#define NVJITLINK_ERROR_ARCH_MISMATCH NVJITLINK_ERROR_INTERNAL +#define NVJITLINK_ERROR_OUTDATED_LIBRARY NVJITLINK_ERROR_INTERNAL +#define NVJITLINK_ERROR_MISSING_FATBIN NVJITLINK_ERROR_INVALID_INPUT +#endif + +/** + * + * \defgroup linking Linking + * + */ + +/** \ingroup linking + * + * \brief The enumerated type nvJitLinkInputType defines the kind of inputs + * that can be passed to nvJitLinkAdd* APIs. 
+ */ + +typedef enum { + NVJITLINK_INPUT_NONE = 0, // error + NVJITLINK_INPUT_CUBIN = 1, + NVJITLINK_INPUT_PTX, + NVJITLINK_INPUT_LTOIR, + NVJITLINK_INPUT_FATBIN, + NVJITLINK_INPUT_OBJECT, + NVJITLINK_INPUT_LIBRARY, + NVJITLINK_INPUT_ANY = 10 // will dynamically determine one of above types +} nvJitLinkInputType; + +/** + * \defgroup options Supported Link Options + * + * nvJitLink supports the link options below. + * Option names are prefixed with a single dash (\c -). + * Options that take a value have an assignment operator (\c =) + * followed by the option value, with no spaces, e.g. \c "-arch=sm_90". + * + * The supported options are: + * - \c -arch=sm_ \n + * Pass SM architecture value. See nvcc for valid values of . + * Can use compute_ value instead if only generating PTX. + * This is a required option. + * - \c -maxrregcount= \n + * Maximum register count. + * - \c -time \n + * Print timing information to InfoLog. + * - \c -verbose \n + * Print verbose messages to InfoLog. + * - \c -lto \n + * Do link time optimization. + * - \c -ptx \n + * Emit ptx after linking instead of cubin; only supported with \c -lto + * - \c -O \n + * Optimization level. Only 0 and 3 are accepted. + * - \c -g \n + * Generate debug information. + * - \c -lineinfo \n + * Generate line information. + * - \c -ftz= \n + * Flush to zero. + * - \c -prec-div= \n + * Precise divide. + * - \c -prec-sqrt= \n + * Precise square root. + * - \c -fma= \n + * Fast multiply add. + * - \c -kernels-used= \n + * Pass list of kernels that are used; any not in the list can be removed. + * This option can be specified multiple times. + * - \c -variables-used= \n + * Pass list of variables that are used; any not in the list can be removed. + * This option can be specified multiple times. + * - \c -optimize-unused-variables \n + * Normally device code optimization is limited by not knowing what the + * host code references. 
With this option it can assume that if a variable + * is not referenced in device code then it can be removed. + * - \c -Xptxas= \n + * Pass to ptxas. This option can be called multiple times. + * - \c -split-compile= \n + * Split compilation maximum thread count. Use 0 to use all available processors. + * Value of 1 disables split compilation (default). + * - \c -split-compile-extended= \n + * [Experimental] A more aggressive form of split compilation. + * Accepts a maximum thread count value. Use 0 to use all available processors. + * Value of 1 disables extended split compilation (default). + * - \c -jump-table-density= \n + * When doing LTO, specify the case density percentage in switch statements, + * and use it as a minimal threshold to determine whether jump table(brx.idx + * instruction) will be used to implement a switch statement. Default + * value is 101. The percentage ranges from 0 to 101 inclusively. + */ + +/** + * \ingroup linking + * \brief nvJitLinkHandle is the unit of linking, and an opaque handle for + * a program. + * + * To link inputs, an instance of nvJitLinkHandle must be created first with + * nvJitLinkCreate(). + */ + +typedef struct nvJitLink* nvJitLinkHandle; // opaque handle + +// For versioning we will have separate API version for each library version + +extern nvJitLinkResult __nvJitLinkCreate_12_4( + nvJitLinkHandle *handle, + uint32_t numOptions, + const char **options); +/** + * \ingroup linking + * \brief nvJitLinkCreate creates an instance of nvJitLinkHandle with the + * given input options, and sets the output parameter \p handle. + * + * \param [out] handle Address of nvJitLink handle. + * \param [in] numOptions Number of options passed. + * \param [in] options Array of size \p numOptions of option strings. 
+ * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_UNRECOGNIZED_OPTION\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_MISSING_ARCH\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * It supports options listed in \ref options. + * + * \see nvJitLinkDestroy + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkCreate( + nvJitLinkHandle *handle, + uint32_t numOptions, + const char **options) +{ + return __nvJitLinkCreate_12_4 (handle, numOptions, options); +} +#endif + +extern nvJitLinkResult __nvJitLinkDestroy_12_4 (nvJitLinkHandle *handle); +/** + * \ingroup linking + * \brief nvJitLinkDestroy frees the memory associated with the given handle + * and sets it to NULL. + * + * \param [in] handle Address of nvJitLink handle. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * \see nvJitLinkCreate + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkDestroy (nvJitLinkHandle *handle) +{ + return __nvJitLinkDestroy_12_4 (handle); +} +#endif + +extern nvJitLinkResult __nvJitLinkAddData_12_4( + nvJitLinkHandle handle, + nvJitLinkInputType inputType, + const void *data, + size_t size, + const char *name); // name can be null +/** + * \ingroup linking + * \brief nvJitLinkAddData adds data image to the link. + * + * \param [in] handle nvJitLink handle. + * \param [in] inputType kind of input. + * \param [in] data pointer to data image in memory. + * \param [in] size size of the data. + * \param [in] name name of input object. 
+ * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkAddData( + nvJitLinkHandle handle, + nvJitLinkInputType inputType, + const void *data, + size_t size, + const char *name) // name can be null +{ + return __nvJitLinkAddData_12_4 (handle, inputType, data, size, name); +} +#endif + +extern nvJitLinkResult __nvJitLinkAddFile_12_4( + nvJitLinkHandle handle, + nvJitLinkInputType inputType, + const char *fileName); // includes path to file +/** + * \ingroup linking + * \brief nvJitLinkAddFile reads data from file and links it in. + * + * \param [in] handle nvJitLink handle. + * \param [in] inputType kind of input. + * \param [in] fileName name of file. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkAddFile( + nvJitLinkHandle handle, + nvJitLinkInputType inputType, + const char *fileName) // includes path to file +{ + return __nvJitLinkAddFile_12_4 (handle, inputType, fileName); +} +#endif + +extern nvJitLinkResult __nvJitLinkComplete_12_4 (nvJitLinkHandle handle); +/** + * \ingroup linking + * \brief nvJitLinkComplete does the actual link. + * + * \param [in] handle nvJitLink handle. 
+ * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkComplete (nvJitLinkHandle handle) +{ + return __nvJitLinkComplete_12_4 (handle); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetLinkedCubinSize_12_4( + nvJitLinkHandle handle, + size_t *size); +/** + * \ingroup linking + * \brief nvJitLinkGetLinkedCubinSize gets the size of the linked cubin. + * + * \param [in] handle nvJitLink handle. + * \param [out] size Size of the linked cubin. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * \see nvJitLinkGetLinkedCubin + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetLinkedCubinSize( + nvJitLinkHandle handle, + size_t *size) +{ + return __nvJitLinkGetLinkedCubinSize_12_4 (handle, size); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetLinkedCubin_12_4( + nvJitLinkHandle handle, + void *cubin); +/** + * \ingroup linking + * \brief nvJitLinkGetLinkedCubin gets the linked cubin. + * + * \param [in] handle nvJitLink handle. + * \param [out] cubin The linked cubin. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * User is responsible for allocating enough space to hold the \p cubin. 
+ * \see nvJitLinkGetLinkedCubinSize + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetLinkedCubin( + nvJitLinkHandle handle, + void *cubin) +{ + return __nvJitLinkGetLinkedCubin_12_4 (handle, cubin); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetLinkedPtxSize_12_4( + nvJitLinkHandle handle, + size_t *size); +/** + * \ingroup linking + * \brief nvJitLinkGetLinkedPtxSize gets the size of the linked ptx. + * + * \param [in] handle nvJitLink handle. + * \param [out] size Size of the linked PTX. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * Linked PTX is only available when using the \c -lto option. + * \see nvJitLinkGetLinkedPtx + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetLinkedPtxSize( + nvJitLinkHandle handle, + size_t *size) +{ + return __nvJitLinkGetLinkedPtxSize_12_4 (handle, size); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetLinkedPtx_12_4( + nvJitLinkHandle handle, + char *ptx); +/** + * \ingroup linking + * \brief nvJitLinkGetLinkedPtx gets the linked ptx. + * + * \param [in] handle nvJitLink handle. + * \param [out] ptx The linked PTX. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * Linked PTX is only available when using the \c -lto option. + * User is responsible for allocating enough space to hold the \p ptx. 
+ * \see nvJitLinkGetLinkedPtxSize + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetLinkedPtx( + nvJitLinkHandle handle, + char *ptx) +{ + return __nvJitLinkGetLinkedPtx_12_4 (handle, ptx); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetErrorLogSize_12_4( + nvJitLinkHandle handle, + size_t *size); +/** + * \ingroup linking + * \brief nvJitLinkGetErrorLogSize gets the size of the error log. + * + * \param [in] handle nvJitLink handle. + * \param [out] size Size of the error log. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * \see nvJitLinkGetErrorLog + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetErrorLogSize( + nvJitLinkHandle handle, + size_t *size) +{ + return __nvJitLinkGetErrorLogSize_12_4 (handle, size); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetErrorLog_12_4( + nvJitLinkHandle handle, + char *log); +/** + * \ingroup linking + * \brief nvJitLinkGetErrorLog puts any error messages in the log. + * + * \param [in] handle nvJitLink handle. + * \param [out] log The error log. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * User is responsible for allocating enough space to hold the \p log. + * \see nvJitLinkGetErrorLogSize + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetErrorLog( + nvJitLinkHandle handle, + char *log) +{ + return __nvJitLinkGetErrorLog_12_4 (handle, log); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetInfoLogSize_12_4( + nvJitLinkHandle handle, + size_t *size); +/** + * \ingroup linking + * \brief nvJitLinkGetInfoLogSize gets the size of the info log. + * + * \param [in] handle nvJitLink handle. 
+ * \param [out] size Size of the info log. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * \see nvJitLinkGetInfoLog + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetInfoLogSize( + nvJitLinkHandle handle, + size_t *size) +{ + return __nvJitLinkGetInfoLogSize_12_4 (handle, size); +} +#endif + +extern nvJitLinkResult __nvJitLinkGetInfoLog_12_4( + nvJitLinkHandle handle, + char *log); +/** + * \ingroup linking + * \brief nvJitLinkGetInfoLog puts any info messages in the log. + * + * \param [in] handle nvJitLink handle. + * \param [out] log The info log. + * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + * User is responsible for allocating enough space to hold the \p log. + * \see nvJitLinkGetInfoLogSize + */ +#ifndef NVJITLINK_NO_INLINE +static inline nvJitLinkResult nvJitLinkGetInfoLog( + nvJitLinkHandle handle, + char *log) +{ + return __nvJitLinkGetInfoLog_12_4 (handle, log); +} +#endif + +/** + * \ingroup linking + * \brief nvJitLinkVersion returns the current version of nvJitLink. + * + * \param [out] major The major version. + * \param [out] minor The minor version. 
+ * \return + * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink + * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink + * + */ +extern nvJitLinkResult nvJitLinkVersion( + unsigned int *major, + unsigned int *minor); + +#ifdef __cplusplus +} +#endif + +#endif // nvJitLink_INCLUDED + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..706f14cba87bff515ab0d8091ff659e5be099e09 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/__init__.py b/llmeval-env/lib/python3.10/site-packages/packaging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7c0aa12ca950f230c8092436a985b2305702642 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/__init__.py @@ -0,0 +1,15 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "24.0" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014 %s" % __author__ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_manylinux.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_manylinux.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d958bba0a5043c9014dc29fc97560cb700973c6f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_manylinux.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_parser.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65a402e456c10c952a108a3b92db08a97dc7dae4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_parser.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f654b1d6f0c2db62d98a0af52caaf9fed780e5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/markers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/markers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ec9de936a7f4bd635216d43d12a3c1863b6b307 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/markers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c440f8bffe67b04d28d526a01e41912c883097cc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/tags.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/tags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10769aa18b31f5718a778b053baa7282b02c0dfe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/tags.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e20893c21f35ec61c5d9988c1c78ed11d5778cc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/packaging/__pycache__/version.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/_elffile.py b/llmeval-env/lib/python3.10/site-packages/packaging/_elffile.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb19b30bb53c18f38a9ef02dd7c4478670fb962 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/_elffile.py @@ -0,0 +1,108 @@ +""" +ELF file parser. + +This provides a class ``ELFFile`` that parses an ELF executable in a similar +interface to ``ZipFile``. Only the read interface is implemented. 
+ +Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca +ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html +""" + +import enum +import os +import struct +from typing import IO, Optional, Tuple + + +class ELFInvalid(ValueError): + pass + + +class EIClass(enum.IntEnum): + C32 = 1 + C64 = 2 + + +class EIData(enum.IntEnum): + Lsb = 1 + Msb = 2 + + +class EMachine(enum.IntEnum): + I386 = 3 + S390 = 22 + Arm = 40 + X8664 = 62 + AArc64 = 183 + + +class ELFFile: + """ + Representation of an ELF executable. + """ + + def __init__(self, f: IO[bytes]) -> None: + self._f = f + + try: + ident = self._read("16B") + except struct.error: + raise ELFInvalid("unable to parse identification") + magic = bytes(ident[:4]) + if magic != b"\x7fELF": + raise ELFInvalid(f"invalid magic: {magic!r}") + + self.capacity = ident[4] # Format for program header (bitness). + self.encoding = ident[5] # Data structure encoding (endianness). + + try: + # e_fmt: Format for program header. + # p_fmt: Format for section header. + # p_idx: Indexes to find p_type, p_offset, and p_filesz. + e_fmt, self._p_fmt, self._p_idx = { + (1, 1): ("HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB. + (2, 1): ("HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB. + }[(self.capacity, self.encoding)] + except KeyError: + raise ELFInvalid( + f"unrecognized capacity ({self.capacity}) or " + f"encoding ({self.encoding})" + ) + + try: + ( + _, + self.machine, # Architecture type. + _, + _, + self._e_phoff, # Offset of program header. + _, + self.flags, # Processor-specific flags. + _, + self._e_phentsize, # Size of section. + self._e_phnum, # Number of sections. 
+ ) = self._read(e_fmt) + except struct.error as e: + raise ELFInvalid("unable to parse machine and section information") from e + + def _read(self, fmt: str) -> Tuple[int, ...]: + return struct.unpack(fmt, self._f.read(struct.calcsize(fmt))) + + @property + def interpreter(self) -> Optional[str]: + """ + The path recorded in the ``PT_INTERP`` section header. + """ + for index in range(self._e_phnum): + self._f.seek(self._e_phoff + self._e_phentsize * index) + try: + data = self._read(self._p_fmt) + except struct.error: + continue + if data[self._p_idx[0]] != 3: # Not PT_INTERP. + continue + self._f.seek(data[self._p_idx[1]]) + return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") + return None diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/_manylinux.py b/llmeval-env/lib/python3.10/site-packages/packaging/_manylinux.py new file mode 100644 index 0000000000000000000000000000000000000000..ad62505f3ff66c3d4da07ce1f2a50d9f10bc1bdd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/_manylinux.py @@ -0,0 +1,260 @@ +import collections +import contextlib +import functools +import os +import re +import sys +import warnings +from typing import Dict, Generator, Iterator, NamedTuple, Optional, Sequence, Tuple + +from ._elffile import EIClass, EIData, ELFFile, EMachine + +EF_ARM_ABIMASK = 0xFF000000 +EF_ARM_ABI_VER5 = 0x05000000 +EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + +# `os.PathLike` not a generic type until Python 3.9, so sticking with `str` +# as the type for `path` until then. 
+@contextlib.contextmanager +def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]: + try: + with open(path, "rb") as f: + yield ELFFile(f) + except (OSError, TypeError, ValueError): + yield None + + +def _is_linux_armhf(executable: str) -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.Arm + and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 + and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD + ) + + +def _is_linux_i686(executable: str) -> bool: + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.I386 + ) + + +def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: + if "armv7l" in archs: + return _is_linux_armhf(executable) + if "i686" in archs: + return _is_linux_i686(executable) + allowed_archs = { + "x86_64", + "aarch64", + "ppc64", + "ppc64le", + "s390x", + "loongarch64", + "riscv64", + } + return any(arch in allowed_archs for arch in archs) + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. +# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> Optional[str]: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. 
This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # Should be a string like "glibc 2.17". + version_string: Optional[str] = os.confstr("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.rsplit() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> Optional[str]: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. 
+ return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +def _glibc_version_string() -> Optional[str]: + """Returns glibc version string, or None if not using glibc.""" + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _parse_glibc_version(version_str: str) -> Tuple[int, int]: + """Parse glibc version. + + We use a regexp instead of str.split because we want to discard any + random junk that might come after the minor version -- this might happen + in patched/forked versions of glibc (e.g. Linaro's version of glibc + uses version strings like "2.20-2014.11"). See gh-3588. + """ + m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn( + f"Expected glibc version with 2 components major.minor," + f" got: {version_str}", + RuntimeWarning, + ) + return -1, -1 + return int(m.group("major")), int(m.group("minor")) + + +@functools.lru_cache() +def _get_glibc_version() -> Tuple[int, int]: + version_str = _glibc_version_string() + if version_str is None: + return (-1, -1) + return _parse_glibc_version(version_str) + + +# From PEP 513, PEP 600 +def _is_compatible(arch: str, version: _GLibCVersion) -> bool: + sys_glibc = _get_glibc_version() + if sys_glibc < version: + return False + # Check for presence of _manylinux module. 
+ try: + import _manylinux + except ImportError: + return True + if hasattr(_manylinux, "manylinux_compatible"): + result = _manylinux.manylinux_compatible(version[0], version[1], arch) + if result is not None: + return bool(result) + return True + if version == _GLibCVersion(2, 5): + if hasattr(_manylinux, "manylinux1_compatible"): + return bool(_manylinux.manylinux1_compatible) + if version == _GLibCVersion(2, 12): + if hasattr(_manylinux, "manylinux2010_compatible"): + return bool(_manylinux.manylinux2010_compatible) + if version == _GLibCVersion(2, 17): + if hasattr(_manylinux, "manylinux2014_compatible"): + return bool(_manylinux.manylinux2014_compatible) + return True + + +_LEGACY_MANYLINUX_MAP = { + # CentOS 7 w/ glibc 2.17 (PEP 599) + (2, 17): "manylinux2014", + # CentOS 6 w/ glibc 2.12 (PEP 571) + (2, 12): "manylinux2010", + # CentOS 5 w/ glibc 2.5 (PEP 513) + (2, 5): "manylinux1", +} + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate manylinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be manylinux-compatible. + + :returns: An iterator of compatible manylinux tags. + """ + if not _have_compatible_abi(sys.executable, archs): + return + # Oldest glibc to be supported regardless of architecture is (2, 17). + too_old_glibc2 = _GLibCVersion(2, 16) + if set(archs) & {"x86_64", "i686"}: + # On x86/i686 also oldest glibc to be supported is (2, 5). + too_old_glibc2 = _GLibCVersion(2, 4) + current_glibc = _GLibCVersion(*_get_glibc_version()) + glibc_max_list = [current_glibc] + # We can assume compatibility across glibc major versions. 
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 + # + # Build a list of maximum glibc versions so that we can + # output the canonical list of all glibc from current_glibc + # down to too_old_glibc2, including all intermediary versions. + for glibc_major in range(current_glibc.major - 1, 1, -1): + glibc_minor = _LAST_GLIBC_MINOR[glibc_major] + glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) + for arch in archs: + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(arch, glibc_version): + yield f"{tag}_{arch}" + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. + if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(arch, glibc_version): + yield f"{legacy_tag}_{arch}" diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/_musllinux.py b/llmeval-env/lib/python3.10/site-packages/packaging/_musllinux.py new file mode 100644 index 0000000000000000000000000000000000000000..86419df9d7087f3f8b6d0096f32a52c24b05e7c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/_musllinux.py @@ -0,0 +1,83 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. 
+""" + +import functools +import re +import subprocess +import sys +from typing import Iterator, NamedTuple, Optional, Sequence + +from ._elffile import ELFFile + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> Optional[_MuslVersion]: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache() +def _get_musl_version(executable: str) -> Optional[_MuslVersion]: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. 
+ return + for arch in archs: + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/_parser.py b/llmeval-env/lib/python3.10/site-packages/packaging/_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..684df75457cb82d3683dc99ff52c5bf911f3341b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/_parser.py @@ -0,0 +1,356 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains ENBF-inspired grammar representing +the implementation. +""" + +import ast +from typing import Any, List, NamedTuple, Optional, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]] +# MarkerList = List[Union["MarkerList", MarkerAtom, str]] +# mypy does not support recursive type definition +# https://github.com/python/mypy/issues/731 +MarkerAtom = Any +MarkerList = List[Any] + + +class ParsedRequirement(NamedTuple): + 
name: str + url: str + extras: List[str] + specifier: str + marker: Optional[MarkerList] + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> Tuple[str, str, Optional[MarkerList]]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? + """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. 
+ if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? + """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> List[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? 
+ """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: List[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? 
+ """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: + retval = _parse_marker(tokenizer) + tokenizer.expect("END", expected="end of marker expression") + return retval + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? 
+ """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? + """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if env_var in ("platform_python_implementation", "python_implementation"): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + 
else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/_structures.py b/llmeval-env/lib/python3.10/site-packages/packaging/_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..90a6465f9682c886363eea5327dac64bf623a6ff --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/_tokenizer.py b/llmeval-env/lib/python3.10/site-packages/packaging/_tokenizer.py new file mode 100644 index 
import contextlib
import re
from dataclasses import dataclass
from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union

from .specifiers import Specifier


@dataclass
class Token:
    """A single lexeme: the rule that matched, its text, and its source offset."""

    name: str
    text: str
    position: int


class ParserSyntaxError(Exception):
    """The provided source text could not be parsed correctly."""

    def __init__(
        self,
        message: str,
        *,
        source: str,
        span: Tuple[int, int],
    ) -> None:
        self.span = span
        self.message = message
        self.source = source

        super().__init__()

    def __str__(self) -> str:
        # Render a "~~~^" diagram underneath the offending span of the source.
        start, end = self.span
        marker = " " * start + "~" * (end - start) + "^"
        return "\n    ".join([self.message, self.source, marker])


DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
    "LEFT_PARENTHESIS": r"\(",
    "RIGHT_PARENTHESIS": r"\)",
    "LEFT_BRACKET": r"\[",
    "RIGHT_BRACKET": r"\]",
    "SEMICOLON": r";",
    "COMMA": r",",
    "QUOTED_STRING": re.compile(
        r"""
            (
                ('[^']*')
                |
                ("[^"]*")
            )
        """,
        re.VERBOSE,
    ),
    "OP": r"(===|==|~=|!=|<=|>=|<|>)",
    "BOOLOP": r"\b(or|and)\b",
    "IN": r"\bin\b",
    "NOT": r"\bnot\b",
    "VARIABLE": re.compile(
        r"""
            \b(
                python_version
                |python_full_version
                |os[._]name
                |sys[._]platform
                |platform_(release|system)
                |platform[._](version|machine|python_implementation)
                |python_implementation
                |implementation_(name|version)
                |extra
            )\b
        """,
        re.VERBOSE,
    ),
    "SPECIFIER": re.compile(
        Specifier._operator_regex_str + Specifier._version_regex_str,
        re.VERBOSE | re.IGNORECASE,
    ),
    "AT": r"\@",
    "URL": r"[^ \t]+",
    "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
    "VERSION_PREFIX_TRAIL": r"\.\*",
    "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
    "WS": r"[ \t]+",
    "END": r"$",
}


class Tokenizer:
    """Context-sensitive token parsing.

    Provides methods to examine the input stream to check whether the next token
    matches.
    """

    def __init__(
        self,
        source: str,
        *,
        rules: "Dict[str, Union[str, re.Pattern[str]]]",
    ) -> None:
        self.source = source
        # re.compile() is a no-op on an already-compiled pattern, so rules may
        # freely mix raw strings and pre-compiled patterns.
        self.rules: Dict[str, re.Pattern[str]] = {
            rule_name: re.compile(rule) for rule_name, rule in rules.items()
        }
        self.next_token: Optional[Token] = None
        self.position = 0

    def consume(self, name: str) -> None:
        """Move beyond provided token name, if at current position."""
        if self.check(name):
            self.read()

    def check(self, name: str, *, peek: bool = False) -> bool:
        """Check whether the next token has the provided name.

        By default, if the check succeeds, the token *must* be read before
        another check. If `peek` is set to `True`, the token is not loaded and
        would need to be checked again.
        """
        assert (
            self.next_token is None
        ), f"Cannot check for {name!r}, already have {self.next_token!r}"
        assert name in self.rules, f"Unknown token name: {name!r}"

        matched = self.rules[name].match(self.source, self.position)
        if matched is None:
            return False
        if not peek:
            # Stash the token so a subsequent read() consumes exactly this match.
            self.next_token = Token(name, matched[0], self.position)
        return True

    def expect(self, name: str, *, expected: str) -> Token:
        """Expect a certain token name next, failing with a syntax error otherwise.

        On success the token is consumed and returned.
        """
        if not self.check(name):
            # raise_syntax_error() never returns.
            self.raise_syntax_error(f"Expected {expected}")
        return self.read()

    def read(self) -> Token:
        """Consume the next token and return it."""
        tok = self.next_token
        assert tok is not None

        self.position += len(tok.text)
        self.next_token = None

        return tok

    def raise_syntax_error(
        self,
        message: str,
        *,
        span_start: Optional[int] = None,
        span_end: Optional[int] = None,
    ) -> NoReturn:
        """Raise ParserSyntaxError at the given position."""
        start = self.position if span_start is None else span_start
        end = self.position if span_end is None else span_end
        raise ParserSyntaxError(
            message,
            source=self.source,
            span=(start, end),
        )

    @contextlib.contextmanager
    def enclosing_tokens(
        self, open_token: str, close_token: str, *, around: str
    ) -> Iterator[None]:
        """Run the enclosed parse inside an *optional* open/close token pair.

        If the opening token was present, the matching closing token is
        required after the yield; otherwise a syntax error is raised.
        """
        opened_at: Optional[int] = None
        if self.check(open_token):
            opened_at = self.position
            self.read()

        yield

        if opened_at is None:
            return

        if not self.check(close_token):
            self.raise_syntax_error(
                f"Expected matching {close_token} for {open_token}, after {around}",
                span_start=opened_at,
            )

        self.read()
import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from ._parser import (
    MarkerAtom,
    MarkerList,
    Op,
    Value,
    Variable,
    parse_marker as _parse_marker,
)
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name

__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]

Operator = Callable[[str, str], bool]


class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.
    """


class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """


class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """


def _normalize_extra_values(results: Any) -> Any:
    """
    Normalize extra values.

    If the first parsed clause compares the ``extra`` variable, its value side
    is replaced by its PEP 503/685 canonicalized form so later comparisons are
    spelling-insensitive.
    """
    if isinstance(results[0], tuple):
        lhs, op, rhs = results[0]
        if isinstance(lhs, Variable) and lhs.value == "extra":
            normalized_extra = canonicalize_name(rhs.value)
            rhs = Value(normalized_extra)
        elif isinstance(rhs, Variable) and rhs.value == "extra":
            normalized_extra = canonicalize_name(lhs.value)
            lhs = Value(normalized_extra)
        results[0] = lhs, op, rhs
    return results


def _format_marker(
    marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
) -> str:
    """Serialize a parsed marker (sub)tree back into PEP 508 text."""

    assert isinstance(marker, (list, tuple, str))

    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to
    # skip the rest of this function so that we don't get extraneous () on the
    # outside.
    if (
        isinstance(marker, list)
        and len(marker) == 1
        and isinstance(marker[0], (list, tuple))
    ):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        return " ".join([m.serialize() for m in marker])
    else:
        return marker


_operators: Dict[str, Operator] = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
    """Evaluate one comparison, preferring PEP 440 specifier semantics.

    If ``op + rhs`` forms a valid version specifier, version comparison rules
    apply; otherwise fall back to plain string/containment operators.

    :raises UndefinedComparison: if the operator is not supported.
    """
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs, prereleases=True)

    oper: Optional[Operator] = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")

    return oper(lhs, rhs)


def _normalize(*values: str, key: str) -> Tuple[str, ...]:
    # PEP 685 – Comparison of extra names for optional distribution dependencies
    # https://peps.python.org/pep-0685/
    # > When comparing extra names, tools MUST normalize the names being
    # > compared using the semantics outlined in PEP 503 for names
    if key == "extra":
        return tuple(canonicalize_name(v) for v in values)

    # other environment markers don't have such standards
    return values


def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
    """Evaluate a parsed marker tree against *environment*.

    ``groups`` holds the OR-separated groups; each group is a list of
    AND-joined boolean results, so the final answer is any(all(...)).
    """
    groups: List[List[bool]] = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, str))

        if isinstance(marker, list):
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            # Exactly one side is a Variable; look that side up in the
            # environment and take the other side's literal value.
            if isinstance(lhs, Variable):
                environment_key = lhs.value
                lhs_value = environment[environment_key]
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                environment_key = rhs.value
                rhs_value = environment[environment_key]

            lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)


def format_full_version(info: "sys._version_info") -> str:
    """Format a sys.version_info-like tuple as e.g. ``3.10.4`` or ``3.11.0b1``."""
    version = "{0.major}.{0.minor}.{0.micro}".format(info)
    kind = info.releaselevel
    if kind != "final":
        version += kind[0] + str(info.serial)
    return version


def default_environment() -> Dict[str, str]:
    """Return the PEP 508 environment markers for the running interpreter."""
    iver = format_full_version(sys.implementation.version)
    implementation_name = sys.implementation.name
    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }


class Marker:
    def __init__(self, marker: str) -> None:
        # Note: We create a Marker object without calling this constructor in
        #       packaging.requirements.Requirement. If any additional logic is
        #       added here, make sure to mirror/adapt Requirement.
        try:
            self._markers = _normalize_extra_values(_parse_marker(marker))
            # The attribute `_markers` can be described in terms of a recursive type:
            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
            #
            # For example, the following expression:
            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
            #
            # is parsed into:
            # [
            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
            #     'and',
            #     [
            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
            #         'or',
            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
            #     ]
            # ]
        except ParserSyntaxError as e:
            raise InvalidMarker(str(e)) from e

    def __str__(self) -> str:
        return _format_marker(self._markers)

    def __repr__(self) -> str:
        # Fixed: this previously returned an empty f-string (the angle-bracket
        # text had been stripped), making repr(Marker(...)) useless.
        return f"<Marker('{self}')>"

    def __hash__(self) -> int:
        return hash((self.__class__.__name__, str(self)))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Marker):
            return NotImplemented

        return str(self) == str(other)

    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        current_environment["extra"] = ""
        if environment is not None:
            current_environment.update(environment)
            # The API used to allow setting extra to None. We need to handle this
            # case for backwards compatibility.
            if current_environment["extra"] is None:
                current_environment["extra"] = ""

        return _evaluate_markers(self._markers, current_environment)
+ """ + + message: str + exceptions: List[Exception] + + def __init__(self, message: str, exceptions: List[Exception]) -> None: + self.message = message + self.exceptions = exceptions + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" + +else: # pragma: no cover + ExceptionGroup = ExceptionGroup + + +class InvalidMetadata(ValueError): + """A metadata field contains invalid data.""" + + field: str + """The name of the field that contains invalid data.""" + + def __init__(self, field: str, message: str) -> None: + self.field = field + super().__init__(message) + + +# The RawMetadata class attempts to make as few assumptions about the underlying +# serialization formats as possible. The idea is that as long as a serialization +# formats offer some very basic primitives in *some* way then we can support +# serializing to and from that format. +class RawMetadata(TypedDict, total=False): + """A dictionary of raw core metadata. + + Each field in core metadata maps to a key of this dictionary (when data is + provided). The key is lower-case and underscores are used instead of dashes + compared to the equivalent core metadata field. Any core metadata field that + can be specified multiple times or can hold multiple values in a single + field have a key with a plural name. See :class:`Metadata` whose attributes + match the keys of this dictionary. + + Core metadata fields that can be specified multiple times are stored as a + list or dict depending on which is appropriate for the field. Any fields + which hold multiple values in a single field are stored as a list. 
+ + """ + + # Metadata 1.0 - PEP 241 + metadata_version: str + name: str + version: str + platforms: List[str] + summary: str + description: str + keywords: List[str] + home_page: str + author: str + author_email: str + license: str + + # Metadata 1.1 - PEP 314 + supported_platforms: List[str] + download_url: str + classifiers: List[str] + requires: List[str] + provides: List[str] + obsoletes: List[str] + + # Metadata 1.2 - PEP 345 + maintainer: str + maintainer_email: str + requires_dist: List[str] + provides_dist: List[str] + obsoletes_dist: List[str] + requires_python: str + requires_external: List[str] + project_urls: Dict[str, str] + + # Metadata 2.0 + # PEP 426 attempted to completely revamp the metadata format + # but got stuck without ever being able to build consensus on + # it and ultimately ended up withdrawn. + # + # However, a number of tools had started emitting METADATA with + # `2.0` Metadata-Version, so for historical reasons, this version + # was skipped. + + # Metadata 2.1 - PEP 566 + description_content_type: str + provides_extra: List[str] + + # Metadata 2.2 - PEP 643 + dynamic: List[str] + + # Metadata 2.3 - PEP 685 + # No new fields were added in PEP 685, just some edge case were + # tightened up to provide better interoptability. 
+ + +_STRING_FIELDS = { + "author", + "author_email", + "description", + "description_content_type", + "download_url", + "home_page", + "license", + "maintainer", + "maintainer_email", + "metadata_version", + "name", + "requires_python", + "summary", + "version", +} + +_LIST_FIELDS = { + "classifiers", + "dynamic", + "obsoletes", + "obsoletes_dist", + "platforms", + "provides", + "provides_dist", + "provides_extra", + "requires", + "requires_dist", + "requires_external", + "supported_platforms", +} + +_DICT_FIELDS = { + "project_urls", +} + + +def _parse_keywords(data: str) -> List[str]: + """Split a string of comma-separate keyboards into a list of keywords.""" + return [k.strip() for k in data.split(",")] + + +def _parse_project_urls(data: List[str]) -> Dict[str, str]: + """Parse a list of label/URL string pairings separated by a comma.""" + urls = {} + for pair in data: + # Our logic is slightly tricky here as we want to try and do + # *something* reasonable with malformed data. + # + # The main thing that we have to worry about, is data that does + # not have a ',' at all to split the label from the Value. There + # isn't a singular right answer here, and we will fail validation + # later on (if the caller is validating) so it doesn't *really* + # matter, but since the missing value has to be an empty str + # and our return value is dict[str, str], if we let the key + # be the missing value, then they'd have multiple '' values that + # overwrite each other in a accumulating dict. + # + # The other potentional issue is that it's possible to have the + # same label multiple times in the metadata, with no solid "right" + # answer with what to do in that case. As such, we'll do the only + # thing we can, which is treat the field as unparseable and add it + # to our list of unparsed fields. 
+ parts = [p.strip() for p in pair.split(",", 1)] + parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items + + # TODO: The spec doesn't say anything about if the keys should be + # considered case sensitive or not... logically they should + # be case-preserving and case-insensitive, but doing that + # would open up more cases where we might have duplicate + # entries. + label, url = parts + if label in urls: + # The label already exists in our set of urls, so this field + # is unparseable, and we can just add the whole thing to our + # unparseable data and stop processing it. + raise KeyError("duplicate labels in project urls") + urls[label] = url + + return urls + + +def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str: + """Get the body of the message.""" + # If our source is a str, then our caller has managed encodings for us, + # and we don't need to deal with it. + if isinstance(source, str): + payload: str = msg.get_payload() + return payload + # If our source is a bytes, then we're managing the encoding and we need + # to deal with it. + else: + bpayload: bytes = msg.get_payload(decode=True) + try: + return bpayload.decode("utf8", "strict") + except UnicodeDecodeError: + raise ValueError("payload in an invalid encoding") + + +# The various parse_FORMAT functions here are intended to be as lenient as +# possible in their parsing, while still returning a correctly typed +# RawMetadata. +# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with. +# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. 
_EMAIL_TO_RAW_MAPPING = {
    "author": "author",
    "author-email": "author_email",
    "classifier": "classifiers",
    "description": "description",
    "description-content-type": "description_content_type",
    "download-url": "download_url",
    "dynamic": "dynamic",
    "home-page": "home_page",
    "keywords": "keywords",
    "license": "license",
    "maintainer": "maintainer",
    "maintainer-email": "maintainer_email",
    "metadata-version": "metadata_version",
    "name": "name",
    "obsoletes": "obsoletes",
    "obsoletes-dist": "obsoletes_dist",
    "platform": "platforms",
    "project-url": "project_urls",
    "provides": "provides",
    "provides-dist": "provides_dist",
    "provides-extra": "provides_extra",
    "requires": "requires",
    "requires-dist": "requires_dist",
    "requires-external": "requires_external",
    "requires-python": "requires_python",
    "summary": "summary",
    "supported-platform": "supported_platforms",
    "version": "version",
}
# Inverse mapping: RawMetadata key -> lower-cased email header name.
_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()}


def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]:
    """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``).

    This function returns a two-item tuple of dicts. The first dict is of
    recognized fields from the core metadata specification. Fields that can be
    parsed and translated into Python's built-in types are converted
    appropriately. All other fields are left as-is. Fields that are allowed to
    appear multiple times are stored as lists.

    The second dict contains all other fields from the metadata. This includes
    any unrecognized fields. It also includes any fields which are expected to
    be parsed into a built-in type but were not formatted appropriately. Finally,
    any fields that are expected to appear only once but are repeated are
    included in this dict.

    """
    raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
    unparsed: Dict[str, List[str]] = {}

    # compat32 keeps the parser as close to "raw RFC 822 headers" as possible.
    if isinstance(data, str):
        parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
    else:
        parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)

    # We have to wrap parsed.keys() in a set, because in the case of multiple
    # values for a key (a list), the key will appear multiple times in the
    # list of keys, but we're avoiding that by using get_all().
    for name in frozenset(parsed.keys()):
        # Header names in RFC are case insensitive, so we'll normalize to all
        # lower case to make comparisons easier.
        name = name.lower()

        # We use get_all() here, even for fields that aren't multiple use,
        # because otherwise someone could have e.g. two Name fields, and we
        # would just silently ignore it rather than doing something about it.
        headers = parsed.get_all(name) or []

        # The way the email module works when parsing bytes is that it
        # unconditionally decodes the bytes as ascii using the surrogateescape
        # handler. When you pull that data back out (such as with get_all() ),
        # it looks to see if the str has any surrogate escapes, and if it does
        # it wraps it in a Header object instead of returning the string.
        #
        # As such, we'll look for those Header objects, and fix up the encoding.
        value = []
        # Flag if we have run into any issues processing the headers, thus
        # signalling that the data belongs in 'unparsed'.
        valid_encoding = True
        for h in headers:
            # It's unclear if this can return more types than just a Header or
            # a str, so we'll just assert here to make sure.
            assert isinstance(h, (email.header.Header, str))

            # If it's a header object, we need to do our little dance to get
            # the real data out of it. In cases where there is invalid data
            # we're going to end up with mojibake, but there's no obvious, good
            # way around that without reimplementing parts of the Header object
            # ourselves.
            #
            # That should be fine since, if mojibake happens, this key is
            # going into the unparsed dict anyways.
            if isinstance(h, email.header.Header):
                # The Header object stores its data as chunks, and each chunk
                # can be independently encoded, so we'll need to check each
                # of them.
                chunks: List[Tuple[bytes, Optional[str]]] = []
                for bin, encoding in email.header.decode_header(h):
                    try:
                        bin.decode("utf8", "strict")
                    except UnicodeDecodeError:
                        # Enable mojibake.
                        encoding = "latin1"
                        valid_encoding = False
                    else:
                        encoding = "utf8"
                    chunks.append((bin, encoding))

                # Turn our chunks back into a Header object, then let that
                # Header object do the right thing to turn them into a
                # string for us.
                value.append(str(email.header.make_header(chunks)))
            # This is already a string, so just add it.
            else:
                value.append(h)

        # We've processed all of our values to get them into a list of str,
        # but we may have mojibake data, in which case this is an unparsed
        # field.
        if not valid_encoding:
            unparsed[name] = value
            continue

        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
        if raw_name is None:
            # This is a bit of a weird situation, we've encountered a key that
            # we don't know what it means, so we don't know whether it's meant
            # to be a list or not.
            #
            # Since we can't really tell one way or another, we'll just leave it
            # as a list, even though it may be a single item list, because that's
            # what makes the most sense for email headers.
            unparsed[name] = value
            continue

        # If this is one of our string fields, then we'll check to see if our
        # value is a list of a single item. If it is then we'll assume that
        # it was emitted as a single string, and unwrap the str from inside
        # the list.
        #
        # If it's any other kind of data, then we haven't the faintest clue
        # what we should parse it as, and we have to just add it to our list
        # of unparsed stuff.
        if raw_name in _STRING_FIELDS and len(value) == 1:
            raw[raw_name] = value[0]
        # If this is one of our list of string fields, then we can just assign
        # the value, since email *only* has strings, and our get_all() call
        # above ensures that this is a list.
        elif raw_name in _LIST_FIELDS:
            raw[raw_name] = value
        # Special Case: Keywords
        # The keywords field is implemented in the metadata spec as a str,
        # but it conceptually is a list of strings, and is serialized using
        # ", ".join(keywords), so we'll do some light data massaging to turn
        # this into what it logically is.
        elif raw_name == "keywords" and len(value) == 1:
            raw[raw_name] = _parse_keywords(value[0])
        # Special Case: Project-URL
        # The project urls is implemented in the metadata spec as a list of
        # specially-formatted strings that represent a key and a value, which
        # is fundamentally a mapping, however the email format doesn't support
        # mappings in a sane way, so it was crammed into a list of strings
        # instead.
        #
        # We will do a little light data massaging to turn this into a map as
        # it logically should be.
        elif raw_name == "project_urls":
            try:
                raw[raw_name] = _parse_project_urls(value)
            except KeyError:
                # Duplicate labels — treat the whole field as unparseable.
                unparsed[name] = value
        # Nothing that we've done has managed to parse this, so it'll just
        # throw it in our unparseable data and move on.
        else:
            unparsed[name] = value

    # We need to support getting the Description from the message payload in
    # addition to getting it from the headers. This does mean, though, there
    # is the possibility of it being set both ways, in which case we put both
    # in 'unparsed' since we don't know which is right.
    try:
        payload = _get_payload(parsed, data)
    except ValueError:
        # Undecodable payload: keep the (possibly bytes-decoded) raw payload
        # in 'unparsed' so no data is silently dropped.
        unparsed.setdefault("description", []).append(
            parsed.get_payload(decode=isinstance(data, bytes))
        )
    else:
        if payload:
            # Check to see if we've already got a description, if so then both
            # it, and this body move to unparseable.
            if "description" in raw:
                description_header = cast(str, raw.pop("description"))
                unparsed.setdefault("description", []).extend(
                    [description_header, payload]
                )
            elif "description" in unparsed:
                unparsed["description"].append(payload)
            else:
                raw["description"] = payload

    # We need to cast our `raw` to a metadata, because a TypedDict only support
    # literal key names, but we're computing our key names on purpose, but the
    # way this function is implemented, our `TypedDict` can only have valid key
    # names.
    return cast(RawMetadata, raw), unparsed


# Sentinel distinguishing "not cached yet" from a cached None.
_NOT_FOUND = object()


# Keep the two values in sync.
_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]
_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]

# Core metadata fields that every distribution must provide.
_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])
+ """ + + name: str + raw_name: str + added: _MetadataVersion + + def __init__( + self, + *, + added: _MetadataVersion = "1.0", + ) -> None: + self.added = added + + def __set_name__(self, _owner: "Metadata", name: str) -> None: + self.name = name + self.raw_name = _RAW_TO_EMAIL_MAPPING[name] + + def __get__(self, instance: "Metadata", _owner: Type["Metadata"]) -> T: + # With Python 3.8, the caching can be replaced with functools.cached_property(). + # No need to check the cache as attribute lookup will resolve into the + # instance's __dict__ before __get__ is called. + cache = instance.__dict__ + value = instance._raw.get(self.name) + + # To make the _process_* methods easier, we'll check if the value is None + # and if this field is NOT a required attribute, and if both of those + # things are true, we'll skip the the converter. This will mean that the + # converters never have to deal with the None union. + if self.name in _REQUIRED_ATTRS or value is not None: + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) + + cache[self.name] = value + try: + del instance._raw[self.name] # type: ignore[misc] + except KeyError: + pass + + return cast(T, value) + + def _invalid_metadata( + self, msg: str, cause: Optional[Exception] = None + ) -> InvalidMetadata: + exc = InvalidMetadata( + self.raw_name, msg.format_map({"field": repr(self.raw_name)}) + ) + exc.__cause__ = cause + return exc + + def _process_metadata_version(self, value: str) -> _MetadataVersion: + # Implicitly makes Metadata-Version required. + if value not in _VALID_METADATA_VERSIONS: + raise self._invalid_metadata(f"{value!r} is not a valid metadata version") + return cast(_MetadataVersion, value) + + def _process_name(self, value: str) -> str: + if not value: + raise self._invalid_metadata("{field} is a required field") + # Validate the name as a side-effect. 
+ try: + utils.canonicalize_name(value, validate=True) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + else: + return value + + def _process_version(self, value: str) -> version_module.Version: + if not value: + raise self._invalid_metadata("{field} is a required field") + try: + return version_module.parse(value) + except version_module.InvalidVersion as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_summary(self, value: str) -> str: + """Check the field contains no newlines.""" + if "\n" in value: + raise self._invalid_metadata("{field} must be a single line") + return value + + def _process_description_content_type(self, value: str) -> str: + content_types = {"text/plain", "text/x-rst", "text/markdown"} + message = email.message.EmailMessage() + message["content-type"] = value + + content_type, parameters = ( + # Defaults to `text/plain` if parsing failed. + message.get_content_type().lower(), + message["content-type"].params, + ) + # Check if content-type is valid or defaulted to `text/plain` and thus was + # not parseable. + if content_type not in content_types or content_type not in value.lower(): + raise self._invalid_metadata( + f"{{field}} must be one of {list(content_types)}, not {value!r}" + ) + + charset = parameters.get("charset", "UTF-8") + if charset != "UTF-8": + raise self._invalid_metadata( + f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" + ) + + markdown_variants = {"GFM", "CommonMark"} + variant = parameters.get("variant", "GFM") # Use an acceptable default. 
+ if content_type == "text/markdown" and variant not in markdown_variants: + raise self._invalid_metadata( + f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " + f"not {variant!r}", + ) + return value + + def _process_dynamic(self, value: List[str]) -> List[str]: + for dynamic_field in map(str.lower, value): + if dynamic_field in {"name", "version", "metadata-version"}: + raise self._invalid_metadata( + f"{value!r} is not allowed as a dynamic field" + ) + elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: + raise self._invalid_metadata(f"{value!r} is not a valid dynamic field") + return list(map(str.lower, value)) + + def _process_provides_extra( + self, + value: List[str], + ) -> List[utils.NormalizedName]: + normalized_names = [] + try: + for name in value: + normalized_names.append(utils.canonicalize_name(name, validate=True)) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{name!r} is invalid for {{field}}", cause=exc + ) + else: + return normalized_names + + def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: + try: + return specifiers.SpecifierSet(value) + except specifiers.InvalidSpecifier as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_requires_dist( + self, + value: List[str], + ) -> List[requirements.Requirement]: + reqs = [] + try: + for req in value: + reqs.append(requirements.Requirement(req)) + except requirements.InvalidRequirement as exc: + raise self._invalid_metadata(f"{req!r} is invalid for {{field}}", cause=exc) + else: + return reqs + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). 
+ """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> "Metadata": + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. + + if validate: + exceptions: List[Exception] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). + fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. + try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + "{field} introduced in metadata version " + "{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email( + cls, data: Union[bytes, str], *, validate: bool = True + ) -> "Metadata": + """Parse metadata from email headers. 
+ + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + raw, unparsed = parse_email(data) + + if validate: + exceptions: list[Exception] = [] + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + raise ExceptionGroup( + "invalid or unparsed metadata", exc_group.exceptions + ) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[Optional[List[str]]] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[Optional[List[str]]] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[Optional[str]] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[Optional[str]] = 
_Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` (validated)""" + keywords: _Validator[Optional[List[str]]] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[Optional[str]] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[Optional[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: _Validator[Optional[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer-email`""" + license: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-license`""" + classifiers: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[Optional[List[requirements.Requirement]]] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[Optional[specifiers.SpecifierSet]] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. + requires_external: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[Optional[Dict[str, str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. 
+ provides_extra: _Validator[Optional[List[utils.NormalizedName]]] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/py.typed b/llmeval-env/lib/python3.10/site-packages/packaging/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/requirements.py b/llmeval-env/lib/python3.10/site-packages/packaging/requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc43a7e98d87dba0c2069bfb4554f71d228cad4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/requirements.py @@ -0,0 +1,90 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from typing import Any, Iterator, Optional, Set + +from ._parser import parse_requirement as _parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet +from .utils import canonicalize_name + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +class Requirement: + """Parse a requirement. 
+ + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? + + def __init__(self, requirement_string: str) -> None: + try: + parsed = _parse_requirement(requirement_string) + except ParserSyntaxError as e: + raise InvalidRequirement(str(e)) from e + + self.name: str = parsed.name + self.url: Optional[str] = parsed.url or None + self.extras: Set[str] = set(parsed.extras or []) + self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) + self.marker: Optional[Marker] = None + if parsed.marker is not None: + self.marker = Marker.__new__(Marker) + self.marker._markers = _normalize_extra_values(parsed.marker) + + def _iter_parts(self, name: str) -> Iterator[str]: + yield name + + if self.extras: + formatted_extras = ",".join(sorted(self.extras)) + yield f"[{formatted_extras}]" + + if self.specifier: + yield str(self.specifier) + + if self.url: + yield f"@ {self.url}" + if self.marker: + yield " " + + if self.marker: + yield f"; {self.marker}" + + def __str__(self) -> str: + return "".join(self._iter_parts(self.name)) + + def __repr__(self) -> str: + return f"" + + def __hash__(self) -> int: + return hash( + ( + self.__class__.__name__, + *self._iter_parts(canonicalize_name(self.name)), + ) + ) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Requirement): + return NotImplemented + + return ( + canonicalize_name(self.name) == canonicalize_name(other.name) + and self.extras == other.extras + and self.specifier == other.specifier + and self.url == other.url + and self.marker == other.marker + ) diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/specifiers.py 
b/llmeval-env/lib/python3.10/site-packages/packaging/specifiers.py new file mode 100644 index 0000000000000000000000000000000000000000..2d015bab5958fd9767cf5c9e449f2fa33292c962 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/specifiers.py @@ -0,0 +1,1017 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier + from packaging.version import Version +""" + +import abc +import itertools +import re +from typing import Callable, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union + +from .utils import canonicalize_version +from .version import Version + +UnparsedVersion = Union[Version, str] +UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) +CallableOperator = Callable[[Version, str], bool] + + +def _coerce_version(version: UnparsedVersion) -> Version: + if not isinstance(version, Version): + version = Version(version) + return version + + +class InvalidSpecifier(ValueError): + """ + Raised when attempting to create a :class:`Specifier` with a specifier + string that is invalid. + + >>> Specifier("lolwat") + Traceback (most recent call last): + ... + packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier-like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier-like object. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier-like + objects are equal. + + :param other: The other object to check against. 
+ """ + + @property + @abc.abstractmethod + def prereleases(self) -> Optional[bool]: + """Whether or not pre-releases as a whole are allowed. + + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """Setter for :attr:`prereleases`. + + :param value: The value to set. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. + + .. tip:: + + It is generally not required to instantiate this manually. You should instead + prefer to work with :class:`SpecifierSet` instead, which can parse + comma-separated version specifiers (which is what package metadata contains). + """ + + _operator_regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s;)]* # The arbitrary version can be just about anything, + # we match everything except for whitespace, a + # semi-colon for marker support, and a closing paren + # since versions can be enclosed in them. 
+ ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + + # You cannot use a wild card and a pre-release, post-release, a dev or + # local version together so group them with a | and make them optional. + (?: + \.\* # Wild card syntax of .* + | + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. 
+ :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 + @property # type: ignore[override] + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. 
+ + >>> Specifier('>=1.0.0') + =1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") + True + >>> (Specifier("==1.2.3", prereleases=False) == + ... 
Specifier("==1.2.3", prereleases=True)) + True + >>> Specifier("==1.2.3") == "==1.2.3" + True + >>> Specifier("==1.2.3") == Specifier("==1.2.4") + False + >>> Specifier("==1.2.3") == Specifier("~=1.2.3") + False + """ + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _compare_compatible(self, prospective: Version, spec: str) -> bool: + + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. + prefix = _version_join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + def _compare_equal(self, prospective: Version, spec: str) -> bool: + + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. 
+ normalized_prospective = canonicalize_version( + prospective.public, strip_trailing_zero=False + ) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) + # Split the spec out by bangs and dots, and pretend that there is + # an implicit dot in between a release segment and a pre-release segment. + split_spec = _version_split(normalized_spec) + + # Split the prospective version out by bangs and dots, and pretend + # that there is an implicit dot in between a release segment and + # a pre-release segment. + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = padded_prospective[: len(split_spec)] + + return shortened_prospective == split_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. 
+ return Version(prospective.public) <= Version(spec) + + def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) >= Version(spec) + + def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. 
+ if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: + return str(prospective).lower() == str(spec).lower() + + def __contains__(self, item: Union[str, Version]) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: The item to check for. + + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. + + >>> "1.2.3" in Specifier(">=1.2.3") + True + >>> Version("1.2.3") in Specifier(">=1.2.3") + True + >>> "1.0.0" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True) + True + """ + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this Specifier. 
If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> Specifier(">=1.2.3").contains("1.2.3") + True + >>> Specifier(">=1.2.3").contains(Version("1.2.3")) + True + >>> Specifier(">=1.2.3").contains("1.0.0") + False + >>> Specifier(">=1.2.3").contains("1.3.0a1") + False + >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1") + True + >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True) + True + """ + + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version, this allows us to have a shortcut for + # "2.0" in Specifier(">=2") + normalized_item = _coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifier. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). 
+ + This method is smarter than just ``filter(Specifier().contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")])) + ['1.2.3', '1.3', ] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"])) + ['1.5a1'] + >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + """ + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = _coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version: str) -> List[str]: + """Split version into components. + + The split components are intended for version comparison. 
The logic does + not attempt to retain the original version string, so joining the + components back with :func:`_version_join` may not produce the original + version string. + """ + result: List[str] = [] + + epoch, _, rest = version.rpartition("!") + result.append(epoch or "0") + + for item in rest.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _version_join(components: List[str]) -> str: + """Join split version components into a version string. + + This function assumes the input came from :func:`_version_split`, where the + first component must be the epoch (either empty or numeric), and all other + components numeric. + """ + epoch, *rest = components + return f"{epoch}!{'.'.join(rest)}" + + +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return ( + list(itertools.chain.from_iterable(left_split)), + list(itertools.chain.from_iterable(right_split)), + ) + + +class SpecifierSet(BaseSpecifier): + """This class abstracts handling of a set of version specifiers. + + It can be passed a single specifier (``>=3.0``), a comma-separated list of + specifiers (``>=3.0,!=3.1``), or no specifier at all. 
+ """ + + def __init__( + self, specifiers: str = "", prereleases: Optional[bool] = None + ) -> None: + """Initialize a SpecifierSet instance. + + :param specifiers: + The string representation of a specifier or a comma-separated list of + specifiers which will be parsed and normalized before use. + :param prereleases: + This tells the SpecifierSet if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + + :raises InvalidSpecifier: + If the given ``specifiers`` are not parseable than this exception will be + raised. + """ + + # Split on `,` to break each individual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Make each individual specifier a Specifier and save in a frozen set for later. + self._specs = frozenset(map(Specifier, split_specifiers)) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + @property + def prereleases(self) -> Optional[bool]: + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __repr__(self) -> str: + """A representation of the specifier set that shows all internal state. 
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> SpecifierSet('>=1.0.0,!=2.0.0')
+        <SpecifierSet('!=2.0.0,>=1.0.0')>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
+        """
+        # Only include the prereleases flag in the repr when it was set
+        # explicitly (self._prereleases), not when it was autodetected.
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<SpecifierSet({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the specifier set that can be round-tripped.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
+        '!=1.0.1,>=1.0.0'
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
+        '!=1.0.1,>=1.0.0'
+        """
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self) -> int:
+        return hash(self._specs)
+
+    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+        """Return a SpecifierSet which is a combination of the two sets.
+
+        :param other: The other object to combine with.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        """
+        if isinstance(other, str):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        # An explicit prereleases override on either side wins; two explicit
+        # but conflicting overrides cannot be reconciled.
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two SpecifierSet-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
+        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
+        False
+        """
+        if isinstance(other, (str, Specifier)):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __len__(self) -> int:
+        """Returns the number of specifiers in this specifier set."""
+        return len(self._specs)
+
+    def __iter__(self) -> Iterator[Specifier]:
+        """
+        Returns an iterator over all the underlying :class:`Specifier` instances
+        in this specifier set.
+
+        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
+        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
+        """
+        return iter(self._specs)
+
+    def __contains__(self, item: UnparsedVersion) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+ + >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True) + True + """ + return self.contains(item) + + def contains( + self, + item: UnparsedVersion, + prereleases: Optional[bool] = None, + installed: Optional[bool] = None, + ) -> bool: + """Return whether or not the item is contained in this SpecifierSet. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this SpecifierSet. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3")) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True) + True + """ + # Ensure that our item is a Version instance. + if not isinstance(item, Version): + item = Version(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. 
+ # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + if installed and item.is_prerelease: + item = Version(item.base_version) + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifiers in this set. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). + + This method is smarter than just ``filter(SpecifierSet(...).contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. 
+
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
+        ['1.3', <Version('1.4')>]
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
+        []
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+
+        An "empty" SpecifierSet will filter items based on the presence of prerelease
+        versions in the set.
+
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet("").filter(["1.5a1"]))
+        ['1.5a1']
+        >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        """
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one, this will act as a logical AND amongst
+        # each specifier.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iter(iterable)
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases.
+ else: + filtered: List[UnparsedVersionVar] = [] + found_prereleases: List[UnparsedVersionVar] = [] + + for item in iterable: + parsed_version = _coerce_version(item) + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return iter(found_prereleases) + + return iter(filtered) diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/tags.py b/llmeval-env/lib/python3.10/site-packages/packaging/tags.py new file mode 100644 index 0000000000000000000000000000000000000000..89f1926137dd2d2a6bd63616bf5b9f722fc8d584 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/tags.py @@ -0,0 +1,571 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import logging +import platform +import re +import struct +import subprocess +import sys +import sysconfig +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Dict, + FrozenSet, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +from . import _manylinux, _musllinux + +logger = logging.getLogger(__name__) + +PythonVersion = Sequence[int] +MacVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: Dict[str, str] = { + "python": "py", # Generic. + "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = struct.calcsize("P") == 4 + + +class Tag: + """ + A representation of the tag triple for a wheel. + + Instances are considered immutable and thus are hashable. 
Equality checking + is also supported. + """ + + __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] + + def __init__(self, interpreter: str, abi: str, platform: str) -> None: + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + # The __hash__ of every single element in a Set[Tag] will be evaluated each time + # that a set calls its `.disjoint()` method, which may be called hundreds of + # times when scanning a page of links for packages with tags matching that + # Set[Tag]. Pre-computing the value here produces significant speedups for + # downstream consumers. + self._hash = hash((self._interpreter, self._abi, self._platform)) + + @property + def interpreter(self) -> str: + return self._interpreter + + @property + def abi(self) -> str: + return self._abi + + @property + def platform(self) -> str: + return self._platform + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Tag): + return NotImplemented + + return ( + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. + and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) + ) + + def __hash__(self) -> int: + return self._hash + + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" + + def __repr__(self) -> str: + return f"<{self} @ {id(self)}>" + + +def parse_tag(tag: str) -> FrozenSet[Tag]: + """ + Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. + + Returning a set is required due to the possibility that the tag is a + compressed tag set. 
+ """ + tags = set() + interpreters, abis, platforms = tag.split("-") + for interpreter in interpreters.split("."): + for abi in abis.split("."): + for platform_ in platforms.split("."): + tags.add(Tag(interpreter, abi, platform_)) + return frozenset(tags) + + +def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: + value: Union[int, str, None] = sysconfig.get_config_var(name) + if value is None and warn: + logger.debug( + "Config variable '%s' is unset, Python ABI tag may be incorrect", name + ) + return value + + +def _normalize_string(string: str) -> str: + return string.replace(".", "_").replace("-", "_").replace(" ", "_") + + +def _is_threaded_cpython(abis: List[str]) -> bool: + """ + Determine if the ABI corresponds to a threaded (`--disable-gil`) build. + + The threaded builds are indicated by a "t" in the abiflags. + """ + if len(abis) == 0: + return False + # expect e.g., cp313 + m = re.match(r"cp\d+(.*)", abis[0]) + if not m: + return False + abiflags = m.group(1) + return "t" in abiflags + + +def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool: + """ + Determine if the Python version supports abi3. + + PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`) + builds do not support abi3. + """ + return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading + + +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: + py_version = tuple(py_version) # To allow for version comparison. + abis = [] + version = _version_nodot(py_version[:2]) + threading = debug = pymalloc = ucs4 = "" + with_debug = _get_config_var("Py_DEBUG", warn) + has_refcount = hasattr(sys, "gettotalrefcount") + # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled + # extension modules is the best option. 
+    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+    if with_debug or (with_debug is None and (has_refcount or has_ext)):
+        debug = "d"
+    if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn):
+        threading = "t"
+    if py_version < (3, 8):
+        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+        if with_pymalloc or with_pymalloc is None:
+            pymalloc = "m"
+        if py_version < (3, 3):
+            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+            if unicode_size == 4 or (
+                unicode_size is None and sys.maxunicode == 0x10FFFF
+            ):
+                ucs4 = "u"
+    elif debug:
+        # Debug builds can also load "normal" extension modules.
+        # We can also assume no UCS-4 or pymalloc requirement.
+        abis.append(f"cp{version}{threading}")
+    abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}")
+    return abis
+
+
+def cpython_tags(
+    python_version: Optional[PythonVersion] = None,
+    abis: Optional[Iterable[str]] = None,
+    platforms: Optional[Iterable[str]] = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a CPython interpreter.
+
+    The tags consist of:
+    - cp<python_version>-<abi>-<platform>
+    - cp<python_version>-abi3-<platform>
+    - cp<python_version>-none-<platform>
+    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
+
+    If python_version only specifies a major version then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+    their normal position and not at the beginning.
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+
+    interpreter = f"cp{_version_nodot(python_version[:2])}"
+
+    if abis is None:
+        if len(python_version) > 1:
+            abis = _cpython_abis(python_version, warn)
+        else:
+            abis = []
+    abis = list(abis)
+    # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"): + try: + abis.remove(explicit_abi) + except ValueError: + pass + + platforms = list(platforms or platform_tags()) + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + + threading = _is_threaded_cpython(abis) + use_abi3 = _abi3_applies(python_version, threading) + if use_abi3: + yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) + yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) + + if use_abi3: + for minor_version in range(python_version[1] - 1, 1, -1): + for platform_ in platforms: + interpreter = "cp{version}".format( + version=_version_nodot((python_version[0], minor_version)) + ) + yield Tag(interpreter, "abi3", platform_) + + +def _generic_abi() -> List[str]: + """ + Return the ABI tag based on EXT_SUFFIX. + """ + # The following are examples of `EXT_SUFFIX`. + # We want to keep the parts which are related to the ABI and remove the + # parts which are related to the platform: + # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 + # - mac: '.cpython-310-darwin.so' => cp310 + # - win: '.cp310-win_amd64.pyd' => cp310 + # - win: '.pyd' => cp37 (uses _cpython_abis()) + # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 + # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' + # => graalpy_38_native + + ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) + if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": + raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") + parts = ext_suffix.split(".") + if len(parts) < 3: + # CPython3.7 and earlier uses ".pyd" on Windows. 
+        return _cpython_abis(sys.version_info[:2])
+    soabi = parts[1]
+    if soabi.startswith("cpython"):
+        # non-windows
+        abi = "cp" + soabi.split("-")[1]
+    elif soabi.startswith("cp"):
+        # windows
+        abi = soabi.split("-")[0]
+    elif soabi.startswith("pypy"):
+        abi = "-".join(soabi.split("-")[:2])
+    elif soabi.startswith("graalpy"):
+        abi = "-".join(soabi.split("-")[:3])
+    elif soabi:
+        # pyston, ironpython, others?
+        abi = soabi
+    else:
+        return []
+    return [_normalize_string(abi)]
+
+
+def generic_tags(
+    interpreter: Optional[str] = None,
+    abis: Optional[Iterable[str]] = None,
+    platforms: Optional[Iterable[str]] = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a generic interpreter.
+
+    The tags consist of:
+    - <interpreter>-<abi>-<platform>
+
+    The "none" ABI will be added if it was not explicitly provided.
+    """
+    if not interpreter:
+        interp_name = interpreter_name()
+        interp_version = interpreter_version(warn=warn)
+        interpreter = "".join([interp_name, interp_version])
+    if abis is None:
+        abis = _generic_abi()
+    else:
+        abis = list(abis)
+    platforms = list(platforms or platform_tags())
+    if "none" not in abis:
+        abis.append("none")
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+    """
+    Yields Python versions in descending order.
+
+    After the latest version, the major-only version will be yielded, and then
+    all previous versions of that major version.
+    """
+    if len(py_version) > 1:
+        yield f"py{_version_nodot(py_version[:2])}"
+    yield f"py{py_version[0]}"
+    if len(py_version) > 1:
+        for minor in range(py_version[1] - 1, -1, -1):
+            yield f"py{_version_nodot((py_version[0], minor))}"
+
+
+def compatible_tags(
+    python_version: Optional[PythonVersion] = None,
+    interpreter: Optional[str] = None,
+    platforms: Optional[Iterable[str]] = None,
+) -> Iterator[Tag]:
+    """
+    Yields the sequence of tags that are compatible with a specific version of Python.
+
+    The tags consist of:
+    - py*-none-<platform>
+    - <interpreter>-none-any  # ... if `interpreter` is provided.
+    - py*-none-any
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+    platforms = list(platforms or platform_tags())
+    for version in _py_interpreter_range(python_version):
+        for platform_ in platforms:
+            yield Tag(version, "none", platform_)
+    if interpreter:
+        yield Tag(interpreter, "none", "any")
+    for version in _py_interpreter_range(python_version):
+        yield Tag(version, "none", "any")
+
+
+def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
+    if not is_32bit:
+        return arch
+
+    if arch.startswith("ppc"):
+        return "ppc"
+
+    return "i386"
+
+
+def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
+    formats = [cpu_arch]
+    if cpu_arch == "x86_64":
+        if version < (10, 4):
+            return []
+        formats.extend(["intel", "fat64", "fat32"])
+
+    elif cpu_arch == "i386":
+        if version < (10, 4):
+            return []
+        formats.extend(["intel", "fat32", "fat"])
+
+    elif cpu_arch == "ppc64":
+        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4): + return [] + formats.append("fat64") + + elif cpu_arch == "ppc": + if version > (10, 6): + return [] + formats.extend(["fat32", "fat"]) + + if cpu_arch in {"arm64", "x86_64"}: + formats.append("universal2") + + if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: + formats.append("universal") + + return formats + + +def mac_platforms( + version: Optional[MacVersion] = None, arch: Optional[str] = None +) -> Iterator[str]: + """ + Yields the platform tags for a macOS system. + + The `version` parameter is a two-item tuple specifying the macOS version to + generate platform tags for. The `arch` parameter is the CPU architecture to + generate platform tags for. Both parameters default to the appropriate value + for the current system. + """ + version_str, _, cpu_arch = platform.mac_ver() + if version is None: + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + if version == (10, 16): + # When built against an older macOS SDK, Python will report macOS 10.16 + # instead of the real version. + version_str = subprocess.run( + [ + sys.executable, + "-sS", + "-c", + "import platform; print(platform.mac_ver()[0])", + ], + check=True, + env={"SYSTEM_VERSION_COMPAT": "0"}, + stdout=subprocess.PIPE, + text=True, + ).stdout + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + else: + version = version + if arch is None: + arch = _mac_arch(cpu_arch) + else: + arch = arch + + if (10, 0) <= version and version < (11, 0): + # Prior to Mac OS 11, each yearly release of Mac OS bumped the + # "minor" version number. The major version was always 10. 
+ for minor_version in range(version[1], -1, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=10, minor=minor_version, binary_format=binary_format + ) + + if version >= (11, 0): + # Starting with Mac OS 11, each yearly release bumps the major version + # number. The minor versions are now the midyear updates. + for major_version in range(version[0], 10, -1): + compat_version = major_version, 0 + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=major_version, minor=0, binary_format=binary_format + ) + + if version >= (11, 0): + # Mac OS 11 on x86_64 is compatible with binaries from previous releases. + # Arm64 support was introduced in 11.0, so no Arm binaries from previous + # releases exist. + # + # However, the "universal2" binary format can have a + # macOS version earlier than 11.0 when the x86_64 part of the binary supports + # that version of macOS. 
+ if arch == "x86_64": + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + else: + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_format = "universal2" + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + + +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) + if not linux.startswith("linux_"): + # we should never be here, just yield the sysconfig one and return + yield linux + return + if is_32bit: + if linux == "linux_x86_64": + linux = "linux_i686" + elif linux == "linux_aarch64": + linux = "linux_armv8l" + _, arch = linux.split("_", 1) + archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch]) + yield from _manylinux.platform_tags(archs) + yield from _musllinux.platform_tags(archs) + for arch in archs: + yield f"linux_{arch}" + + +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) + + +def platform_tags() -> Iterator[str]: + """ + Provides the platform tags for this installation. + """ + if platform.system() == "Darwin": + return mac_platforms() + elif platform.system() == "Linux": + return _linux_platforms() + else: + return _generic_platforms() + + +def interpreter_name() -> str: + """ + Returns the name of the running interpreter. + + Some implementations have a reserved, two-letter abbreviation which will + be returned when appropriate. 
+ """ + name = sys.implementation.name + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def interpreter_version(*, warn: bool = False) -> str: + """ + Returns the version of the running interpreter. + """ + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version: PythonVersion) -> str: + return "".join(map(str, version)) + + +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: + """ + Returns the sequence of tag triples for the running interpreter. + + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + + interp_name = interpreter_name() + if interp_name == "cp": + yield from cpython_tags(warn=warn) + else: + yield from generic_tags() + + if interp_name == "pp": + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) + else: + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/utils.py b/llmeval-env/lib/python3.10/site-packages/packaging/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c2f75aa806282d322c76c2117c0f0fdfb09d25 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/packaging/utils.py @@ -0,0 +1,172 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +from typing import FrozenSet, NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidName(ValueError): + """ + An invalid distribution name; users should refer to the packaging user guide. 
+ """ + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +# Core metadata spec for `Name` +_validate_regex = re.compile( + r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE +) +_canonicalize_regex = re.compile(r"[-_.]+") +_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName: + if validate and not _validate_regex.match(name): + raise InvalidName(f"name is invalid: {name!r}") + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def is_normalized_name(name: str) -> bool: + return _normalized_regex.match(name) is not None + + +def canonicalize_version( + version: Union[Version, str], *, strip_trailing_zero: bool = True +) -> str: + """ + This is very similar to Version.__str__, but has one subtle difference + with the way it handles the release segment. 
+    """
+    if isinstance(version, str):
+        try:
+            parsed = Version(version)
+        except InvalidVersion:
+            # Legacy versions cannot be normalized
+            return version
+    else:
+        parsed = version
+
+    parts = []
+
+    # Epoch
+    if parsed.epoch != 0:
+        parts.append(f"{parsed.epoch}!")
+
+    # Release segment
+    release_segment = ".".join(str(x) for x in parsed.release)
+    if strip_trailing_zero:
+        # NB: This strips trailing '.0's to normalize
+        release_segment = re.sub(r"(\.0)+$", "", release_segment)
+    parts.append(release_segment)
+
+    # Pre-release
+    if parsed.pre is not None:
+        parts.append("".join(str(x) for x in parsed.pre))
+
+    # Post-release
+    if parsed.post is not None:
+        parts.append(f".post{parsed.post}")
+
+    # Development release
+    if parsed.dev is not None:
+        parts.append(f".dev{parsed.dev}")
+
+    # Local version segment
+    if parsed.local is not None:
+        parts.append(f"+{parsed.local}")
+
+    return "".join(parts)
+
+
+def parse_wheel_filename(
+    filename: str,
+) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
+    if not filename.endswith(".whl"):
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (extension must be '.whl'): {filename}"
+        )
+
+    filename = filename[:-4]
+    dashes = filename.count("-")
+    if dashes not in (4, 5):
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (wrong number of parts): {filename}"
+        )
+
+    parts = filename.split("-", dashes - 2)
+    name_part = parts[0]
+    # See PEP 427 for the rules on escaping the project name.
+    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
+        raise InvalidWheelFilename(f"Invalid project name: {filename}")
+    name = canonicalize_name(name_part)
+
+    try:
+        version = Version(parts[1])
+    except InvalidVersion as e:
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (invalid version): {filename}"
+        ) from e
+
+    if dashes == 5:
+        build_part = parts[2]
+        build_match = _build_tag_regex.match(build_part)
+        if build_match is None:
+            raise InvalidWheelFilename(
+                f"Invalid build number: {build_part} in '{filename}'"
+            )
+        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
+    else:
+        build = ()
+    tags = parse_tag(parts[-1])
+    return (name, version, build, tags)
+
+
+def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
+    if filename.endswith(".tar.gz"):
+        file_stem = filename[: -len(".tar.gz")]
+    elif filename.endswith(".zip"):
+        file_stem = filename[: -len(".zip")]
+    else:
+        raise InvalidSdistFilename(
+            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
+            f" {filename}"
+        )
+
+    # We are requiring a PEP 440 version, which cannot contain dashes,
+    # so we split on the last dash.
+    name_part, sep, version_part = file_stem.rpartition("-")
+    if not sep:
+        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
+
+    name = canonicalize_name(name_part)
+
+    try:
+        version = Version(version_part)
+    except InvalidVersion as e:
+        raise InvalidSdistFilename(
+            f"Invalid sdist filename (invalid version): {filename}"
+        ) from e
+
+    return (name, version)
diff --git a/llmeval-env/lib/python3.10/site-packages/packaging/version.py b/llmeval-env/lib/python3.10/site-packages/packaging/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..5faab9bd0dcf28847960162b2b4f13a8a556ef20
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/packaging/version.py
@@ -0,0 +1,563 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from packaging.version import parse, Version
+"""
+
+import itertools
+import re
+from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
+
+LocalType = Tuple[Union[int, str], ...]
+
+CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
+CmpLocalType = Union[
+    NegativeInfinityType,
+    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
+]
+CmpKey = Tuple[
+    int,
+    Tuple[int, ...],
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpLocalType,
+]
+VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
+
+
+class _Version(NamedTuple):
+    epoch: int
+    release: Tuple[int, ...]
+    dev: Optional[Tuple[str, int]]
+    pre: Optional[Tuple[str, int]]
+    post: Optional[Tuple[str, int]]
+    local: Optional[LocalType]
+
+
+def parse(version: str) -> "Version":
+    """Parse the given version string.
+ + >>> parse('1.0.dev1') + + + :param version: The version string to parse. + :raises InvalidVersion: When the version string is not a valid version. + """ + return Version(version) + + +class InvalidVersion(ValueError): + """Raised when a version string is not a valid version. + + >>> Version("invalid") + Traceback (most recent call last): + ... + packaging.version.InvalidVersion: Invalid version: 'invalid' + """ + + +class _BaseVersion: + _key: Tuple[Any, ...] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. + def __lt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +_VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                          # pre-release
+            [-_\.]?
+            (?Palpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+        (?P                                         # post release
+            (?:-(?P[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?Ppost|rev|r)
+                [-_\.]?
+                (?P[0-9]+)?
+            )
+        )?
+        (?P                                          # dev release
+            [-_\.]?
+            (?Pdev)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+    )
+    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+# Public alias of the private pattern string; the docstring below is the
+# API documentation for it.
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    
+    >>> v2
+    
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        
+        """
+        return f""
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
+) -> Optional[Tuple[str, int]]:
+    """Normalize a (letter, number) pre/post/dev release pair.
+
+    Returns the canonical spelling of ``letter`` paired with ``number`` as an
+    ``int``, ``("post", n)`` for the implicit post-release form (a bare number
+    with no letter, e.g. ``1.0-1``), or ``None`` when neither part is present.
+    """
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
+
+# Characters that separate the segments of a local version identifier; all
+# three are normalized to "." when the version is re-rendered.
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[LocalType],
+) -> CmpKey:
+    """Build the tuple used as a :class:`Version` sorting key.
+
+    Absent pre/post/dev/local segments are replaced by +/- infinity
+    sentinels so that plain tuple comparison of the result implements the
+    PEP 440 ordering rules.
+    """
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non zero, then take the rest
+    # re-reverse it back into the correct order and make it a tuple and use
+    # that for our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
diff --git a/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..cff5eb74e1badd1c5237ed2654b349530179ad1d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name of the psutil authors nor the names of its contributors
+   may be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..1815b243bc8480c1df3c73a79e1dda1ff74ea2f0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/METADATA
@@ -0,0 +1,530 @@
+Metadata-Version: 2.1
+Name: psutil
+Version: 5.9.8
+Summary: Cross-platform lib for process and system monitoring in Python.
+Home-page: https://github.com/giampaolo/psutil
+Author: Giampaolo Rodola
+Author-email: g.rodola@gmail.com
+License: BSD-3-Clause
+Keywords: ps,top,kill,free,lsof,netstat,nice,tty,ionice,uptime,taskmgr,process,df,iotop,iostat,ifconfig,taskset,who,pidof,pmap,smem,pstree,monitoring,ulimit,prlimit,smem,performance,metrics,agent,observability
+Platform: Platform Independent
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Environment :: Win32 (MS Windows)
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows :: Windows 10
+Classifier: Operating System :: Microsoft :: Windows :: Windows 7
+Classifier: Operating System :: Microsoft :: Windows :: Windows 8
+Classifier: Operating System :: Microsoft :: Windows :: Windows 8.1
+Classifier: Operating System :: Microsoft :: Windows :: Windows Server 2003
+Classifier: Operating System :: Microsoft :: Windows :: Windows Server 2008
+Classifier: Operating System :: Microsoft :: Windows :: Windows Vista
+Classifier: Operating System :: Microsoft
+Classifier: Operating System :: OS Independent
+Classifier: Operating System :: POSIX :: AIX
+Classifier: Operating System :: POSIX :: BSD :: FreeBSD
+Classifier: Operating System :: POSIX :: BSD :: NetBSD
+Classifier: Operating System :: POSIX :: BSD :: OpenBSD
+Classifier: Operating System :: POSIX :: BSD
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: POSIX :: SunOS/Solaris
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: System :: Benchmark
+Classifier: Topic :: System :: Hardware :: Hardware Drivers
+Classifier: Topic :: System :: Hardware
+Classifier: Topic :: System :: Monitoring
+Classifier: Topic :: System :: Networking :: Monitoring :: Hardware Watchdog
+Classifier: Topic :: System :: Networking :: Monitoring
+Classifier: Topic :: System :: Networking
+Classifier: Topic :: System :: Operating System
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Provides-Extra: test
+Requires-Dist: ipaddress ; (python_version < "3.0") and extra == 'test'
+Requires-Dist: mock ; (python_version < "3.0") and extra == 'test'
+Requires-Dist: enum34 ; (python_version <= "3.4") and extra == 'test'
+Requires-Dist: pywin32 ; (sys_platform == "win32") and extra == 'test'
+Requires-Dist: wmi ; (sys_platform == "win32") and extra == 'test'
+
+|  |downloads| |stars| |forks| |contributors| |coverage|
+|  |version| |py-versions| |packages| |license|
+|  |github-actions-wheels|  |github-actions-bsd| |appveyor| |doc| |twitter| |tidelift|
+
+.. |downloads| image:: https://img.shields.io/pypi/dm/psutil.svg
+    :target: https://pepy.tech/project/psutil
+    :alt: Downloads
+
+.. |stars| image:: https://img.shields.io/github/stars/giampaolo/psutil.svg
+    :target: https://github.com/giampaolo/psutil/stargazers
+    :alt: Github stars
+
+.. |forks| image:: https://img.shields.io/github/forks/giampaolo/psutil.svg
+    :target: https://github.com/giampaolo/psutil/network/members
+    :alt: Github forks
+
+.. |contributors| image:: https://img.shields.io/github/contributors/giampaolo/psutil.svg
+    :target: https://github.com/giampaolo/psutil/graphs/contributors
+    :alt: Contributors
+
+.. |github-actions-wheels| image:: https://img.shields.io/github/actions/workflow/status/giampaolo/psutil/.github/workflows/build.yml?label=Linux%2C%20macOS%2C%20Windows
+    :target: https://github.com/giampaolo/psutil/actions?query=workflow%3Abuild
+    :alt: Linux, macOS, Windows
+
+.. |github-actions-bsd| image:: https://img.shields.io/github/actions/workflow/status/giampaolo/psutil/.github/workflows/bsd.yml?label=FreeBSD,%20NetBSD,%20OpenBSD
+    :target: https://github.com/giampaolo/psutil/actions?query=workflow%3Absd-tests
+    :alt: FreeBSD, NetBSD, OpenBSD
+
+.. |appveyor| image:: https://img.shields.io/appveyor/build/giampaolo/psutil/master.svg?maxAge=3600&label=Windows%20(py2)
+    :target: https://ci.appveyor.com/project/giampaolo/psutil
+    :alt: Windows (Appveyor)
+
+.. |coverage| image:: https://coveralls.io/repos/github/giampaolo/psutil/badge.svg?branch=master
+    :target: https://coveralls.io/github/giampaolo/psutil?branch=master
+    :alt: Test coverage (coverall.io)
+
+.. |doc| image:: https://readthedocs.org/projects/psutil/badge/?version=latest
+    :target: https://psutil.readthedocs.io/en/latest/
+    :alt: Documentation Status
+
+.. |version| image:: https://img.shields.io/pypi/v/psutil.svg?label=pypi
+    :target: https://pypi.org/project/psutil
+    :alt: Latest version
+
+.. |py-versions| image:: https://img.shields.io/pypi/pyversions/psutil.svg
+    :alt: Supported Python versions
+
+.. |packages| image:: https://repology.org/badge/tiny-repos/python:psutil.svg
+    :target: https://repology.org/metapackage/python:psutil/versions
+    :alt: Binary packages
+
+.. |license| image:: https://img.shields.io/pypi/l/psutil.svg
+    :target: https://github.com/giampaolo/psutil/blob/master/LICENSE
+    :alt: License
+
+.. |twitter| image:: https://img.shields.io/twitter/follow/grodola.svg?label=follow&style=flat&logo=twitter&logoColor=4FADFF
+    :target: https://twitter.com/grodola
+    :alt: Twitter Follow
+
+.. |tidelift| image:: https://tidelift.com/badges/github/giampaolo/psutil?style=flat
+    :target: https://tidelift.com/subscription/pkg/pypi-psutil?utm_source=pypi-psutil&utm_medium=referral&utm_campaign=readme
+    :alt: Tidelift
+
+-----
+
+Quick links
+===========
+
+- `Home page <https://github.com/giampaolo/psutil>`_
+- `Install <https://github.com/giampaolo/psutil/blob/master/INSTALL.rst>`_
+- `Documentation <http://psutil.readthedocs.io>`_
+- `Download <https://pypi.org/project/psutil/#files>`_
+- `Forum <http://groups.google.com/group/psutil/topics>`_
+- `StackOverflow <https://stackoverflow.com/questions/tagged/psutil>`_
+- `Blog <https://gmpy.dev/tags/psutil>`_
+- `What's new <https://github.com/giampaolo/psutil/blob/master/HISTORY.rst>`_
+
+
+Summary
+=======
+
+psutil (process and system utilities) is a cross-platform library for
+retrieving information on **running processes** and **system utilization**
+(CPU, memory, disks, network, sensors) in Python.
+It is useful mainly for **system monitoring**, **profiling and limiting process
+resources** and **management of running processes**.
+It implements many functionalities offered by classic UNIX command line tools
+such as *ps, top, iotop, lsof, netstat, ifconfig, free* and others.
+psutil currently supports the following platforms:
+
+- **Linux**
+- **Windows**
+- **macOS**
+- **FreeBSD, OpenBSD**, **NetBSD**
+- **Sun Solaris**
+- **AIX**
+
+Supported Python versions are **2.7**, **3.6+** and
+`PyPy <https://pypy.org/>`__.
+
+Funding
+=======
+
+While psutil is free software and will always be, the project would benefit
+immensely from some funding.
+Keeping up with bug reports and maintenance has become hardly sustainable for
+me alone in terms of time.
+If you're a company that's making significant use of psutil you can consider
+becoming a sponsor via `GitHub Sponsors `__,
+`Open Collective `__ or
+`PayPal `__
+and have your logo displayed in here and psutil `doc `__.
+
+Sponsors
+========
+
+.. image:: https://github.com/giampaolo/psutil/raw/master/docs/_static/tidelift-logo.png
+  :width: 200
+  :alt: Alternative text
+
+`Add your logo `__.
+
+Example usages
+==============
+
+This represents pretty much the whole psutil API.
+
+CPU
+---
+
+.. code-block:: python
+
+    >>> import psutil
+    >>>
+    >>> psutil.cpu_times()
+    scputimes(user=3961.46, nice=169.729, system=2150.659, idle=16900.540, iowait=629.59, irq=0.0, softirq=19.42, steal=0.0, guest=0, guest_nice=0.0)
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_percent(interval=1)
+    ...
+    4.0
+    5.9
+    3.8
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_percent(interval=1, percpu=True)
+    ...
+    [4.0, 6.9, 3.7, 9.2]
+    [7.0, 8.5, 2.4, 2.1]
+    [1.2, 9.0, 9.9, 7.2]
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_times_percent(interval=1, percpu=False)
+    ...
+    scputimes(user=1.5, nice=0.0, system=0.5, idle=96.5, iowait=1.5, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    scputimes(user=1.0, nice=0.0, system=0.0, idle=99.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    scputimes(user=2.0, nice=0.0, system=0.0, idle=98.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    >>>
+    >>> psutil.cpu_count()
+    4
+    >>> psutil.cpu_count(logical=False)
+    2
+    >>>
+    >>> psutil.cpu_stats()
+    scpustats(ctx_switches=20455687, interrupts=6598984, soft_interrupts=2134212, syscalls=0)
+    >>>
+    >>> psutil.cpu_freq()
+    scpufreq(current=931.42925, min=800.0, max=3500.0)
+    >>>
+    >>> psutil.getloadavg()  # also on Windows (emulated)
+    (3.14, 3.89, 4.67)
+
+Memory
+------
+
+.. code-block:: python
+
+    >>> psutil.virtual_memory()
+    svmem(total=10367352832, available=6472179712, percent=37.6, used=8186245120, free=2181107712, active=4748992512, inactive=2758115328, buffers=790724608, cached=3500347392, shared=787554304)
+    >>> psutil.swap_memory()
+    sswap(total=2097147904, used=296128512, free=1801019392, percent=14.1, sin=304193536, sout=677842944)
+    >>>
+
+Disks
+-----
+
+.. code-block:: python
+
+    >>> psutil.disk_partitions()
+    [sdiskpart(device='/dev/sda1', mountpoint='/', fstype='ext4', opts='rw,nosuid', maxfile=255, maxpath=4096),
+     sdiskpart(device='/dev/sda2', mountpoint='/home', fstype='ext', opts='rw', maxfile=255, maxpath=4096)]
+    >>>
+    >>> psutil.disk_usage('/')
+    sdiskusage(total=21378641920, used=4809781248, free=15482871808, percent=22.5)
+    >>>
+    >>> psutil.disk_io_counters(perdisk=False)
+    sdiskio(read_count=719566, write_count=1082197, read_bytes=18626220032, write_bytes=24081764352, read_time=5023392, write_time=63199568, read_merged_count=619166, write_merged_count=812396, busy_time=4523412)
+    >>>
+
+Network
+-------
+
+.. code-block:: python
+
+    >>> psutil.net_io_counters(pernic=True)
+    {'eth0': netio(bytes_sent=485291293, bytes_recv=6004858642, packets_sent=3251564, packets_recv=4787798, errin=0, errout=0, dropin=0, dropout=0),
+     'lo': netio(bytes_sent=2838627, bytes_recv=2838627, packets_sent=30567, packets_recv=30567, errin=0, errout=0, dropin=0, dropout=0)}
+    >>>
+    >>> psutil.net_connections(kind='tcp')
+    [sconn(fd=115, family=, type=, laddr=addr(ip='10.0.0.1', port=48776), raddr=addr(ip='93.186.135.91', port=80), status='ESTABLISHED', pid=1254),
+     sconn(fd=117, family=, type=, laddr=addr(ip='10.0.0.1', port=43761), raddr=addr(ip='72.14.234.100', port=80), status='CLOSING', pid=2987),
+     ...]
+    >>>
+    >>> psutil.net_if_addrs()
+    {'lo': [snicaddr(family=, address='127.0.0.1', netmask='255.0.0.0', broadcast='127.0.0.1', ptp=None),
+            snicaddr(family=, address='::1', netmask='ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', broadcast=None, ptp=None),
+            snicaddr(family=, address='00:00:00:00:00:00', netmask=None, broadcast='00:00:00:00:00:00', ptp=None)],
+     'wlan0': [snicaddr(family=, address='192.168.1.3', netmask='255.255.255.0', broadcast='192.168.1.255', ptp=None),
+               snicaddr(family=, address='fe80::c685:8ff:fe45:641%wlan0', netmask='ffff:ffff:ffff:ffff::', broadcast=None, ptp=None),
+               snicaddr(family=, address='c4:85:08:45:06:41', netmask=None, broadcast='ff:ff:ff:ff:ff:ff', ptp=None)]}
+    >>>
+    >>> psutil.net_if_stats()
+    {'lo': snicstats(isup=True, duplex=, speed=0, mtu=65536, flags='up,loopback,running'),
+     'wlan0': snicstats(isup=True, duplex=, speed=100, mtu=1500, flags='up,broadcast,running,multicast')}
+    >>>
+
+Sensors
+-------
+
+.. code-block:: python
+
+    >>> import psutil
+    >>> psutil.sensors_temperatures()
+    {'acpitz': [shwtemp(label='', current=47.0, high=103.0, critical=103.0)],
+     'asus': [shwtemp(label='', current=47.0, high=None, critical=None)],
+     'coretemp': [shwtemp(label='Physical id 0', current=52.0, high=100.0, critical=100.0),
+                  shwtemp(label='Core 0', current=45.0, high=100.0, critical=100.0)]}
+    >>>
+    >>> psutil.sensors_fans()
+    {'asus': [sfan(label='cpu_fan', current=3200)]}
+    >>>
+    >>> psutil.sensors_battery()
+    sbattery(percent=93, secsleft=16628, power_plugged=False)
+    >>>
+
+Other system info
+-----------------
+
+.. code-block:: python
+
+    >>> import psutil
+    >>> psutil.users()
+    [suser(name='giampaolo', terminal='pts/2', host='localhost', started=1340737536.0, pid=1352),
+     suser(name='giampaolo', terminal='pts/3', host='localhost', started=1340737792.0, pid=1788)]
+    >>>
+    >>> psutil.boot_time()
+    1365519115.0
+    >>>
+
+Process management
+------------------
+
+.. code-block:: python
+
+    >>> import psutil
+    >>> psutil.pids()
+    [1, 2, 3, 4, 5, 6, 7, 46, 48, 50, 51, 178, 182, 222, 223, 224, 268, 1215,
+     1216, 1220, 1221, 1243, 1244, 1301, 1601, 2237, 2355, 2637, 2774, 3932,
+     4176, 4177, 4185, 4187, 4189, 4225, 4243, 4245, 4263, 4282, 4306, 4311,
+     4312, 4313, 4314, 4337, 4339, 4357, 4358, 4363, 4383, 4395, 4408, 4433,
+     4443, 4445, 4446, 5167, 5234, 5235, 5252, 5318, 5424, 5644, 6987, 7054,
+     7055, 7071]
+    >>>
+    >>> p = psutil.Process(7055)
+    >>> p
+    psutil.Process(pid=7055, name='python3', status='running', started='09:04:44')
+    >>> p.pid
+    7055
+    >>> p.name()
+    'python3'
+    >>> p.exe()
+    '/usr/bin/python3'
+    >>> p.cwd()
+    '/home/giampaolo'
+    >>> p.cmdline()
+    ['/usr/bin/python3', 'main.py']
+    >>>
+    >>> p.ppid()
+    7054
+    >>> p.parent()
+    psutil.Process(pid=4699, name='bash', status='sleeping', started='09:06:44')
+    >>> p.parents()
+    [psutil.Process(pid=4699, name='bash', started='09:06:44'),
+     psutil.Process(pid=4689, name='gnome-terminal-server', status='sleeping', started='0:06:44'),
+     psutil.Process(pid=1, name='systemd', status='sleeping', started='05:56:55')]
+    >>> p.children(recursive=True)
+    [psutil.Process(pid=29835, name='python3', status='sleeping', started='11:45:38'),
+     psutil.Process(pid=29836, name='python3', status='waking', started='11:43:39')]
+    >>>
+    >>> p.status()
+    'running'
+    >>> p.create_time()
+    1267551141.5019531
+    >>> p.terminal()
+    '/dev/pts/0'
+    >>>
+    >>> p.username()
+    'giampaolo'
+    >>> p.uids()
+    puids(real=1000, effective=1000, saved=1000)
+    >>> p.gids()
+    pgids(real=1000, effective=1000, saved=1000)
+    >>>
+    >>> p.cpu_times()
+    pcputimes(user=1.02, system=0.31, children_user=0.32, children_system=0.1, iowait=0.0)
+    >>> p.cpu_percent(interval=1.0)
+    12.1
+    >>> p.cpu_affinity()
+    [0, 1, 2, 3]
+    >>> p.cpu_affinity([0, 1])  # set
+    >>> p.cpu_num()
+    1
+    >>>
+    >>> p.memory_info()
+    pmem(rss=10915840, vms=67608576, shared=3313664, text=2310144, lib=0, data=7262208, dirty=0)
+    >>> p.memory_full_info()  # "real" USS memory usage (Linux, macOS, Win only)
+    pfullmem(rss=10199040, vms=52133888, shared=3887104, text=2867200, lib=0, data=5967872, dirty=0, uss=6545408, pss=6872064, swap=0)
+    >>> p.memory_percent()
+    0.7823
+    >>> p.memory_maps()
+    [pmmap_grouped(path='/lib/x8664-linux-gnu/libutil-2.15.so', rss=32768, size=2125824, pss=32768, shared_clean=0, shared_dirty=0, private_clean=20480, private_dirty=12288, referenced=32768, anonymous=12288, swap=0),
+     pmmap_grouped(path='/lib/x8664-linux-gnu/libc-2.15.so', rss=3821568, size=3842048, pss=3821568, shared_clean=0, shared_dirty=0, private_clean=0, private_dirty=3821568, referenced=3575808, anonymous=3821568, swap=0),
+     pmmap_grouped(path='[heap]',  rss=32768, size=139264, pss=32768, shared_clean=0, shared_dirty=0, private_clean=0, private_dirty=32768, referenced=32768, anonymous=32768, swap=0),
+     pmmap_grouped(path='[stack]', rss=2465792, size=2494464, pss=2465792, shared_clean=0, shared_dirty=0, private_clean=0, private_dirty=2465792, referenced=2277376, anonymous=2465792, swap=0),
+     ...]
+    >>>
+    >>> p.io_counters()
+    pio(read_count=478001, write_count=59371, read_bytes=700416, write_bytes=69632, read_chars=456232, write_chars=517543)
+    >>>
+    >>> p.open_files()
+    [popenfile(path='/home/giampaolo/monit.py', fd=3, position=0, mode='r', flags=32768),
+     popenfile(path='/var/log/monit.log', fd=4, position=235542, mode='a', flags=33793)]
+    >>>
+    >>> p.connections(kind='tcp')
+    [pconn(fd=115, family=<AddressFamily.AF_INET: 2>, type=<SocketKind.SOCK_STREAM: 1>, laddr=addr(ip='10.0.0.1', port=48776), raddr=addr(ip='93.186.135.91', port=80), status='ESTABLISHED'),
+     pconn(fd=117, family=<AddressFamily.AF_INET: 2>, type=<SocketKind.SOCK_STREAM: 1>, laddr=addr(ip='10.0.0.1', port=43761), raddr=addr(ip='72.14.234.100', port=80), status='CLOSING')]
+    >>>
+    >>> p.threads()
+    [pthread(id=5234, user_time=22.5, system_time=9.2891),
+     pthread(id=5237, user_time=0.0707, system_time=1.1)]
+    >>>
+    >>> p.num_threads()
+    4
+    >>> p.num_fds()
+    8
+    >>> p.num_ctx_switches()
+    pctxsw(voluntary=78, involuntary=19)
+    >>>
+    >>> p.nice()
+    0
+    >>> p.nice(10)  # set
+    >>>
+    >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # IO priority (Win and Linux only)
+    >>> p.ionice()
+    pionice(ioclass=<IOPriority.IOPRIO_CLASS_IDLE: 3>, value=0)
+    >>>
+    >>> p.rlimit(psutil.RLIMIT_NOFILE, (5, 5))  # set resource limits (Linux only)
+    >>> p.rlimit(psutil.RLIMIT_NOFILE)
+    (5, 5)
+    >>>
+    >>> p.environ()
+    {'LC_PAPER': 'it_IT.UTF-8', 'SHELL': '/bin/bash', 'GREP_OPTIONS': '--color=auto',
+    'XDG_CONFIG_DIRS': '/etc/xdg/xdg-ubuntu:/usr/share/upstart/xdg:/etc/xdg',
+     ...}
+    >>>
+    >>> p.as_dict()
+    {'status': 'running', 'num_ctx_switches': pctxsw(voluntary=63, involuntary=1), 'pid': 5457, ...}
+    >>> p.is_running()
+    True
+    >>> p.suspend()
+    >>> p.resume()
+    >>>
+    >>> p.terminate()
+    >>> p.kill()
+    >>> p.wait(timeout=3)
+    <Exitcode.EX_OK: 0>
+    >>>
+    >>> psutil.test()
+    USER         PID %CPU %MEM     VSZ     RSS TTY        START    TIME  COMMAND
+    root           1  0.0  0.0   24584    2240            Jun17   00:00  init
+    root           2  0.0  0.0       0       0            Jun17   00:00  kthreadd
+    ...
+    giampaolo  31475  0.0  0.0   20760    3024 /dev/pts/0 Jun19   00:00  python2.4
+    giampaolo  31721  0.0  2.2  773060  181896            00:04   10:30  chrome
+    root       31763  0.0  0.0       0       0            00:05   00:00  kworker/0:1
+    >>>
+
+Further process APIs
+--------------------
+
+.. code-block:: python
+
+    >>> import psutil
+    >>> for proc in psutil.process_iter(['pid', 'name']):
+    ...     print(proc.info)
+    ...
+    {'pid': 1, 'name': 'systemd'}
+    {'pid': 2, 'name': 'kthreadd'}
+    {'pid': 3, 'name': 'ksoftirqd/0'}
+    ...
+    >>>
+    >>> psutil.pid_exists(3)
+    True
+    >>>
+    >>> def on_terminate(proc):
+    ...     print("process {} terminated".format(proc))
+    ...
+    >>> # waits for multiple processes to terminate
+    >>> gone, alive = psutil.wait_procs(procs_list, timeout=3, callback=on_terminate)
+    >>>
+
+Windows services
+----------------
+
+.. code-block:: python
+
+    >>> list(psutil.win_service_iter())
+    [<WindowsService(name='AeLookupSvc', display_name='Application Experience') at 38850096>,
+     <WindowsService(name='ALG', display_name='Application Layer Gateway Service') at 38850128>,
+     <WindowsService(name='APNMCP', display_name='Ask Update Service') at 38850160>,
+     <WindowsService(name='AppIDSvc', display_name='Application Identity') at 38850192>,
+     ...]
+    >>> s = psutil.win_service_get('alg')
+    >>> s.as_dict()
+    {'binpath': 'C:\\Windows\\System32\\alg.exe',
+     'description': 'Provides support for 3rd party protocol plug-ins for Internet Connection Sharing',
+     'display_name': 'Application Layer Gateway Service',
+     'name': 'alg',
+     'pid': None,
+     'start_type': 'manual',
+     'status': 'stopped',
+     'username': 'NT AUTHORITY\\LocalService'}
+
+Projects using psutil
+=====================
+
+Here's some I find particularly interesting:
+
+- https://github.com/google/grr
+- https://github.com/facebook/osquery/
+- https://github.com/nicolargo/glances
+- https://github.com/aristocratos/bpytop
+- https://github.com/Jahaja/psdash
+- https://github.com/ajenti/ajenti
+- https://github.com/home-assistant/home-assistant/
+
+Portings
+========
+
+- Go: https://github.com/shirou/gopsutil
+- C: https://github.com/hamon-in/cpslib
+- Rust: https://github.com/rust-psutil/rust-psutil
+- Nim: https://github.com/johnscillieri/psutil-nim
+
+
+
diff --git a/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..7824e16634a45a391aa0397ef18c906530862a6a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/RECORD
@@ -0,0 +1,66 @@
+psutil-5.9.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+psutil-5.9.8.dist-info/LICENSE,sha256=uJwGOzeG4o4MCjjxkx22H-015p3SopZvvs_-4PRsjRA,1548
+psutil-5.9.8.dist-info/METADATA,sha256=GfZ5-fidrq0yXOCgyN4funClVHk6R_zfJz-3FKx0KjU,21837
+psutil-5.9.8.dist-info/RECORD,,
+psutil-5.9.8.dist-info/WHEEL,sha256=rgpVBmjjvbINeGKCkWEGd3f40VHMTsDkQj1Lgil82zE,221
+psutil-5.9.8.dist-info/top_level.txt,sha256=gCNhn57wzksDjSAISmgMJ0aiXzQulk0GJhb2-BAyYgw,7
+psutil/__init__.py,sha256=YWf_i2ZjuJqELRfNl6nX0nZuoi09GXqMZPJVsMJMCQQ,89169
+psutil/__pycache__/__init__.cpython-310.pyc,,
+psutil/__pycache__/_common.cpython-310.pyc,,
+psutil/__pycache__/_compat.cpython-310.pyc,,
+psutil/__pycache__/_psaix.cpython-310.pyc,,
+psutil/__pycache__/_psbsd.cpython-310.pyc,,
+psutil/__pycache__/_pslinux.cpython-310.pyc,,
+psutil/__pycache__/_psosx.cpython-310.pyc,,
+psutil/__pycache__/_psposix.cpython-310.pyc,,
+psutil/__pycache__/_pssunos.cpython-310.pyc,,
+psutil/__pycache__/_pswindows.cpython-310.pyc,,
+psutil/_common.py,sha256=BTwHxdYJQynrn5i8IOs6XFxLo9L1Eg5cgDCq6Yaypr0,29393
+psutil/_compat.py,sha256=AOF0vSCWle_sbJ1Gw-CGx0aEI9yk5u70YhPYOPg3KHs,15349
+psutil/_psaix.py,sha256=1bqEwjk6IG3Y-zrDajKi8oPSYvq3NrqpPUQeDRo4Ugg,18749
+psutil/_psbsd.py,sha256=FruAJy_GrpjAfHLpb4c3IVGfy2Xii8b1BHnIjxUfbbI,31956
+psutil/_pslinux.py,sha256=QYI6yHTvRMYZlVxB068xVAEMEAGGRSU9E7-sJD3165o,88043
+psutil/_psosx.py,sha256=d_KMSzmjL6vAYQx1fQN57b3xazJGdTt4rIysODB1r2g,16209
+psutil/_psposix.py,sha256=X9rd7WHKQ6mUAn2ihb03MCnzrBtQsrPRkCouExmuagQ,8235
+psutil/_pssunos.py,sha256=Zx6eLY-0NRUFFIKP7SycktgDoottdnlA9aX8y4e74dY,25559
+psutil/_psutil_linux.abi3.so,sha256=onwm8BWn6axbKjPqB263JHm6rbDcEvMncKYhhpbu5I4,115304
+psutil/_psutil_posix.abi3.so,sha256=xNVKy1LdKcnigEe_BpaXfYEg4qad9MkedLwkYh4BaBk,71624
+psutil/_pswindows.py,sha256=BXgoASpIS6ccw5jTx4V-H2fMsIcSv_NQ6EozsxFgY-0,37734
+psutil/tests/__init__.py,sha256=gc621Vvgj2NaxusB_zGwfqwP_np3qaCQqJlyPrp1D0k,64753
+psutil/tests/__main__.py,sha256=f1YY6SZebctd5Hwb1in40nFShfJw4zA2FLidsdg_eY8,269
+psutil/tests/__pycache__/__init__.cpython-310.pyc,,
+psutil/tests/__pycache__/__main__.cpython-310.pyc,,
+psutil/tests/__pycache__/runner.cpython-310.pyc,,
+psutil/tests/__pycache__/test_aix.cpython-310.pyc,,
+psutil/tests/__pycache__/test_bsd.cpython-310.pyc,,
+psutil/tests/__pycache__/test_connections.cpython-310.pyc,,
+psutil/tests/__pycache__/test_contracts.cpython-310.pyc,,
+psutil/tests/__pycache__/test_linux.cpython-310.pyc,,
+psutil/tests/__pycache__/test_memleaks.cpython-310.pyc,,
+psutil/tests/__pycache__/test_misc.cpython-310.pyc,,
+psutil/tests/__pycache__/test_osx.cpython-310.pyc,,
+psutil/tests/__pycache__/test_posix.cpython-310.pyc,,
+psutil/tests/__pycache__/test_process.cpython-310.pyc,,
+psutil/tests/__pycache__/test_process_all.cpython-310.pyc,,
+psutil/tests/__pycache__/test_sunos.cpython-310.pyc,,
+psutil/tests/__pycache__/test_system.cpython-310.pyc,,
+psutil/tests/__pycache__/test_testutils.cpython-310.pyc,,
+psutil/tests/__pycache__/test_unicode.cpython-310.pyc,,
+psutil/tests/__pycache__/test_windows.cpython-310.pyc,,
+psutil/tests/runner.py,sha256=WtRnLZ5gS39gIysLCkeV99hw5LvodVwBGesoen9IMNs,11464
+psutil/tests/test_aix.py,sha256=8SKjFw7cR3byBShlvWAzQSOTjji5Bpnk8JyUksR0AQI,4585
+psutil/tests/test_bsd.py,sha256=kfNXLsZ1p-VoGtVX4At9qMOS_zN8OMVP9yecuHEWaC4,21245
+psutil/tests/test_connections.py,sha256=CHL65q2IYxb8ErtJAUh87RyKSCWewwmAg4VFK80arO8,21642
+psutil/tests/test_contracts.py,sha256=_TAWN7ldbgqJIdh6tMkRDivXPoH6T3jKYG47_KzDvtE,12998
+psutil/tests/test_linux.py,sha256=LPvhEPUPSQ4MdxBlARPcR4Thsd_wNqKkqj0JqlKkZCo,92530
+psutil/tests/test_memleaks.py,sha256=pzwEMUaz6Xh8AmN_qiCmYTU2yzot_dwsdbTfFdK89Vk,15012
+psutil/tests/test_misc.py,sha256=JovYMJNShu2yC3t2sfkFY5uW96TL7K5gkwXnjOR25aQ,35117
+psutil/tests/test_osx.py,sha256=MQsepO25TlfydaOxjqgEBM4Wri9SnCUkpBLEiQnKv2Q,6603
+psutil/tests/test_posix.py,sha256=sDam9vdJJWuYNddovhxi1c3K-z4AfTAD0fQyyaeD8YY,17387
+psutil/tests/test_process.py,sha256=lOq3nOyQQvKRJRaTREO0GB0ErOQGZeqxHsEbn5qs8P4,61535
+psutil/tests/test_process_all.py,sha256=qkLXnkVVYEqM2IhhID0CzEVWAmD4Ib7i0E4mUjo8J_Y,16112
+psutil/tests/test_sunos.py,sha256=NhZsHABJKjCEDzGA7ZL62s6NlqpJwpoDybaPB2Bm9HE,1310
+psutil/tests/test_system.py,sha256=rfzGP_ZuMHxzuTz3CuVxn8-zXNe5_9YCm1Z5JTKXqT8,37044
+psutil/tests/test_testutils.py,sha256=mBbsRBbUf4VmDrKTbvlSeVqo_5HTM9h4eTE_VZ_5eXc,14828
+psutil/tests/test_unicode.py,sha256=lKytzxNA72Zdhhz6jnXjVRm-3-79j1zeliPO1SMSVaE,12549
+psutil/tests/test_windows.py,sha256=D_fKdhp8rsXDF7OrBXLRPS7XMpqXAzqJzbrjPqlL5pE,35298
diff --git a/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..cd914569da29261e4b9b92baa594300a56d49711
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/WHEEL
@@ -0,0 +1,8 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: false
+Tag: cp36-abi3-manylinux_2_12_x86_64
+Tag: cp36-abi3-manylinux2010_x86_64
+Tag: cp36-abi3-manylinux_2_17_x86_64
+Tag: cp36-abi3-manylinux2014_x86_64
+
diff --git a/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a4d92cc08db6a0d8bfedbbbd620d1fb11f84677b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/psutil-5.9.8.dist-info/top_level.txt
@@ -0,0 +1 @@
+psutil
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/LICENSE.txt b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4ae44a0686912d59c1c4a58b858e6611e8b81958
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/LICENSE.txt
@@ -0,0 +1,933 @@
+Copyright (c) 2001-2002 Enthought, Inc. 2003-2024, SciPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following
+   disclaimer in the documentation and/or other materials provided
+   with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----
+
+This binary distribution of SciPy also bundles the following software:
+
+
+Name: OpenBLAS
+Files: scipy.libs/libopenblas*.so
+Description: bundled as a dynamically linked library
+Availability: https://github.com/OpenMathLib/OpenBLAS/
+License: BSD-3-Clause-Attribution
+  Copyright (c) 2011-2014, The OpenBLAS Project
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+  
+     1. Redistributions of source code must retain the above copyright
+        notice, this list of conditions and the following disclaimer.
+  
+     2. Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in
+        the documentation and/or other materials provided with the
+        distribution.
+     3. Neither the name of the OpenBLAS project nor the names of 
+        its contributors may be used to endorse or promote products 
+        derived from this software without specific prior written 
+        permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: LAPACK
+Files: scipy.libs/libopenblas*.so
+Description: bundled in OpenBLAS
+Availability: https://github.com/OpenMathLib/OpenBLAS/
+License: BSD-3-Clause-Attribution
+  Copyright (c) 1992-2013 The University of Tennessee and The University
+                          of Tennessee Research Foundation.  All rights
+                          reserved.
+  Copyright (c) 2000-2013 The University of California Berkeley. All
+                          rights reserved.
+  Copyright (c) 2006-2013 The University of Colorado Denver.  All rights
+                          reserved.
+  
+  $COPYRIGHT$
+  
+  Additional copyrights may follow
+  
+  $HEADER$
+  
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+  
+  - Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+  
+  - Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer listed
+    in this license in the documentation and/or other materials
+    provided with the distribution.
+  
+  - Neither the name of the copyright holders nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+  
+  The copyright holders provide no reassurances that the source code
+  provided does not infringe any patent, copyright, or any other
+  intellectual property rights of third parties.  The copyright holders
+  disclaim any liability to any recipient for claims brought against
+  recipient by any third party for infringement of that parties
+  intellectual property rights.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: GCC runtime library
+Files: scipy.libs/libgfortran*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
+License: GPL-3.0-with-GCC-exception
+  Copyright (C) 2002-2017 Free Software Foundation, Inc.
+  
+  Libgfortran is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3, or (at your option)
+  any later version.
+  
+  Libgfortran is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+  
+  Under Section 7 of GPL version 3, you are granted additional
+  permissions described in the GCC Runtime Library Exception, version
+  3.1, as published by the Free Software Foundation.
+  
+  You should have received a copy of the GNU General Public License and
+  a copy of the GCC Runtime Library Exception along with this program;
+  see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+  <http://www.gnu.org/licenses/>.
+
+----
+
+Full text of license texts referred to above follows (that they are
+listed below does not necessarily imply the conditions apply to the
+present binary release):
+
+----
+
+GCC RUNTIME LIBRARY EXCEPTION
+
+Version 3.1, 31 March 2009
+
+Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+This GCC Runtime Library Exception ("Exception") is an additional
+permission under section 7 of the GNU General Public License, version
+3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+bears a notice placed by the copyright holder of the file stating that
+the file is governed by GPLv3 along with this Exception.
+
+When you use GCC to compile a program, GCC may combine portions of
+certain GCC header files and runtime libraries with the compiled
+program. The purpose of this Exception is to allow compilation of
+non-GPL (including proprietary) programs to use, in this way, the
+header files and runtime libraries covered by this Exception.
+
+0. Definitions.
+
+A file is an "Independent Module" if it either requires the Runtime
+Library for execution after a Compilation Process, or makes use of an
+interface provided by the Runtime Library, but is not otherwise based
+on the Runtime Library.
+
+"GCC" means a version of the GNU Compiler Collection, with or without
+modifications, governed by version 3 (or a specified later version) of
+the GNU General Public License (GPL) with the option of using any
+subsequent versions published by the FSF.
+
+"GPL-compatible Software" is software whose conditions of propagation,
+modification and use would permit combination with GCC in accord with
+the license of GCC.
+
+"Target Code" refers to output from any compiler for a real or virtual
+target processor architecture, in executable form or suitable for
+input to an assembler, loader, linker and/or execution
+phase. Notwithstanding that, Target Code does not include data in any
+format that is used as a compiler intermediate representation, or used
+for producing a compiler intermediate representation.
+
+The "Compilation Process" transforms code entirely represented in
+non-intermediate languages designed for human-written code, and/or in
+Java Virtual Machine byte code, into Target Code. Thus, for example,
+use of source code generators and preprocessors need not be considered
+part of the Compilation Process, since the Compilation Process can be
+understood as starting with the output of the generators or
+preprocessors.
+
+A Compilation Process is "Eligible" if it is done using GCC, alone or
+with other GPL-compatible software, or if it is done without using any
+work based on GCC. For example, using non-GPL-compatible Software to
+optimize any GCC intermediate representations would not qualify as an
+Eligible Compilation Process.
+
+1. Grant of Additional Permission.
+
+You have permission to propagate a work of Target Code formed by
+combining the Runtime Library with Independent Modules, even if such
+propagation would otherwise violate the terms of GPLv3, provided that
+all Target Code was generated by Eligible Compilation Processes. You
+may then convey such a combination under terms of your choice,
+consistent with the licensing of the Independent Modules.
+
+2. No Weakening of GCC Copyleft.
+
+The availability of this Exception does not imply any general
+presumption that third-party software is unaffected by the copyleft
+requirements of the license of GCC.
+
+----
+
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+
+
+Name: libquadmath
+Files: scipy.libs/libquadmath*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+License: LGPL-2.1-or-later
+
+    GCC Quad-Precision Math Library
+    Copyright (C) 2010-2019 Free Software Foundation, Inc.
+    Written by Francois-Xavier Coudert  <fxcoudert@gcc.gnu.org>
+
+    This file is part of the libquadmath library.
+    Libquadmath is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Library General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    Libquadmath is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+    https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..b9b3bdbd0fa840b523e33652e3a880d2c9c24420
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/METADATA
@@ -0,0 +1,1074 @@
+Metadata-Version: 2.1
+Name: scipy
+Version: 1.13.0
+Summary: Fundamental algorithms for scientific computing in Python
+Home-page: https://scipy.org/
+Maintainer-Email: SciPy Developers <scipy-dev@python.org>
+License: Copyright (c) 2001-2002 Enthought, Inc. 2003-2024, SciPy Developers.
+        All rights reserved.
+        
+        Redistribution and use in source and binary forms, with or without
+        modification, are permitted provided that the following conditions
+        are met:
+        
+        1. Redistributions of source code must retain the above copyright
+           notice, this list of conditions and the following disclaimer.
+        
+        2. Redistributions in binary form must reproduce the above
+           copyright notice, this list of conditions and the following
+           disclaimer in the documentation and/or other materials provided
+           with the distribution.
+        
+        3. Neither the name of the copyright holder nor the names of its
+           contributors may be used to endorse or promote products derived
+           from this software without specific prior written permission.
+        
+        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+        "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+        LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+        A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+        OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+        SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+        LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+        DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+        THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+        (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+        OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+        
+        ----
+        
+        This binary distribution of SciPy also bundles the following software:
+        
+        
+        Name: OpenBLAS
+        Files: scipy.libs/libopenblas*.so
+        Description: bundled as a dynamically linked library
+        Availability: https://github.com/OpenMathLib/OpenBLAS/
+        License: BSD-3-Clause-Attribution
+          Copyright (c) 2011-2014, The OpenBLAS Project
+          All rights reserved.
+          
+          Redistribution and use in source and binary forms, with or without
+          modification, are permitted provided that the following conditions are
+          met:
+          
+             1. Redistributions of source code must retain the above copyright
+                notice, this list of conditions and the following disclaimer.
+          
+             2. Redistributions in binary form must reproduce the above copyright
+                notice, this list of conditions and the following disclaimer in
+                the documentation and/or other materials provided with the
+                distribution.
+             3. Neither the name of the OpenBLAS project nor the names of 
+                its contributors may be used to endorse or promote products 
+                derived from this software without specific prior written 
+                permission.
+          
+          THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+          AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+          IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+          ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+          LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+          DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+          SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+          CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+          OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+          USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+        
+        
+        Name: LAPACK
+        Files: scipy.libs/libopenblas*.so
+        Description: bundled in OpenBLAS
+        Availability: https://github.com/OpenMathLib/OpenBLAS/
+        License: BSD-3-Clause-Attribution
+          Copyright (c) 1992-2013 The University of Tennessee and The University
+                                  of Tennessee Research Foundation.  All rights
+                                  reserved.
+          Copyright (c) 2000-2013 The University of California Berkeley. All
+                                  rights reserved.
+          Copyright (c) 2006-2013 The University of Colorado Denver.  All rights
+                                  reserved.
+          
+          $COPYRIGHT$
+          
+          Additional copyrights may follow
+          
+          $HEADER$
+          
+          Redistribution and use in source and binary forms, with or without
+          modification, are permitted provided that the following conditions are
+          met:
+          
+          - Redistributions of source code must retain the above copyright
+            notice, this list of conditions and the following disclaimer.
+          
+          - Redistributions in binary form must reproduce the above copyright
+            notice, this list of conditions and the following disclaimer listed
+            in this license in the documentation and/or other materials
+            provided with the distribution.
+          
+          - Neither the name of the copyright holders nor the names of its
+            contributors may be used to endorse or promote products derived from
+            this software without specific prior written permission.
+          
+          The copyright holders provide no reassurances that the source code
+          provided does not infringe any patent, copyright, or any other
+          intellectual property rights of third parties.  The copyright holders
+          disclaim any liability to any recipient for claims brought against
+          recipient by any third party for infringement of that parties
+          intellectual property rights.
+          
+          THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+          "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+          LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+          A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+          OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+          SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+          LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+          DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+          THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+          (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+          OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+        
+        
+        Name: GCC runtime library
+        Files: scipy.libs/libgfortran*.so
+        Description: dynamically linked to files compiled with gcc
+        Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
+        License: GPL-3.0-with-GCC-exception
+          Copyright (C) 2002-2017 Free Software Foundation, Inc.
+          
+          Libgfortran is free software; you can redistribute it and/or modify
+          it under the terms of the GNU General Public License as published by
+          the Free Software Foundation; either version 3, or (at your option)
+          any later version.
+          
+          Libgfortran is distributed in the hope that it will be useful,
+          but WITHOUT ANY WARRANTY; without even the implied warranty of
+          MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+          GNU General Public License for more details.
+          
+          Under Section 7 of GPL version 3, you are granted additional
+          permissions described in the GCC Runtime Library Exception, version
+          3.1, as published by the Free Software Foundation.
+          
+          You should have received a copy of the GNU General Public License and
+          a copy of the GCC Runtime Library Exception along with this program;
+          see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+          <http://www.gnu.org/licenses/>.
+        
+        ----
+        
+        Full text of license texts referred to above follows (that they are
+        listed below does not necessarily imply the conditions apply to the
+        present binary release):
+        
+        ----
+        
+        GCC RUNTIME LIBRARY EXCEPTION
+        
+        Version 3.1, 31 March 2009
+        
+        Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+        
+        Everyone is permitted to copy and distribute verbatim copies of this
+        license document, but changing it is not allowed.
+        
+        This GCC Runtime Library Exception ("Exception") is an additional
+        permission under section 7 of the GNU General Public License, version
+        3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+        bears a notice placed by the copyright holder of the file stating that
+        the file is governed by GPLv3 along with this Exception.
+        
+        When you use GCC to compile a program, GCC may combine portions of
+        certain GCC header files and runtime libraries with the compiled
+        program. The purpose of this Exception is to allow compilation of
+        non-GPL (including proprietary) programs to use, in this way, the
+        header files and runtime libraries covered by this Exception.
+        
+        0. Definitions.
+        
+        A file is an "Independent Module" if it either requires the Runtime
+        Library for execution after a Compilation Process, or makes use of an
+        interface provided by the Runtime Library, but is not otherwise based
+        on the Runtime Library.
+        
+        "GCC" means a version of the GNU Compiler Collection, with or without
+        modifications, governed by version 3 (or a specified later version) of
+        the GNU General Public License (GPL) with the option of using any
+        subsequent versions published by the FSF.
+        
+        "GPL-compatible Software" is software whose conditions of propagation,
+        modification and use would permit combination with GCC in accord with
+        the license of GCC.
+        
+        "Target Code" refers to output from any compiler for a real or virtual
+        target processor architecture, in executable form or suitable for
+        input to an assembler, loader, linker and/or execution
+        phase. Notwithstanding that, Target Code does not include data in any
+        format that is used as a compiler intermediate representation, or used
+        for producing a compiler intermediate representation.
+        
+        The "Compilation Process" transforms code entirely represented in
+        non-intermediate languages designed for human-written code, and/or in
+        Java Virtual Machine byte code, into Target Code. Thus, for example,
+        use of source code generators and preprocessors need not be considered
+        part of the Compilation Process, since the Compilation Process can be
+        understood as starting with the output of the generators or
+        preprocessors.
+        
+        A Compilation Process is "Eligible" if it is done using GCC, alone or
+        with other GPL-compatible software, or if it is done without using any
+        work based on GCC. For example, using non-GPL-compatible Software to
+        optimize any GCC intermediate representations would not qualify as an
+        Eligible Compilation Process.
+        
+        1. Grant of Additional Permission.
+        
+        You have permission to propagate a work of Target Code formed by
+        combining the Runtime Library with Independent Modules, even if such
+        propagation would otherwise violate the terms of GPLv3, provided that
+        all Target Code was generated by Eligible Compilation Processes. You
+        may then convey such a combination under terms of your choice,
+        consistent with the licensing of the Independent Modules.
+        
+        2. No Weakening of GCC Copyleft.
+        
+        The availability of this Exception does not imply any general
+        presumption that third-party software is unaffected by the copyleft
+        requirements of the license of GCC.
+        
+        ----
+        
+                            GNU GENERAL PUBLIC LICENSE
+                               Version 3, 29 June 2007
+        
+         Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+         Everyone is permitted to copy and distribute verbatim copies
+         of this license document, but changing it is not allowed.
+        
+                                    Preamble
+        
+          The GNU General Public License is a free, copyleft license for
+        software and other kinds of works.
+        
+          The licenses for most software and other practical works are designed
+        to take away your freedom to share and change the works.  By contrast,
+        the GNU General Public License is intended to guarantee your freedom to
+        share and change all versions of a program--to make sure it remains free
+        software for all its users.  We, the Free Software Foundation, use the
+        GNU General Public License for most of our software; it applies also to
+        any other work released this way by its authors.  You can apply it to
+        your programs, too.
+        
+          When we speak of free software, we are referring to freedom, not
+        price.  Our General Public Licenses are designed to make sure that you
+        have the freedom to distribute copies of free software (and charge for
+        them if you wish), that you receive source code or can get it if you
+        want it, that you can change the software or use pieces of it in new
+        free programs, and that you know you can do these things.
+        
+          To protect your rights, we need to prevent others from denying you
+        these rights or asking you to surrender the rights.  Therefore, you have
+        certain responsibilities if you distribute copies of the software, or if
+        you modify it: responsibilities to respect the freedom of others.
+        
+          For example, if you distribute copies of such a program, whether
+        gratis or for a fee, you must pass on to the recipients the same
+        freedoms that you received.  You must make sure that they, too, receive
+        or can get the source code.  And you must show them these terms so they
+        know their rights.
+        
+          Developers that use the GNU GPL protect your rights with two steps:
+        (1) assert copyright on the software, and (2) offer you this License
+        giving you legal permission to copy, distribute and/or modify it.
+        
+          For the developers' and authors' protection, the GPL clearly explains
+        that there is no warranty for this free software.  For both users' and
+        authors' sake, the GPL requires that modified versions be marked as
+        changed, so that their problems will not be attributed erroneously to
+        authors of previous versions.
+        
+          Some devices are designed to deny users access to install or run
+        modified versions of the software inside them, although the manufacturer
+        can do so.  This is fundamentally incompatible with the aim of
+        protecting users' freedom to change the software.  The systematic
+        pattern of such abuse occurs in the area of products for individuals to
+        use, which is precisely where it is most unacceptable.  Therefore, we
+        have designed this version of the GPL to prohibit the practice for those
+        products.  If such problems arise substantially in other domains, we
+        stand ready to extend this provision to those domains in future versions
+        of the GPL, as needed to protect the freedom of users.
+        
+          Finally, every program is threatened constantly by software patents.
+        States should not allow patents to restrict development and use of
+        software on general-purpose computers, but in those that do, we wish to
+        avoid the special danger that patents applied to a free program could
+        make it effectively proprietary.  To prevent this, the GPL assures that
+        patents cannot be used to render the program non-free.
+        
+          The precise terms and conditions for copying, distribution and
+        modification follow.
+        
+                               TERMS AND CONDITIONS
+        
+          0. Definitions.
+        
+          "This License" refers to version 3 of the GNU General Public License.
+        
+          "Copyright" also means copyright-like laws that apply to other kinds of
+        works, such as semiconductor masks.
+        
+          "The Program" refers to any copyrightable work licensed under this
+        License.  Each licensee is addressed as "you".  "Licensees" and
+        "recipients" may be individuals or organizations.
+        
+          To "modify" a work means to copy from or adapt all or part of the work
+        in a fashion requiring copyright permission, other than the making of an
+        exact copy.  The resulting work is called a "modified version" of the
+        earlier work or a work "based on" the earlier work.
+        
+          A "covered work" means either the unmodified Program or a work based
+        on the Program.
+        
+          To "propagate" a work means to do anything with it that, without
+        permission, would make you directly or secondarily liable for
+        infringement under applicable copyright law, except executing it on a
+        computer or modifying a private copy.  Propagation includes copying,
+        distribution (with or without modification), making available to the
+        public, and in some countries other activities as well.
+        
+          To "convey" a work means any kind of propagation that enables other
+        parties to make or receive copies.  Mere interaction with a user through
+        a computer network, with no transfer of a copy, is not conveying.
+        
+          An interactive user interface displays "Appropriate Legal Notices"
+        to the extent that it includes a convenient and prominently visible
+        feature that (1) displays an appropriate copyright notice, and (2)
+        tells the user that there is no warranty for the work (except to the
+        extent that warranties are provided), that licensees may convey the
+        work under this License, and how to view a copy of this License.  If
+        the interface presents a list of user commands or options, such as a
+        menu, a prominent item in the list meets this criterion.
+        
+          1. Source Code.
+        
+          The "source code" for a work means the preferred form of the work
+        for making modifications to it.  "Object code" means any non-source
+        form of a work.
+        
+          A "Standard Interface" means an interface that either is an official
+        standard defined by a recognized standards body, or, in the case of
+        interfaces specified for a particular programming language, one that
+        is widely used among developers working in that language.
+        
+          The "System Libraries" of an executable work include anything, other
+        than the work as a whole, that (a) is included in the normal form of
+        packaging a Major Component, but which is not part of that Major
+        Component, and (b) serves only to enable use of the work with that
+        Major Component, or to implement a Standard Interface for which an
+        implementation is available to the public in source code form.  A
+        "Major Component", in this context, means a major essential component
+        (kernel, window system, and so on) of the specific operating system
+        (if any) on which the executable work runs, or a compiler used to
+        produce the work, or an object code interpreter used to run it.
+        
+          The "Corresponding Source" for a work in object code form means all
+        the source code needed to generate, install, and (for an executable
+        work) run the object code and to modify the work, including scripts to
+        control those activities.  However, it does not include the work's
+        System Libraries, or general-purpose tools or generally available free
+        programs which are used unmodified in performing those activities but
+        which are not part of the work.  For example, Corresponding Source
+        includes interface definition files associated with source files for
+        the work, and the source code for shared libraries and dynamically
+        linked subprograms that the work is specifically designed to require,
+        such as by intimate data communication or control flow between those
+        subprograms and other parts of the work.
+        
+          The Corresponding Source need not include anything that users
+        can regenerate automatically from other parts of the Corresponding
+        Source.
+        
+          The Corresponding Source for a work in source code form is that
+        same work.
+        
+          2. Basic Permissions.
+        
+          All rights granted under this License are granted for the term of
+        copyright on the Program, and are irrevocable provided the stated
+        conditions are met.  This License explicitly affirms your unlimited
+        permission to run the unmodified Program.  The output from running a
+        covered work is covered by this License only if the output, given its
+        content, constitutes a covered work.  This License acknowledges your
+        rights of fair use or other equivalent, as provided by copyright law.
+        
+          You may make, run and propagate covered works that you do not
+        convey, without conditions so long as your license otherwise remains
+        in force.  You may convey covered works to others for the sole purpose
+        of having them make modifications exclusively for you, or provide you
+        with facilities for running those works, provided that you comply with
+        the terms of this License in conveying all material for which you do
+        not control copyright.  Those thus making or running the covered works
+        for you must do so exclusively on your behalf, under your direction
+        and control, on terms that prohibit them from making any copies of
+        your copyrighted material outside their relationship with you.
+        
+          Conveying under any other circumstances is permitted solely under
+        the conditions stated below.  Sublicensing is not allowed; section 10
+        makes it unnecessary.
+        
+          3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+        
+          No covered work shall be deemed part of an effective technological
+        measure under any applicable law fulfilling obligations under article
+        11 of the WIPO copyright treaty adopted on 20 December 1996, or
+        similar laws prohibiting or restricting circumvention of such
+        measures.
+        
+          When you convey a covered work, you waive any legal power to forbid
+        circumvention of technological measures to the extent such circumvention
+        is effected by exercising rights under this License with respect to
+        the covered work, and you disclaim any intention to limit operation or
+        modification of the work as a means of enforcing, against the work's
+        users, your or third parties' legal rights to forbid circumvention of
+        technological measures.
+        
+          4. Conveying Verbatim Copies.
+        
+          You may convey verbatim copies of the Program's source code as you
+        receive it, in any medium, provided that you conspicuously and
+        appropriately publish on each copy an appropriate copyright notice;
+        keep intact all notices stating that this License and any
+        non-permissive terms added in accord with section 7 apply to the code;
+        keep intact all notices of the absence of any warranty; and give all
+        recipients a copy of this License along with the Program.
+        
+          You may charge any price or no price for each copy that you convey,
+        and you may offer support or warranty protection for a fee.
+        
+          5. Conveying Modified Source Versions.
+        
+          You may convey a work based on the Program, or the modifications to
+        produce it from the Program, in the form of source code under the
+        terms of section 4, provided that you also meet all of these conditions:
+        
+            a) The work must carry prominent notices stating that you modified
+            it, and giving a relevant date.
+        
+            b) The work must carry prominent notices stating that it is
+            released under this License and any conditions added under section
+            7.  This requirement modifies the requirement in section 4 to
+            "keep intact all notices".
+        
+            c) You must license the entire work, as a whole, under this
+            License to anyone who comes into possession of a copy.  This
+            License will therefore apply, along with any applicable section 7
+            additional terms, to the whole of the work, and all its parts,
+            regardless of how they are packaged.  This License gives no
+            permission to license the work in any other way, but it does not
+            invalidate such permission if you have separately received it.
+        
+            d) If the work has interactive user interfaces, each must display
+            Appropriate Legal Notices; however, if the Program has interactive
+            interfaces that do not display Appropriate Legal Notices, your
+            work need not make them do so.
+        
+          A compilation of a covered work with other separate and independent
+        works, which are not by their nature extensions of the covered work,
+        and which are not combined with it such as to form a larger program,
+        in or on a volume of a storage or distribution medium, is called an
+        "aggregate" if the compilation and its resulting copyright are not
+        used to limit the access or legal rights of the compilation's users
+        beyond what the individual works permit.  Inclusion of a covered work
+        in an aggregate does not cause this License to apply to the other
+        parts of the aggregate.
+        
+          6. Conveying Non-Source Forms.
+        
+          You may convey a covered work in object code form under the terms
+        of sections 4 and 5, provided that you also convey the
+        machine-readable Corresponding Source under the terms of this License,
+        in one of these ways:
+        
+            a) Convey the object code in, or embodied in, a physical product
+            (including a physical distribution medium), accompanied by the
+            Corresponding Source fixed on a durable physical medium
+            customarily used for software interchange.
+        
+            b) Convey the object code in, or embodied in, a physical product
+            (including a physical distribution medium), accompanied by a
+            written offer, valid for at least three years and valid for as
+            long as you offer spare parts or customer support for that product
+            model, to give anyone who possesses the object code either (1) a
+            copy of the Corresponding Source for all the software in the
+            product that is covered by this License, on a durable physical
+            medium customarily used for software interchange, for a price no
+            more than your reasonable cost of physically performing this
+            conveying of source, or (2) access to copy the
+            Corresponding Source from a network server at no charge.
+        
+            c) Convey individual copies of the object code with a copy of the
+            written offer to provide the Corresponding Source.  This
+            alternative is allowed only occasionally and noncommercially, and
+            only if you received the object code with such an offer, in accord
+            with subsection 6b.
+        
+            d) Convey the object code by offering access from a designated
+            place (gratis or for a charge), and offer equivalent access to the
+            Corresponding Source in the same way through the same place at no
+            further charge.  You need not require recipients to copy the
+            Corresponding Source along with the object code.  If the place to
+            copy the object code is a network server, the Corresponding Source
+            may be on a different server (operated by you or a third party)
+            that supports equivalent copying facilities, provided you maintain
+            clear directions next to the object code saying where to find the
+            Corresponding Source.  Regardless of what server hosts the
+            Corresponding Source, you remain obligated to ensure that it is
+            available for as long as needed to satisfy these requirements.
+        
+            e) Convey the object code using peer-to-peer transmission, provided
+            you inform other peers where the object code and Corresponding
+            Source of the work are being offered to the general public at no
+            charge under subsection 6d.
+        
+          A separable portion of the object code, whose source code is excluded
+        from the Corresponding Source as a System Library, need not be
+        included in conveying the object code work.
+        
+          A "User Product" is either (1) a "consumer product", which means any
+        tangible personal property which is normally used for personal, family,
+        or household purposes, or (2) anything designed or sold for incorporation
+        into a dwelling.  In determining whether a product is a consumer product,
+        doubtful cases shall be resolved in favor of coverage.  For a particular
+        product received by a particular user, "normally used" refers to a
+        typical or common use of that class of product, regardless of the status
+        of the particular user or of the way in which the particular user
+        actually uses, or expects or is expected to use, the product.  A product
+        is a consumer product regardless of whether the product has substantial
+        commercial, industrial or non-consumer uses, unless such uses represent
+        the only significant mode of use of the product.
+        
+          "Installation Information" for a User Product means any methods,
+        procedures, authorization keys, or other information required to install
+        and execute modified versions of a covered work in that User Product from
+        a modified version of its Corresponding Source.  The information must
+        suffice to ensure that the continued functioning of the modified object
+        code is in no case prevented or interfered with solely because
+        modification has been made.
+        
+          If you convey an object code work under this section in, or with, or
+        specifically for use in, a User Product, and the conveying occurs as
+        part of a transaction in which the right of possession and use of the
+        User Product is transferred to the recipient in perpetuity or for a
+        fixed term (regardless of how the transaction is characterized), the
+        Corresponding Source conveyed under this section must be accompanied
+        by the Installation Information.  But this requirement does not apply
+        if neither you nor any third party retains the ability to install
+        modified object code on the User Product (for example, the work has
+        been installed in ROM).
+        
+          The requirement to provide Installation Information does not include a
+        requirement to continue to provide support service, warranty, or updates
+        for a work that has been modified or installed by the recipient, or for
+        the User Product in which it has been modified or installed.  Access to a
+        network may be denied when the modification itself materially and
+        adversely affects the operation of the network or violates the rules and
+        protocols for communication across the network.
+        
+          Corresponding Source conveyed, and Installation Information provided,
+        in accord with this section must be in a format that is publicly
+        documented (and with an implementation available to the public in
+        source code form), and must require no special password or key for
+        unpacking, reading or copying.
+        
+          7. Additional Terms.
+        
+          "Additional permissions" are terms that supplement the terms of this
+        License by making exceptions from one or more of its conditions.
+        Additional permissions that are applicable to the entire Program shall
+        be treated as though they were included in this License, to the extent
+        that they are valid under applicable law.  If additional permissions
+        apply only to part of the Program, that part may be used separately
+        under those permissions, but the entire Program remains governed by
+        this License without regard to the additional permissions.
+        
+          When you convey a copy of a covered work, you may at your option
+        remove any additional permissions from that copy, or from any part of
+        it.  (Additional permissions may be written to require their own
+        removal in certain cases when you modify the work.)  You may place
+        additional permissions on material, added by you to a covered work,
+        for which you have or can give appropriate copyright permission.
+        
+          Notwithstanding any other provision of this License, for material you
+        add to a covered work, you may (if authorized by the copyright holders of
+        that material) supplement the terms of this License with terms:
+        
+            a) Disclaiming warranty or limiting liability differently from the
+            terms of sections 15 and 16 of this License; or
+        
+            b) Requiring preservation of specified reasonable legal notices or
+            author attributions in that material or in the Appropriate Legal
+            Notices displayed by works containing it; or
+        
+            c) Prohibiting misrepresentation of the origin of that material, or
+            requiring that modified versions of such material be marked in
+            reasonable ways as different from the original version; or
+        
+            d) Limiting the use for publicity purposes of names of licensors or
+            authors of the material; or
+        
+            e) Declining to grant rights under trademark law for use of some
+            trade names, trademarks, or service marks; or
+        
+            f) Requiring indemnification of licensors and authors of that
+            material by anyone who conveys the material (or modified versions of
+            it) with contractual assumptions of liability to the recipient, for
+            any liability that these contractual assumptions directly impose on
+            those licensors and authors.
+        
+          All other non-permissive additional terms are considered "further
+        restrictions" within the meaning of section 10.  If the Program as you
+        received it, or any part of it, contains a notice stating that it is
+        governed by this License along with a term that is a further
+        restriction, you may remove that term.  If a license document contains
+        a further restriction but permits relicensing or conveying under this
+        License, you may add to a covered work material governed by the terms
+        of that license document, provided that the further restriction does
+        not survive such relicensing or conveying.
+        
+          If you add terms to a covered work in accord with this section, you
+        must place, in the relevant source files, a statement of the
+        additional terms that apply to those files, or a notice indicating
+        where to find the applicable terms.
+        
+          Additional terms, permissive or non-permissive, may be stated in the
+        form of a separately written license, or stated as exceptions;
+        the above requirements apply either way.
+        
+          8. Termination.
+        
+          You may not propagate or modify a covered work except as expressly
+        provided under this License.  Any attempt otherwise to propagate or
+        modify it is void, and will automatically terminate your rights under
+        this License (including any patent licenses granted under the third
+        paragraph of section 11).
+        
+          However, if you cease all violation of this License, then your
+        license from a particular copyright holder is reinstated (a)
+        provisionally, unless and until the copyright holder explicitly and
+        finally terminates your license, and (b) permanently, if the copyright
+        holder fails to notify you of the violation by some reasonable means
+        prior to 60 days after the cessation.
+        
+          Moreover, your license from a particular copyright holder is
+        reinstated permanently if the copyright holder notifies you of the
+        violation by some reasonable means, this is the first time you have
+        received notice of violation of this License (for any work) from that
+        copyright holder, and you cure the violation prior to 30 days after
+        your receipt of the notice.
+        
+          Termination of your rights under this section does not terminate the
+        licenses of parties who have received copies or rights from you under
+        this License.  If your rights have been terminated and not permanently
+        reinstated, you do not qualify to receive new licenses for the same
+        material under section 10.
+        
+          9. Acceptance Not Required for Having Copies.
+        
+          You are not required to accept this License in order to receive or
+        run a copy of the Program.  Ancillary propagation of a covered work
+        occurring solely as a consequence of using peer-to-peer transmission
+        to receive a copy likewise does not require acceptance.  However,
+        nothing other than this License grants you permission to propagate or
+        modify any covered work.  These actions infringe copyright if you do
+        not accept this License.  Therefore, by modifying or propagating a
+        covered work, you indicate your acceptance of this License to do so.
+        
+          10. Automatic Licensing of Downstream Recipients.
+        
+          Each time you convey a covered work, the recipient automatically
+        receives a license from the original licensors, to run, modify and
+        propagate that work, subject to this License.  You are not responsible
+        for enforcing compliance by third parties with this License.
+        
+          An "entity transaction" is a transaction transferring control of an
+        organization, or substantially all assets of one, or subdividing an
+        organization, or merging organizations.  If propagation of a covered
+        work results from an entity transaction, each party to that
+        transaction who receives a copy of the work also receives whatever
+        licenses to the work the party's predecessor in interest had or could
+        give under the previous paragraph, plus a right to possession of the
+        Corresponding Source of the work from the predecessor in interest, if
+        the predecessor has it or can get it with reasonable efforts.
+        
+          You may not impose any further restrictions on the exercise of the
+        rights granted or affirmed under this License.  For example, you may
+        not impose a license fee, royalty, or other charge for exercise of
+        rights granted under this License, and you may not initiate litigation
+        (including a cross-claim or counterclaim in a lawsuit) alleging that
+        any patent claim is infringed by making, using, selling, offering for
+        sale, or importing the Program or any portion of it.
+        
+          11. Patents.
+        
+          A "contributor" is a copyright holder who authorizes use under this
+        License of the Program or a work on which the Program is based.  The
+        work thus licensed is called the contributor's "contributor version".
+        
+          A contributor's "essential patent claims" are all patent claims
+        owned or controlled by the contributor, whether already acquired or
+        hereafter acquired, that would be infringed by some manner, permitted
+        by this License, of making, using, or selling its contributor version,
+        but do not include claims that would be infringed only as a
+        consequence of further modification of the contributor version.  For
+        purposes of this definition, "control" includes the right to grant
+        patent sublicenses in a manner consistent with the requirements of
+        this License.
+        
+          Each contributor grants you a non-exclusive, worldwide, royalty-free
+        patent license under the contributor's essential patent claims, to
+        make, use, sell, offer for sale, import and otherwise run, modify and
+        propagate the contents of its contributor version.
+        
+          In the following three paragraphs, a "patent license" is any express
+        agreement or commitment, however denominated, not to enforce a patent
+        (such as an express permission to practice a patent or covenant not to
+        sue for patent infringement).  To "grant" such a patent license to a
+        party means to make such an agreement or commitment not to enforce a
+        patent against the party.
+        
+          If you convey a covered work, knowingly relying on a patent license,
+        and the Corresponding Source of the work is not available for anyone
+        to copy, free of charge and under the terms of this License, through a
+        publicly available network server or other readily accessible means,
+        then you must either (1) cause the Corresponding Source to be so
+        available, or (2) arrange to deprive yourself of the benefit of the
+        patent license for this particular work, or (3) arrange, in a manner
+        consistent with the requirements of this License, to extend the patent
+        license to downstream recipients.  "Knowingly relying" means you have
+        actual knowledge that, but for the patent license, your conveying the
+        covered work in a country, or your recipient's use of the covered work
+        in a country, would infringe one or more identifiable patents in that
+        country that you have reason to believe are valid.
+        
+          If, pursuant to or in connection with a single transaction or
+        arrangement, you convey, or propagate by procuring conveyance of, a
+        covered work, and grant a patent license to some of the parties
+        receiving the covered work authorizing them to use, propagate, modify
+        or convey a specific copy of the covered work, then the patent license
+        you grant is automatically extended to all recipients of the covered
+        work and works based on it.
+        
+          A patent license is "discriminatory" if it does not include within
+        the scope of its coverage, prohibits the exercise of, or is
+        conditioned on the non-exercise of one or more of the rights that are
+        specifically granted under this License.  You may not convey a covered
+        work if you are a party to an arrangement with a third party that is
+        in the business of distributing software, under which you make payment
+        to the third party based on the extent of your activity of conveying
+        the work, and under which the third party grants, to any of the
+        parties who would receive the covered work from you, a discriminatory
+        patent license (a) in connection with copies of the covered work
+        conveyed by you (or copies made from those copies), or (b) primarily
+        for and in connection with specific products or compilations that
+        contain the covered work, unless you entered into that arrangement,
+        or that patent license was granted, prior to 28 March 2007.
+        
+          Nothing in this License shall be construed as excluding or limiting
+        any implied license or other defenses to infringement that may
+        otherwise be available to you under applicable patent law.
+        
+          12. No Surrender of Others' Freedom.
+        
+          If conditions are imposed on you (whether by court order, agreement or
+        otherwise) that contradict the conditions of this License, they do not
+        excuse you from the conditions of this License.  If you cannot convey a
+        covered work so as to satisfy simultaneously your obligations under this
+        License and any other pertinent obligations, then as a consequence you may
+        not convey it at all.  For example, if you agree to terms that obligate you
+        to collect a royalty for further conveying from those to whom you convey
+        the Program, the only way you could satisfy both those terms and this
+        License would be to refrain entirely from conveying the Program.
+        
+          13. Use with the GNU Affero General Public License.
+        
+          Notwithstanding any other provision of this License, you have
+        permission to link or combine any covered work with a work licensed
+        under version 3 of the GNU Affero General Public License into a single
+        combined work, and to convey the resulting work.  The terms of this
+        License will continue to apply to the part which is the covered work,
+        but the special requirements of the GNU Affero General Public License,
+        section 13, concerning interaction through a network will apply to the
+        combination as such.
+        
+          14. Revised Versions of this License.
+        
+          The Free Software Foundation may publish revised and/or new versions of
+        the GNU General Public License from time to time.  Such new versions will
+        be similar in spirit to the present version, but may differ in detail to
+        address new problems or concerns.
+        
+          Each version is given a distinguishing version number.  If the
+        Program specifies that a certain numbered version of the GNU General
+        Public License "or any later version" applies to it, you have the
+        option of following the terms and conditions either of that numbered
+        version or of any later version published by the Free Software
+        Foundation.  If the Program does not specify a version number of the
+        GNU General Public License, you may choose any version ever published
+        by the Free Software Foundation.
+        
+          If the Program specifies that a proxy can decide which future
+        versions of the GNU General Public License can be used, that proxy's
+        public statement of acceptance of a version permanently authorizes you
+        to choose that version for the Program.
+        
+          Later license versions may give you additional or different
+        permissions.  However, no additional obligations are imposed on any
+        author or copyright holder as a result of your choosing to follow a
+        later version.
+        
+          15. Disclaimer of Warranty.
+        
+          THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+        APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+        HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+        OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+        THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+        PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+        IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+        ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+        
+          16. Limitation of Liability.
+        
+          IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+        WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+        THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+        GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+        USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+        DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+        PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+        EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+        SUCH DAMAGES.
+        
+          17. Interpretation of Sections 15 and 16.
+        
+          If the disclaimer of warranty and limitation of liability provided
+        above cannot be given local legal effect according to their terms,
+        reviewing courts shall apply local law that most closely approximates
+        an absolute waiver of all civil liability in connection with the
+        Program, unless a warranty or assumption of liability accompanies a
+        copy of the Program in return for a fee.
+        
+                             END OF TERMS AND CONDITIONS
+        
+                    How to Apply These Terms to Your New Programs
+        
+          If you develop a new program, and you want it to be of the greatest
+        possible use to the public, the best way to achieve this is to make it
+        free software which everyone can redistribute and change under these terms.
+        
+          To do so, attach the following notices to the program.  It is safest
+        to attach them to the start of each source file to most effectively
+        state the exclusion of warranty; and each file should have at least
+        the "copyright" line and a pointer to where the full notice is found.
+        
+            <one line to give the program's name and a brief idea of what it does.>
+            Copyright (C) <year>  <name of author>
+        
+            This program is free software: you can redistribute it and/or modify
+            it under the terms of the GNU General Public License as published by
+            the Free Software Foundation, either version 3 of the License, or
+            (at your option) any later version.
+        
+            This program is distributed in the hope that it will be useful,
+            but WITHOUT ANY WARRANTY; without even the implied warranty of
+            MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+            GNU General Public License for more details.
+        
+            You should have received a copy of the GNU General Public License
+            along with this program.  If not, see <https://www.gnu.org/licenses/>.
+        
+        Also add information on how to contact you by electronic and paper mail.
+        
+          If the program does terminal interaction, make it output a short
+        notice like this when it starts in an interactive mode:
+        
+            <program>  Copyright (C) <year>  <name of author>
+            This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+            This is free software, and you are welcome to redistribute it
+            under certain conditions; type `show c' for details.
+        
+        The hypothetical commands `show w' and `show c' should show the appropriate
+        parts of the General Public License.  Of course, your program's commands
+        might be different; for a GUI interface, you would use an "about box".
+        
+          You should also get your employer (if you work as a programmer) or school,
+        if any, to sign a "copyright disclaimer" for the program, if necessary.
+        For more information on this, and how to apply and follow the GNU GPL, see
+        <https://www.gnu.org/licenses/>.
+        
+          The GNU General Public License does not permit incorporating your program
+        into proprietary programs.  If your program is a subroutine library, you
+        may consider it more useful to permit linking proprietary applications with
+        the library.  If this is what you want to do, use the GNU Lesser General
+        Public License instead of this License.  But first, please read
+        <https://www.gnu.org/licenses/why-not-lgpl.html>.
+        
+        
+        Name: libquadmath
+        Files: scipy.libs/libquadmath*.so
+        Description: dynamically linked to files compiled with gcc
+        Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+        License: LGPL-2.1-or-later
+        
+            GCC Quad-Precision Math Library
+            Copyright (C) 2010-2019 Free Software Foundation, Inc.
+            Written by Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+        
+            This file is part of the libquadmath library.
+            Libquadmath is free software; you can redistribute it and/or
+            modify it under the terms of the GNU Library General Public
+            License as published by the Free Software Foundation; either
+            version 2.1 of the License, or (at your option) any later version.
+        
+            Libquadmath is distributed in the hope that it will be useful,
+            but WITHOUT ANY WARRANTY; without even the implied warranty of
+            MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+            Lesser General Public License for more details.
+            https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Scientific/Engineering
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Operating System :: MacOS
+Project-URL: Homepage, https://scipy.org/
+Project-URL: Documentation, https://docs.scipy.org/doc/scipy/
+Project-URL: Source, https://github.com/scipy/scipy
+Project-URL: Download, https://github.com/scipy/scipy/releases
+Project-URL: Tracker, https://github.com/scipy/scipy/issues
+Requires-Python: >=3.9
+Requires-Dist: numpy<2.3,>=1.22.4
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: pytest-cov; extra == "test"
+Requires-Dist: pytest-timeout; extra == "test"
+Requires-Dist: pytest-xdist; extra == "test"
+Requires-Dist: asv; extra == "test"
+Requires-Dist: mpmath; extra == "test"
+Requires-Dist: gmpy2; extra == "test"
+Requires-Dist: threadpoolctl; extra == "test"
+Requires-Dist: scikit-umfpack; extra == "test"
+Requires-Dist: pooch; extra == "test"
+Requires-Dist: hypothesis>=6.30; extra == "test"
+Requires-Dist: array-api-strict; extra == "test"
+Requires-Dist: sphinx>=5.0.0; extra == "doc"
+Requires-Dist: pydata-sphinx-theme>=0.15.2; extra == "doc"
+Requires-Dist: sphinx-design>=0.4.0; extra == "doc"
+Requires-Dist: matplotlib>=3.5; extra == "doc"
+Requires-Dist: numpydoc; extra == "doc"
+Requires-Dist: jupytext; extra == "doc"
+Requires-Dist: myst-nb; extra == "doc"
+Requires-Dist: pooch; extra == "doc"
+Requires-Dist: jupyterlite-sphinx>=0.12.0; extra == "doc"
+Requires-Dist: jupyterlite-pyodide-kernel; extra == "doc"
+Requires-Dist: mypy; extra == "dev"
+Requires-Dist: typing_extensions; extra == "dev"
+Requires-Dist: types-psutil; extra == "dev"
+Requires-Dist: pycodestyle; extra == "dev"
+Requires-Dist: ruff; extra == "dev"
+Requires-Dist: cython-lint>=0.12.2; extra == "dev"
+Requires-Dist: rich-click; extra == "dev"
+Requires-Dist: doit>=0.36.0; extra == "dev"
+Requires-Dist: pydevtool; extra == "dev"
+Provides-Extra: test
+Provides-Extra: doc
+Provides-Extra: dev
+Description-Content-Type: text/x-rst
+
+.. image:: https://raw.githubusercontent.com/scipy/scipy/main/doc/source/_static/logo.svg
+  :target: https://scipy.org
+  :width: 110
+  :height: 110
+  :align: left 
+
+.. image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A
+  :target: https://numfocus.org
+
+.. image:: https://img.shields.io/pypi/dm/scipy.svg?label=Pypi%20downloads
+  :target: https://pypi.org/project/scipy/
+
+.. image:: https://img.shields.io/conda/dn/conda-forge/scipy.svg?label=Conda%20downloads
+  :target: https://anaconda.org/conda-forge/scipy
+
+.. image:: https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg
+  :target: https://stackoverflow.com/questions/tagged/scipy
+
+.. image:: https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue
+  :target: https://www.nature.com/articles/s41592-019-0686-2
+
+SciPy (pronounced "Sigh Pie") is an open-source software for mathematics,
+science, and engineering. It includes modules for statistics, optimization,
+integration, linear algebra, Fourier transforms, signal and image processing,
+ODE solvers, and more.
+
+- **Website:** https://scipy.org
+- **Documentation:** https://docs.scipy.org/doc/scipy/
+- **Development version of the documentation:** https://scipy.github.io/devdocs
+- **Mailing list:** https://mail.python.org/mailman3/lists/scipy-dev.python.org/
+- **Source code:** https://github.com/scipy/scipy
+- **Contributing:** https://scipy.github.io/devdocs/dev/index.html
+- **Bug reports:** https://github.com/scipy/scipy/issues
+- **Code of Conduct:** https://docs.scipy.org/doc/scipy/dev/conduct/code_of_conduct.html
+- **Report a security vulnerability:** https://tidelift.com/docs/security
+- **Citing in your work:** https://www.scipy.org/citing-scipy/
+
+SciPy is built to work with
+NumPy arrays, and provides many user-friendly and efficient numerical routines,
+such as routines for numerical integration and optimization. Together, they
+run on all popular operating systems, are quick to install, and are free of
+charge. NumPy and SciPy are easy to use, but powerful enough to be depended
+upon by some of the world's leading scientists and engineers. If you need to
+manipulate numbers on a computer and display or publish the results, give
+SciPy a try!
+
+For the installation instructions, see `our install
+guide <https://scipy.org/install/>`__.
+
+
+Call for Contributions
+----------------------
+
+We appreciate and welcome contributions. Small improvements or fixes are always appreciated; issues labeled as "good
+first issue" may be a good starting point. Have a look at `our contributing
+guide <https://scipy.github.io/devdocs/dev/index.html>`__.
+
+Writing code isn’t the only way to contribute to SciPy. You can also:
+
+- review pull requests
+- triage issues
+- develop tutorials, presentations, and other educational materials
+- maintain and improve `our website <https://scipy.org/>`__
+- develop graphic design for our brand assets and promotional materials
+- help with outreach and onboard new contributors
+- write grant proposals and help with other fundraising efforts
+
+If you’re unsure where to start or how your skills fit in, reach out! You can
+ask on the mailing list or here, on GitHub, by leaving a
+comment on a relevant issue that is already open.
+
+If you are new to contributing to open source, `this
+guide <https://opensource.guide/how-to-contribute/>`__ helps explain why, what,
+and how to get involved.
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..3b4d31004dc26960f394757d13db47949f604c93
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/RECORD
@@ -0,0 +1,2175 @@
+scipy-1.13.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+scipy-1.13.0.dist-info/LICENSE.txt,sha256=GBKL4U2eo7yUQAdaiYqUjjMn6WiG0BZ47o4FJRBXFYA,46805
+scipy-1.13.0.dist-info/METADATA,sha256=pupwB-tQg2FOnYO6j6nIUevhfbVQotRECCPdJznBYcE,60568
+scipy-1.13.0.dist-info/RECORD,,
+scipy-1.13.0.dist-info/WHEEL,sha256=sZM_NeUMz2G4fDenMf11eikcCxcLaQWiYRmjwQBavQs,137
+scipy.libs/libgfortran-040039e1.so.5.0.0,sha256=FK-zEpsai1C8QKOwggx_EVLqm8EBIaqxUpQ_cFdHKIY,2686065
+scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so,sha256=CfADHQasbypnAQQRplB4SeqoJnVMpVOpVFmsGKR3Xl8,34990041
+scipy.libs/libquadmath-96973f99.so.0.0.0,sha256=k0wi3tDn0WnE1GeIdslgUa3z2UVF2pYvYLQWWbB12js,247609
+scipy/__config__.py,sha256=CJllCNEYJv3O910L7fzfB9zF2yCR0KTocgM88LMaPEI,5087
+scipy/__init__.py,sha256=8J2KNCrLUruYIHP76yWU2TY_9VQz091xAGYULbfAvuk,4144
+scipy/__pycache__/__config__.cpython-310.pyc,,
+scipy/__pycache__/__init__.cpython-310.pyc,,
+scipy/__pycache__/_distributor_init.cpython-310.pyc,,
+scipy/__pycache__/conftest.cpython-310.pyc,,
+scipy/__pycache__/version.cpython-310.pyc,,
+scipy/_distributor_init.py,sha256=zJThN3Fvof09h24804pNDPd2iN-lCHV3yPlZylSefgQ,611
+scipy/_lib/__init__.py,sha256=CXrH_YBpZ-HImHHrqXIhQt_vevp4P5NXClp7hnFMVLM,353
+scipy/_lib/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/__pycache__/_array_api.cpython-310.pyc,,
+scipy/_lib/__pycache__/_bunch.cpython-310.pyc,,
+scipy/_lib/__pycache__/_ccallback.cpython-310.pyc,,
+scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc,,
+scipy/_lib/__pycache__/_docscrape.cpython-310.pyc,,
+scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc,,
+scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc,,
+scipy/_lib/__pycache__/_gcutils.cpython-310.pyc,,
+scipy/_lib/__pycache__/_pep440.cpython-310.pyc,,
+scipy/_lib/__pycache__/_testutils.cpython-310.pyc,,
+scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc,,
+scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc,,
+scipy/_lib/__pycache__/_util.cpython-310.pyc,,
+scipy/_lib/__pycache__/decorator.cpython-310.pyc,,
+scipy/_lib/__pycache__/deprecation.cpython-310.pyc,,
+scipy/_lib/__pycache__/doccer.cpython-310.pyc,,
+scipy/_lib/__pycache__/uarray.cpython-310.pyc,,
+scipy/_lib/_array_api.py,sha256=Ibx-wfA11m7xKtNIlvYhS4e71GyehsGnUVxlcLKF4Rs,12740
+scipy/_lib/_bunch.py,sha256=WooFxHL6t0SwjcwMDECM5wcWWLIS0St8zP3urDVK-V0,8120
+scipy/_lib/_ccallback.py,sha256=N9CO7kJYzk6IWQR5LHf_YA1-Oq48R38UIhJFIlJ2Qyc,7087
+scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so,sha256=5pAHAaCrTH-nc9QfyOTVZyhYQUhE91sN43PlNKSZ8kw,110000
+scipy/_lib/_disjoint_set.py,sha256=o_EUHZwnnI1m8nitEf8bSkF7TWZ65RSiklBN4daFruA,6160
+scipy/_lib/_docscrape.py,sha256=B4AzU5hrwyo8bJLBlNU-PQ0qCtgStZe_LasHc2Q9ZwE,21498
+scipy/_lib/_elementwise_iterative_method.py,sha256=w3qm_WWCu4nrtcbdnX8Wx2SKRYpamMfeyxjfmyvBONs,13509
+scipy/_lib/_finite_differences.py,sha256=llaIPvCOxpE4VA8O8EycPEU8i6LHJyOD-y7Y9OvQHt0,4172
+scipy/_lib/_fpumode.cpython-310-x86_64-linux-gnu.so,sha256=Kk1mpVY1lns4OpLjvNrW4B9W-nLAOgt6nH-0O5oSRTg,16400
+scipy/_lib/_gcutils.py,sha256=hajQd-HUw9ckK7QeBaqXVRpmnxPgyXO3QqqniEh7tRk,2669
+scipy/_lib/_pep440.py,sha256=vo3nxbfjtMfGq1ektYzHIzRbj8W-NHOMp5WBRjPlDTg,14005
+scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so,sha256=yGXELz3LHq-9jmN9DzA6APmFeL4wvY_rPypIid98qsg,23232
+scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so,sha256=gjamXXU2UkqozZOn9JfyjxoTO7zEtVUII1-TWVoc2B0,49544
+scipy/_lib/_test_deprecation_def.cpython-310-x86_64-linux-gnu.so,sha256=NN0_uZiXj6yZNa3FhUCTutwkPNT0atOxMSp0KGYi9og,34392
+scipy/_lib/_testutils.py,sha256=JtE6ksxrUr0E-A8sEXazvoXvnHympmXabXCys0dRtjU,8134
+scipy/_lib/_threadsafety.py,sha256=xuVqUS2jv46fOOQf7bcrhiYtnPVygqmrIVJc-7_LlI8,1455
+scipy/_lib/_tmpdirs.py,sha256=z3IYpzACnWdN_BMjOvqYbkTvYyUbfbQvfehq7idENSo,2374
+scipy/_lib/_uarray/LICENSE,sha256=yAw5tfzga6SJfhTgsKiLVEWDNNlR6xNhQC_60s-4Y7Q,1514
+scipy/_lib/_uarray/__init__.py,sha256=Rww7wLA7FH6Yong7oMgl_sHPpjcRslRaTjh61W_xVg4,4493
+scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc,,
+scipy/_lib/_uarray/_backend.py,sha256=CeTV7H8oXRs7wrdBu9MXqz5-5EtRyzXnDrTlsMWtyt8,20432
+scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so,sha256=b_JP1CEg6jY9SIth1B8Rem111VgcmQHwa3_5ECGnq9M,173888
+scipy/_lib/_util.py,sha256=zPHnzzCxXrbHdiejH81_MRL6K0P84SG1S-Bq6sDN6j8,32217
+scipy/_lib/array_api_compat/__init__.py,sha256=sC0Ht3rsA1SxX6cuBmBSe2mJ8_m2SODKN29BjIxlwP8,946
+scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc,,
+scipy/_lib/array_api_compat/_internal.py,sha256=RiQvh6ZoZLXw0l2CYKMG_6_PwmDO3qm7Hay8MMpgObc,987
+scipy/_lib/array_api_compat/common/__init__.py,sha256=fH4Ux-dWyQRkZ6WxqDTv-Bges_uKQ80TgTKOxvZ2MFE,24
+scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/_aliases.py,sha256=P6-5PJI0ZzVPS58CwpAVh__B8TkVMK7_4DYy8SbpC3A,16263
+scipy/_lib/array_api_compat/common/_helpers.py,sha256=Rn-aG4Vu56auzREAnmkhEsQMr9z__4sgEUEQq2E0elA,8206
+scipy/_lib/array_api_compat/common/_linalg.py,sha256=4D1-ukLTf7s3t6LaFsoR_mMkblceSywx4cYXbeeqZ28,6301
+scipy/_lib/array_api_compat/common/_typing.py,sha256=Wfsx0DJSMTIGfMoj_tqH2-HjxPyVSbQ9aUB02FaEYsA,388
+scipy/_lib/array_api_compat/cupy/__init__.py,sha256=g9IFwPzeOhMXnR-c-Qf8QFXfAltPp6SlS9AtZrjKAQw,397
+scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc,,
+scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc,,
+scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc,,
+scipy/_lib/array_api_compat/cupy/_aliases.py,sha256=bKFKl2rLDX9r74Arv-HZg2yj-ZZqRwGbNoUZnsSORgM,2602
+scipy/_lib/array_api_compat/cupy/_typing.py,sha256=oDhrZB8R-D6wvee7tR4YkyBhTq93M0fFi3Tv-lpN_Dg,617
+scipy/_lib/array_api_compat/cupy/linalg.py,sha256=KidQHA9W3gBTRtWZ9963XiMXel-TvFCSecqB3Te0G9o,1358
+scipy/_lib/array_api_compat/numpy/__init__.py,sha256=bhqr1ecsSl-w5N_TnaaItHsT3eWnNtsC5H5C_6zFu7o,596
+scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc,,
+scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc,,
+scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc,,
+scipy/_lib/array_api_compat/numpy/_aliases.py,sha256=xmcLK4lvyXgrPQNnNuwXut0LYcKBzxruvcQxXcSEjOI,2606
+scipy/_lib/array_api_compat/numpy/_typing.py,sha256=OFRXfhT8-snL_4VeOjbOCd_yYIGqVS-IRrZoWNcL3v4,618
+scipy/_lib/array_api_compat/numpy/linalg.py,sha256=e3gqAyX01YCMHYrQ0rGZ8haub9ZhfHv8TZe1haaRkpE,1189
+scipy/_lib/array_api_compat/torch/__init__.py,sha256=MWtkg6kdsN8CaTgYQJvjVMZu3RQq2mUkyme7yfkUWSE,518
+scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc,,
+scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc,,
+scipy/_lib/array_api_compat/torch/_aliases.py,sha256=s-1HnikHDhbBGBDquuiulALiQohOthMOPbonWuV4Fuk,26792
+scipy/_lib/array_api_compat/torch/linalg.py,sha256=H6lb-umJYLcrGCEaaaH___3rJkk6dnfXNntU8tyt20E,2485
+scipy/_lib/decorator.py,sha256=ILVZlN5tlQGnmbgzNKH2TTcNzGKPlHwMuYZ8SbSEORA,15040
+scipy/_lib/deprecation.py,sha256=nAiyFAWEH2Bk5P5Hy_3HSUM3v792GS9muBKr-fdj3Yk,8074
+scipy/_lib/doccer.py,sha256=shdWIi3u7QBN5CyyKwqWW99qOEsiFewB8eH10FWhYLM,8362
+scipy/_lib/messagestream.cpython-310-x86_64-linux-gnu.so,sha256=eTSn1CnkC1JxrK6XqCuyWPjzXht_-pbNMSTXTlwapPQ,85664
+scipy/_lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_import_cycles.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_scipy_version.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc,,
+scipy/_lib/tests/test__gcutils.py,sha256=qvfxvemSmGvaqcpHwoEzdXYn5mrAf-B1X5qGGyasPC4,3416
+scipy/_lib/tests/test__pep440.py,sha256=u9hPoolK4AoIIS-Rq74Du5SJu5og2RxMwgaAvGgWvRo,2277
+scipy/_lib/tests/test__testutils.py,sha256=P4WDJpUgy19wD9tknQSjIivuQvZF7YUBGSBWlur2QRA,800
+scipy/_lib/tests/test__threadsafety.py,sha256=qSfCF5OG_5lbnSl-grmDN_QCU4QLe-fS3sqnwL04pf8,1322
+scipy/_lib/tests/test__util.py,sha256=lG711zcPwi8uNPrMkgwGHqIKbEPHhlU8lYj6gWVT9aA,14479
+scipy/_lib/tests/test_array_api.py,sha256=6y0vlLDf5UaMglwzdN-gWqp14EgT5N2blDYjR_5OYyE,4039
+scipy/_lib/tests/test_bunch.py,sha256=sViE5aFSmAccfk8kYvt6EmzR5hyQ9nOSWMcftaDYDBg,6168
+scipy/_lib/tests/test_ccallback.py,sha256=dy9g70zyd80KpawffSKgWbddsKUwNNeF5sbxMfCTk6w,6175
+scipy/_lib/tests/test_deprecation.py,sha256=a_3r_9pFx1sxJXeFgiTSV9DXYnktc4fio1hR0ITPywA,364
+scipy/_lib/tests/test_import_cycles.py,sha256=lsGEBuEMo4sbYdZNSOsxAQIJgquUIjcDhQjtr0cyFg4,500
+scipy/_lib/tests/test_public_api.py,sha256=vT2kkjgtkMhxPq3mAoQOZnoD5HEHabHMWrBVW4UsvvE,19234
+scipy/_lib/tests/test_scipy_version.py,sha256=jgo-2YhCkBksXHM6xKiN_iJJZkqz0CvXqn2jVxx1djA,606
+scipy/_lib/tests/test_tmpdirs.py,sha256=URQRnE_lTPw9MIJYBKXMfNATQ0mpsBDgoqAowkylbWQ,1240
+scipy/_lib/tests/test_warnings.py,sha256=MnTTTqcMhloMzL0BeZ2JN2oAL0JKzjZ7UY3IOjOrMQs,4546
+scipy/_lib/uarray.py,sha256=4X0D3FBQR6HOYcwMftjH-38Kt1nkrS-eD4c5lWL5DGo,815
+scipy/cluster/__init__.py,sha256=LNM_kFbT28cIYYgctilxYsxdjuF3KuiOaulZH4dFatE,876
+scipy/cluster/__pycache__/__init__.cpython-310.pyc,,
+scipy/cluster/__pycache__/hierarchy.cpython-310.pyc,,
+scipy/cluster/__pycache__/vq.cpython-310.pyc,,
+scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so,sha256=gnVW1uPcgm7A7GKbjVh8mTAU8J6S5bogQVrvT9VzCcc,422992
+scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so,sha256=x4gPXHf2lhC3HPwsoQa_tDJC3wEhkY8Rzl4ADWDygH8,355856
+scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so,sha256=Jj6cJ1TAj11XH4TfH2vtw47yy3q0_LVqDIwPtA96ZxY,127888
+scipy/cluster/hierarchy.py,sha256=XHNOlJBrIReWElJN1MfosbN12aE5jSxsZD-KtTKa-F0,148588
+scipy/cluster/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc,,
+scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc,,
+scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc,,
+scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc,,
+scipy/cluster/tests/hierarchy_test_data.py,sha256=7syUYdIaDVr7hgvMliX0CW4386utjBJn1DOgX0USXls,6850
+scipy/cluster/tests/test_disjoint_set.py,sha256=EuHGBE3ZVEMnWFbCn8tjI-_6CWrNXfpnv5bUBa9qhWI,5525
+scipy/cluster/tests/test_hierarchy.py,sha256=qVwLvvVO7iJNfqWJWdXia1oXOY-T6s09Yf58IuNG6zc,48726
+scipy/cluster/tests/test_vq.py,sha256=pSUokcwvp50iWwyrlNN53VxCaShDCScjRMJ6hcISyWc,17609
+scipy/cluster/vq.py,sha256=abgPHLJDSEH8mwGaGMtMG1rmkI09P272ji0yfMcjmN4,30738
+scipy/conftest.py,sha256=7ocP1roANCCWR6A8lCUUGFoWHX-HAPEo2bUdvbvx-Ag,9034
+scipy/constants/__init__.py,sha256=Pvyiayo6WX0cVORlr-Ap0VacI5hu5C8PQ17HIwgLcTc,12437
+scipy/constants/__pycache__/__init__.cpython-310.pyc,,
+scipy/constants/__pycache__/_codata.cpython-310.pyc,,
+scipy/constants/__pycache__/_constants.cpython-310.pyc,,
+scipy/constants/__pycache__/codata.cpython-310.pyc,,
+scipy/constants/__pycache__/constants.cpython-310.pyc,,
+scipy/constants/_codata.py,sha256=AAXUgkUuVsGHJ0axSfGyxTd8MkPV6yiza-Q2MSJyt58,155635
+scipy/constants/_constants.py,sha256=CcZ7BBKx8NuVpvjBeS0lY0I1yg5lnhSVhLPKGjIMaPU,10376
+scipy/constants/codata.py,sha256=RMD4V770zdsftqP4MN559SKUq1J15dwWStdID0Z_URE,794
+scipy/constants/constants.py,sha256=w7sGxSidD2Q9Ged0Sn1pnL-qqD1ssEP1A8sZWeLWBeI,2250
+scipy/constants/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/constants/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc,,
+scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc,,
+scipy/constants/tests/test_codata.py,sha256=ToO_lhQOsusJlP3QjrYqa1vw7x6wTCuKH17fg87tH08,1959
+scipy/constants/tests/test_constants.py,sha256=PY1oy6bbM2zoPAPgUeBqVThnVRuu4lBt_uMmxm7Ct38,1632
+scipy/datasets/__init__.py,sha256=7IzOi9gij2mhYCCMWJE1RiI22E1cVbe6exL9BRm1GXs,2802
+scipy/datasets/__pycache__/__init__.cpython-310.pyc,,
+scipy/datasets/__pycache__/_download_all.cpython-310.pyc,,
+scipy/datasets/__pycache__/_fetchers.cpython-310.pyc,,
+scipy/datasets/__pycache__/_registry.cpython-310.pyc,,
+scipy/datasets/__pycache__/_utils.cpython-310.pyc,,
+scipy/datasets/_download_all.py,sha256=iRPR2IUk6C3B5u2q77yOhac449MRSoRaTlCy2oCIknE,1701
+scipy/datasets/_fetchers.py,sha256=Jt8oklMEdZSKf0yJddYCarjlMcOl1XRsdv1LW8gfwE0,6760
+scipy/datasets/_registry.py,sha256=br0KfyalEbh5yrQLznQ_QvBtmN4rMsm0UxOjnsJp4OQ,1072
+scipy/datasets/_utils.py,sha256=kdZ-Opp7Dr1pCwM285p3GVjgZTx_mKWCvETur92FWg4,2967
+scipy/datasets/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc,,
+scipy/datasets/tests/test_data.py,sha256=GelFTF2yZqiiQkgTv8ukv8sKTJBdmpsyK5fr0G6z7Ls,4064
+scipy/fft/__init__.py,sha256=XjfuqqFtHktAmDhKoFSca5JoYqCaQxtZRdH0SlPNYjM,3513
+scipy/fft/__pycache__/__init__.cpython-310.pyc,,
+scipy/fft/__pycache__/_backend.cpython-310.pyc,,
+scipy/fft/__pycache__/_basic.cpython-310.pyc,,
+scipy/fft/__pycache__/_basic_backend.cpython-310.pyc,,
+scipy/fft/__pycache__/_debug_backends.cpython-310.pyc,,
+scipy/fft/__pycache__/_fftlog.cpython-310.pyc,,
+scipy/fft/__pycache__/_fftlog_backend.cpython-310.pyc,,
+scipy/fft/__pycache__/_helper.cpython-310.pyc,,
+scipy/fft/__pycache__/_realtransforms.cpython-310.pyc,,
+scipy/fft/__pycache__/_realtransforms_backend.cpython-310.pyc,,
+scipy/fft/_backend.py,sha256=5rBxK8GQtCMnuPHc-lNQdpH4uFFZ9_5vBukkDv6jRRA,6544
+scipy/fft/_basic.py,sha256=lGJ8qQTMXUJEbq_2vwfPPPlX7b4j358ks9LLretOtEY,62997
+scipy/fft/_basic_backend.py,sha256=BnexiVV20wvTXBPYbY89v_mCL6hzP7iF6w_ahG7EgHQ,6546
+scipy/fft/_debug_backends.py,sha256=RlvyunZNqaDDsI3-I6QH6GSBz_faT6EN4OONWsvMtR8,598
+scipy/fft/_fftlog.py,sha256=_ryVlUuSQp_J0hH8VFGMRn4ZvzudHqKDYCVbpV-WVsY,7866
+scipy/fft/_fftlog_backend.py,sha256=K-nbAr00YkJ0G5Y_WSe5aorImbnVswKQcRkGSaYLs38,5237
+scipy/fft/_helper.py,sha256=U47qLBvBl6cs6eicfdq1nldfUVs70Nw0ByOCZmuqAG0,10048
+scipy/fft/_pocketfft/LICENSE.md,sha256=wlSytf0wrjyJ02ugYXMFY7l2D8oE8bdGobLDFX2ix4k,1498
+scipy/fft/_pocketfft/__init__.py,sha256=dROVDi9kRvkbSdynd3L09tp9_exzQ4QqG3xnNx78JeU,207
+scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc,,
+scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc,,
+scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc,,
+scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc,,
+scipy/fft/_pocketfft/basic.py,sha256=4HR-eRDb6j4YR4sqKnTikFmG0tnUIXxa0uImnB6_JVs,8138
+scipy/fft/_pocketfft/helper.py,sha256=lVpf-oCVBU-TAcreDe15vfbZwpxbfvCGzut0w9cu-As,5807
+scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so,sha256=n6qi8DOYhcVycyVM5IMTzmolFDQXyWGKY6Hql9-IY2k,1197600
+scipy/fft/_pocketfft/realtransforms.py,sha256=4TmqAkCDQK3gs1ddxXY4rOrVfvQqO8NyVtOzziUGw6E,3344
+scipy/fft/_pocketfft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/fft/_pocketfft/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc,,
+scipy/fft/_pocketfft/tests/test_basic.py,sha256=TviTxRl-MOQPcBgu-vvGU_wOunD59HQCc8k2-IdV3X4,35373
+scipy/fft/_pocketfft/tests/test_real_transforms.py,sha256=wn3Lgln-PL2OpSoWjKa4G4mXmngT-mLkOuZTZl3jxK0,16656
+scipy/fft/_realtransforms.py,sha256=QmO9CDqrAsvBcLNgIzFBIWBTYsSUCRJ_Cj1myv73KlE,25386
+scipy/fft/_realtransforms_backend.py,sha256=u4y4nBGCxpTLVqxK1J7xV6tcpeC3-8iiSEXLOcRM9wI,2389
+scipy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/fft/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_helper.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc,,
+scipy/fft/tests/mock_backend.py,sha256=RAlVSy4Qtk1oTaEG9fl4WKonoSijVHIDfxqv5MbVBPY,2554
+scipy/fft/tests/test_backend.py,sha256=KnLuBO1gQcuaLlr2IP8ndhn2hNFe24EiKPvqbv4o1I4,4275
+scipy/fft/tests/test_basic.py,sha256=CRtrf1R8UoZiKrHKBgzyUK4jpAOkqmSXS55seksgHPI,21216
+scipy/fft/tests/test_fftlog.py,sha256=iRvVB54ZMJSJG52bE-t3mqfHDHesuxnfD1phNAScyGo,6173
+scipy/fft/tests/test_helper.py,sha256=8ynydSBXgDSA5uHjrSI891wYOpF7g4veIJ536Iv535Q,15436
+scipy/fft/tests/test_multithreading.py,sha256=Ub0qD3_iSApPT9E71i0dvKnsKrctLiwMq95y3370POE,2132
+scipy/fft/tests/test_real_transforms.py,sha256=sN5XJmLrnmlIBr7Z5GWYeOCZNQs3_8bAgVL44ShP0c8,8621
+scipy/fftpack/__init__.py,sha256=rLCBFC5Dx5ij_wmL7ChiGmScYlgu0mhaWtrJaz_rBt0,3155
+scipy/fftpack/__pycache__/__init__.cpython-310.pyc,,
+scipy/fftpack/__pycache__/_basic.cpython-310.pyc,,
+scipy/fftpack/__pycache__/_helper.cpython-310.pyc,,
+scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc,,
+scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc,,
+scipy/fftpack/__pycache__/basic.cpython-310.pyc,,
+scipy/fftpack/__pycache__/helper.cpython-310.pyc,,
+scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc,,
+scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc,,
+scipy/fftpack/_basic.py,sha256=Sk_gfswmWKb3za6wrU_mIrRVBl69qjzAu9ltznbDCKs,13098
+scipy/fftpack/_helper.py,sha256=g5DZnOVLyLw0BRm5w9viScU3GEPmHwRCwy5dcHdJKb4,3350
+scipy/fftpack/_pseudo_diffs.py,sha256=eCln0ZImNYr-wUWpOZ-SmKKIbhJsV8VBLmwT_C79RsQ,14200
+scipy/fftpack/_realtransforms.py,sha256=ledb21L13ofGnOU4pkx8uWuARCxsh3IFQrHctxTgzzw,19214
+scipy/fftpack/basic.py,sha256=i2CMMS__L3UtFFqe57E0cs7AZ4U6VO-Ted1KhU7_wNc,577
+scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so,sha256=uF-nqpiMIPukki9UtxcVlw3ayffpXE7f8vQnt6fC0AA,272968
+scipy/fftpack/helper.py,sha256=M7jTN4gQIRWpkArQR13bI7WN6WcW-AabxKgrOHRvfeQ,580
+scipy/fftpack/pseudo_diffs.py,sha256=RqTDJRobZQGZg6vSNf4FBzFdLTttkqdWTGchttuQhDo,674
+scipy/fftpack/realtransforms.py,sha256=9-mR-VV3W14oTaD6pB5-RIDV3vkTBQmGCcxfbA8GYH0,595
+scipy/fftpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc,,
+scipy/fftpack/tests/fftw_double_ref.npz,sha256=pgxklBW2RSI5JNg0LMxcCXgByGkBKHo2nlP8kln17E4,162120
+scipy/fftpack/tests/fftw_longdouble_ref.npz,sha256=pAbL1NrQTQxZ3Tj1RBb7SUJMgiKcGgdLakTsDN4gAOM,296072
+scipy/fftpack/tests/fftw_single_ref.npz,sha256=J2qRQTGOb8NuSrb_VKYbZAVO-ISbZg8XNZ5fVBtDxSY,95144
+scipy/fftpack/tests/test.npz,sha256=Nt6ASiLY_eoFRZDOSd3zyFmDi32JGTxWs7y2YMv0N5c,11968
+scipy/fftpack/tests/test_basic.py,sha256=nLMulUtVIcsVzahpYuSvuEqGHgLeCwpar5YhLbtiTxI,30307
+scipy/fftpack/tests/test_helper.py,sha256=8JaPSJOwsk5XXOf1zFahJ_ktUTfNGSk2-k3R6e420XI,1675
+scipy/fftpack/tests/test_import.py,sha256=Sz4ZZmQpz_BtiO0Gbtctt6WB398wB17oopv5mkfOh0U,1120
+scipy/fftpack/tests/test_pseudo_diffs.py,sha256=SEVPHPDdSxDSUCC8qkwuKD7mIX8rFIx9puxGzBYd1uk,13389
+scipy/fftpack/tests/test_real_transforms.py,sha256=W-gHxBHV3elIPFDOuZvSfZkEuMYJ6edjG7fL-3vVY1s,23971
+scipy/integrate/__init__.py,sha256=Nb06g1FvgETDPfultR4y_JGZCR31k9xrvpcq5VtoGPo,4236
+scipy/integrate/__pycache__/__init__.cpython-310.pyc,,
+scipy/integrate/__pycache__/_bvp.cpython-310.pyc,,
+scipy/integrate/__pycache__/_ode.cpython-310.pyc,,
+scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc,,
+scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc,,
+scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc,,
+scipy/integrate/__pycache__/_quadrature.cpython-310.pyc,,
+scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc,,
+scipy/integrate/__pycache__/dop.cpython-310.pyc,,
+scipy/integrate/__pycache__/lsoda.cpython-310.pyc,,
+scipy/integrate/__pycache__/odepack.cpython-310.pyc,,
+scipy/integrate/__pycache__/quadpack.cpython-310.pyc,,
+scipy/integrate/__pycache__/vode.cpython-310.pyc,,
+scipy/integrate/_bvp.py,sha256=7OiL3Kg7IZlmUkcrBy6qzyjhayV546_HlB6kb6o7zh4,40927
+scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so,sha256=vDU7-kaNDtANrwFHsf1mKG0KYq6K33OkR7CVYAmTLb8,116977
+scipy/integrate/_ivp/__init__.py,sha256=gKFR_pPjr8fRLgAGY5sOzYKGUFu2nGX8x1RrXT-GZZc,256
+scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc,,
+scipy/integrate/_ivp/base.py,sha256=Mlef_dgmn0wzjFxZA3oBbtHrQgrfdZw_8k1mLYNZP4A,10295
+scipy/integrate/_ivp/bdf.py,sha256=deQVxWq58ihFDWKC8teztUbe8MYN4mNgLCU-6aq_z1U,17522
+scipy/integrate/_ivp/common.py,sha256=A6_X4WD0PwK-6MhOAmU8aj8CLuVdlxfBlKdPNxab-lE,15274
+scipy/integrate/_ivp/dop853_coefficients.py,sha256=OrYvW0Hu6X7sOh37FU58gNkgC77KVpYclewv_ARGMAE,7237
+scipy/integrate/_ivp/ivp.py,sha256=C5jQvVgpf0cBo_khaVO_bE9Mh8V-yOadv_xzc8FXKsQ,31472
+scipy/integrate/_ivp/lsoda.py,sha256=t5t2jZBgBPt0G20TOI4SVXuGFAZYAhfDlJZhfCzeeDo,9927
+scipy/integrate/_ivp/radau.py,sha256=7Ng-wYOdOBf4ke4-CYyNUQUH3jgYmDflpE1UXIYNOdU,19743
+scipy/integrate/_ivp/rk.py,sha256=kYWCzolgXwnDuDIqDViI2Exzu61JekmbbCYuQhGYsgA,22781
+scipy/integrate/_ivp/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc,,
+scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc,,
+scipy/integrate/_ivp/tests/test_ivp.py,sha256=Y1pItTm6-38k1_nDMrWTKwa36vmxd2234gq4uDReUOs,37088
+scipy/integrate/_ivp/tests/test_rk.py,sha256=K9UxZghBzSL2BzmgLndPJcWOWV4Nr530TGKWakpsoeM,1326
+scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so,sha256=kx57YqVE3UeZ8-WerSmFuEdoKw9ksGYl866TbJuSizI,113129
+scipy/integrate/_ode.py,sha256=UBdaILr3TUmCPs-pg32Eni12Gb0WKmyqVp_C5fTVHZQ,48074
+scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so,sha256=eSHckX_3y6otFz6AwgyRzvi2QuEt9C7HlBxBhP-LlDM,83577
+scipy/integrate/_odepack_py.py,sha256=ULRxBnl_FzZbmf_zfFMIK8r11puTTT37IzRy9rVONd8,10912
+scipy/integrate/_quad_vec.py,sha256=zJrfx12UOsyI2bY26BZclLsxhv42xUEZ3ZSDcAcHaog,21234
+scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so,sha256=2ET4zWnuL8B1NBcz8-XRcgCHlRtre207lGMticJsW3Y,116449
+scipy/integrate/_quadpack_py.py,sha256=RMY5JyhkDVESV4sZb2iUEBNezZ2Y-Z5dru5Bbx1k5Yk,53622
+scipy/integrate/_quadrature.py,sha256=27OnvuGOs0s1j60mkpD33NkvfqEDyRkZZ2SdtsGshqE,65061
+scipy/integrate/_tanhsinh.py,sha256=8bDtLU3cNHtHz2KZ_TDPEWlkaixUUeTZEfiCsTH2NJs,52905
+scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so,sha256=oCO9DKyKPy4ERYj4rP5sVzsJ2V1Goc521tLC5k-WlzE,16896
+scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so,sha256=tHI0zXWYhupvzXbVmoEsayJBo1ABVNEWNQ6BZtwgJEo,108745
+scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so,sha256=B4uehSegEEOvby4pRpU_g3IGvbLNI9IWgYOACRKpVAY,166393
+scipy/integrate/dop.py,sha256=EaxhHt4tzQjyQv6WBKqfeJtiBVQmhrcEIgkBzrTQ4Us,453
+scipy/integrate/lsoda.py,sha256=hUg4-tJcW3MjhLjLBsD88kzP7qGp_zLGw1AH2ZClHmw,436
+scipy/integrate/odepack.py,sha256=G5KiKninKFyYgF756_LtDGB68BGk7IwPidUOywFpLQo,545
+scipy/integrate/quadpack.py,sha256=OAAaraeGThs2xYYWqKIOHiTe73Qh6zr8aoI1t8cqpnk,617
+scipy/integrate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc,,
+scipy/integrate/tests/test__quad_vec.py,sha256=-pcKFE_LsIiMx-bGJWztpib8uhwe8AyETTM8yvv9If0,6284
+scipy/integrate/tests/test_banded_ode_solvers.py,sha256=kJWirYckJ7k4tfweg1ds-Tozp3GEhxTbuXfgSdeJw7k,6687
+scipy/integrate/tests/test_bvp.py,sha256=Q3zw4r3lajNE9y2smIkAayRWrZ67r-yTuXODPeyvecY,20181
+scipy/integrate/tests/test_integrate.py,sha256=U-TlhrTUh8BnQ7SlW9enL5gvO15QcGlmfDEHhnjhct4,24400
+scipy/integrate/tests/test_odeint_jac.py,sha256=enXGyQQ4m-9kMPDaWvipIt3buYZ5jNjaxITP8GoS86s,1816
+scipy/integrate/tests/test_quadpack.py,sha256=e6dBmLYXrV_veLdsypR0fTs8JW_rTTAlSC5ue3vy_JA,27983
+scipy/integrate/tests/test_quadrature.py,sha256=_mQiQ1NizES6MYRUkNP1DlGssXp75aV61wajiSWEXuM,29999
+scipy/integrate/tests/test_tanhsinh.py,sha256=fWXykp3jX-lE9HLeaTaGLY2iHQ8sHIWQnsTmxSADq2k,34195
+scipy/integrate/vode.py,sha256=Jt60dcK-zXBgQF45FNRVtvyUbnkmaNWGbjX00I2mC3k,453
+scipy/interpolate/__init__.py,sha256=AULPLFlB27t4jwYSXN_vojbsO4QF_UiN1kGVsxWeCSs,3530
+scipy/interpolate/__pycache__/__init__.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_cubic.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_pade.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_polyint.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_rbf.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_rgi.cpython-310.pyc,,
+scipy/interpolate/__pycache__/fitpack.cpython-310.pyc,,
+scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc,,
+scipy/interpolate/__pycache__/interpolate.cpython-310.pyc,,
+scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc,,
+scipy/interpolate/__pycache__/polyint.cpython-310.pyc,,
+scipy/interpolate/__pycache__/rbf.cpython-310.pyc,,
+scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so,sha256=9q6P0Lo0k8OVE34kb7GWzORo5pnd5Ff3lNundUqoAZA,617056
+scipy/interpolate/_bsplines.py,sha256=0UV-sSOfzePJI4wUP6R2rX4AfdOhocDRLhRDDokyJr0,75440
+scipy/interpolate/_cubic.py,sha256=iuDbeuOhlDYUzGNpvvlnPv6xiG5_8pZIONqQ4b6nPiQ,38162
+scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so,sha256=Q6xkCivTDhfYysI9JJBaukVXyWakOeI76qsMb-OXVQ0,91409
+scipy/interpolate/_fitpack2.py,sha256=KFfeRremt7_PYekhXuH4rjlRrUvMw0pvKlxvgfHDFyE,89172
+scipy/interpolate/_fitpack_impl.py,sha256=oTxX0ZBw1eChL2gKyVnEIOjQhbOdHv1JAFXPCivVi8A,28669
+scipy/interpolate/_fitpack_py.py,sha256=HxdppqjgMmwwK-a2ZIoNSEjikbMlRLqWErKPdWoijSE,28064
+scipy/interpolate/_interpolate.py,sha256=eBpiTbpC4_9O-7pokew59fmtazbOYN1Se__7d32HG3k,88259
+scipy/interpolate/_ndbspline.py,sha256=rXABycf5_j8ESpY3DO_ysu76kxLKo1CawWUjbQzMSQk,12742
+scipy/interpolate/_ndgriddata.py,sha256=Piz6T2dSyv7ozsX_sn3K5DdEIa18I9UJca9V2NrF4Uc,12092
+scipy/interpolate/_pade.py,sha256=OBorKWc3vCSGlsWrajoF1_7WeNd9QtdbX0wOHLdRI2A,1827
+scipy/interpolate/_polyint.py,sha256=jcB08oyPsO71j7omBYaz-q0UbGfnxMJPzUik6lMgkD0,34983
+scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so,sha256=xHUlm6LStHTmGXQPUOPR1_b8Ezk4LkcK3PajcL2okuw,470232
+scipy/interpolate/_rbf.py,sha256=tBeBsMEe_NO1yxEv8PsX8ngVearEn1VfOyrCqEfr_Uc,11674
+scipy/interpolate/_rbfinterp.py,sha256=bzuAuZpojP-cKCukD3jVekbQzZfHnrUT13Sex5pkKOI,19723
+scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so,sha256=8AcYGq3EaX0OSiAOrQ029ZL7GJyldWVJHPec3gxh0Q0,261280
+scipy/interpolate/_rgi.py,sha256=zEKwwpQpvKU4j8NBc1SzPE61rdi_zACcZwPeqVTaPTk,31491
+scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so,sha256=h4NpmHAmUh_fXagbo8NYUbcz_vN0I2htm3JH0rxnu1U,295704
+scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so,sha256=OuDAHsFz09ayCBQPQy0SX3i2bI3aFziXHaA4MMlqe2c,338105
+scipy/interpolate/fitpack.py,sha256=VJP17JUH7I0hQhdGaOfhXpJkyUGYuKDfaZ0GGFdLE9o,716
+scipy/interpolate/fitpack2.py,sha256=34oNI8q0UKW6kLh0iLGToTKmen1CsKHKiendex3Fp9k,964
+scipy/interpolate/interpnd.cpython-310-x86_64-linux-gnu.so,sha256=rLGJfF7UUnMGYuVFCvv1VhSmYvcrOvtfuKSsEfU-6pA,484664
+scipy/interpolate/interpolate.py,sha256=pmWxfOOtaAvMKJvkO8oLvMGBZp1cEDvUM9PJWg2Cl2g,963
+scipy/interpolate/ndgriddata.py,sha256=F65cg9Tw-3LQy-G3V0YWFMN4yF23I6xOoQI3idK-sPg,677
+scipy/interpolate/polyint.py,sha256=-KGJfScIoqD3mTuR7FKS8MKWaE4EtPzomfB0Zoaa4f4,712
+scipy/interpolate/rbf.py,sha256=9AKQfUe99wmx8GaQoOd1sMo-o9yupBtvYBshimRqG9Y,597
+scipy/interpolate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_rgi.cpython-310.pyc,,
+scipy/interpolate/tests/data/bug-1310.npz,sha256=jWgDwLOY8nBMI28dG56OXt4GvRZaCrsPIoKBq71FWuk,2648
+scipy/interpolate/tests/data/estimate_gradients_hang.npy,sha256=QGwQhXQX_16pjYzSiUXJ0OT1wk-SpIrQ6Pq5Vb8kd_E,35680
+scipy/interpolate/tests/data/gcvspl.npz,sha256=A86BVabLoMG_CiRBoQwigZH5Ft7DbLggcjQpgRKWu6g,3138
+scipy/interpolate/tests/test_bsplines.py,sha256=XoOzxITldFfd5JxbGa2M_v6AL3USCNsAkq5mJZBBzKI,93848
+scipy/interpolate/tests/test_fitpack.py,sha256=zkOUpis1bFPOiZSuBTcwOpM8TH8lYE37YhLlY_n_cdw,16057
+scipy/interpolate/tests/test_fitpack2.py,sha256=fyNnCzCp2V-OQ8hHuRtgeSEcBlB102KFTu1HeOXm2ik,58726
+scipy/interpolate/tests/test_gil.py,sha256=wt92CaxUlVgRGB-Wl2EuQxveqdARU8rZucD9IKl-pUE,1874
+scipy/interpolate/tests/test_interpnd.py,sha256=n-jvOfEyyPrA46HH43xT-5mH7jN8iICRz6Hou80aPog,13675
+scipy/interpolate/tests/test_interpolate.py,sha256=QkW9zZJzp-1sC-bBjbfUwpF9nsEEQhsyNXbKXCLm7U0,97533
+scipy/interpolate/tests/test_ndgriddata.py,sha256=2q-eRB6cvvRjtBaeFjjZJJXkkYA_ILXSecOZueT0Z3Q,10980
+scipy/interpolate/tests/test_pade.py,sha256=qtJfPaUxPCt2424CeYUCHIuofGGq0XAiyFCLYdkSMLg,3808
+scipy/interpolate/tests/test_polyint.py,sha256=q6S4LFc0aJjbxm4H0rP1NFspQ9QHvzT9E4ZJVJd6ujM,36326
+scipy/interpolate/tests/test_rbf.py,sha256=OitMk6wEbVeRS_TUeSa-ReWqR7apVez2n-wYOI08grg,6559
+scipy/interpolate/tests/test_rbfinterp.py,sha256=i-gJl0zAl5ctWj2cRU6Wi9kHOrnbbFuSeS_Ltr0Mog8,18529
+scipy/interpolate/tests/test_rgi.py,sha256=31AtLCmsfVXmg3JJllgFq0cPBx9_7yN8nkrR1FFGFbg,44604
+scipy/io/__init__.py,sha256=XegFIpTjKz9NXsHPLcvnYXT-mzUrMqPJUD7a8dhUK_0,2735
+scipy/io/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/__pycache__/_fortran.cpython-310.pyc,,
+scipy/io/__pycache__/_idl.cpython-310.pyc,,
+scipy/io/__pycache__/_mmio.cpython-310.pyc,,
+scipy/io/__pycache__/_netcdf.cpython-310.pyc,,
+scipy/io/__pycache__/harwell_boeing.cpython-310.pyc,,
+scipy/io/__pycache__/idl.cpython-310.pyc,,
+scipy/io/__pycache__/mmio.cpython-310.pyc,,
+scipy/io/__pycache__/netcdf.cpython-310.pyc,,
+scipy/io/__pycache__/wavfile.cpython-310.pyc,,
+scipy/io/_fast_matrix_market/__init__.py,sha256=8okZpcBG5EjYz6kxS26Uxof9rk0YZcUb-3aT7dO_3SY,16876
+scipy/io/_fast_matrix_market/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so,sha256=cZ-MTGi7t1EIxpaNK6QWCio11p63h-40iUOohOpimCc,3827072
+scipy/io/_fortran.py,sha256=ZWR385RMYQtcjgv2S9CCaRwOHPKf1kzD8dzAIqw55WE,10895
+scipy/io/_harwell_boeing/__init__.py,sha256=2iVxlj6ZquU8_XPA37npOdeHCXe8XbQrmMZO7k6Bzxs,574
+scipy/io/_harwell_boeing/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/_harwell_boeing/__pycache__/_fortran_format_parser.cpython-310.pyc,,
+scipy/io/_harwell_boeing/__pycache__/hb.cpython-310.pyc,,
+scipy/io/_harwell_boeing/_fortran_format_parser.py,sha256=ykWecU9ysrCFRfeIdctaELnIDQMaCt6PjGwkxpljNzw,8917
+scipy/io/_harwell_boeing/hb.py,sha256=euxQyYRTvluzGUicNfEuyk4cOUCGLFCIs0r-8vjIZ-U,19177
+scipy/io/_harwell_boeing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/io/_harwell_boeing/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/_harwell_boeing/tests/__pycache__/test_fortran_format.cpython-310.pyc,,
+scipy/io/_harwell_boeing/tests/__pycache__/test_hb.cpython-310.pyc,,
+scipy/io/_harwell_boeing/tests/test_fortran_format.py,sha256=0LxOjUewBj1Fwf7EOxMWZG_PdzMbVrFYMUeGgs23VII,2360
+scipy/io/_harwell_boeing/tests/test_hb.py,sha256=3eLwxTSg_Ebt2pjBLvZhpq8WUMjkFhM1lsTu_mgvDTI,2284
+scipy/io/_idl.py,sha256=4oBvgwifLtx05eMKTNbYMfrOi1yi4poEM5scZb6J00w,27102
+scipy/io/_mmio.py,sha256=-SCJh-M8Zmh-UbBs8mbyFJhGP3eCRLbAknB0s0zl-rQ,31872
+scipy/io/_netcdf.py,sha256=dGNKBKWJ2ZcO5e5aQ1Z9oZW-n26clSweqv_bPhnSL78,39263
+scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so,sha256=wSJC3OO9XilZ0iWqb2Q8s1XjuWkRfZx4HgfsX6zRtkE,63449
+scipy/io/arff/__init__.py,sha256=czaV8hvY6JnmEn2qyU3_fzcy_P55aXVT09OzGnhJT9I,805
+scipy/io/arff/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/arff/__pycache__/_arffread.cpython-310.pyc,,
+scipy/io/arff/__pycache__/arffread.cpython-310.pyc,,
+scipy/io/arff/_arffread.py,sha256=iZgv9wiDI9oivXVd4lxhWgS1KPYS7sWvE9IV8bvlzPI,26560
+scipy/io/arff/arffread.py,sha256=q8OPAnQ_eP4K4ZyspmXOeaR-KwpiVvEKTntVPEWew3o,1145
+scipy/io/arff/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/io/arff/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/arff/tests/__pycache__/test_arffread.cpython-310.pyc,,
+scipy/io/arff/tests/data/iris.arff,sha256=fTS6VWSX6dwoM16mYoo30dvLoJChriDcLenHAy0ZkVM,7486
+scipy/io/arff/tests/data/missing.arff,sha256=ga__Te95i1Yf-yu2kmYDBVTz0xpSTemz7jS74_OfI4I,120
+scipy/io/arff/tests/data/nodata.arff,sha256=DBXdnIe28vrbf4C-ar7ZgeFIa0kGD4pDBJ4YP-z4QHQ,229
+scipy/io/arff/tests/data/quoted_nominal.arff,sha256=01mPSc-_OpcjXFy3EoIzKdHCmzWSag4oK1Ek2tUc6_U,286
+scipy/io/arff/tests/data/quoted_nominal_spaces.arff,sha256=bcMOl-E0I5uTT27E7bDTbW2mYOp9jS8Yrj0NfFjQdKU,292
+scipy/io/arff/tests/data/test1.arff,sha256=nUFDXUbV3sIkur55rL4qvvBdqUTbzSRrTiIPwmtmG8I,191
+scipy/io/arff/tests/data/test10.arff,sha256=va7cXiWX_AnHf-_yz25ychD8hOgf7-sEMJITGwQla30,199009
+scipy/io/arff/tests/data/test11.arff,sha256=G-cbOUUxuc3859vVkRDNjcLRSnUu8-T-Y8n0dSpvweo,241
+scipy/io/arff/tests/data/test2.arff,sha256=COGWCYV9peOGLqlYWhqG4ANT2UqlAtoVehbJLW6fxHw,300
+scipy/io/arff/tests/data/test3.arff,sha256=jUTWGaZbzoeGBneCmKu6V6RwsRPp9_0sJaSCdBg6tyI,72
+scipy/io/arff/tests/data/test4.arff,sha256=mtyuSFKUeiRR2o3mNlwvDCxWq4DsHEBHj_8IthNzp-M,238
+scipy/io/arff/tests/data/test5.arff,sha256=2Q_prOBCfM_ggsGRavlOaJ_qnWPFf2akFXJFz0NtTIE,365
+scipy/io/arff/tests/data/test6.arff,sha256=V8FNv-WUdurutFXKTOq8DADtNDrzfW65gyOlv-lquOU,195
+scipy/io/arff/tests/data/test7.arff,sha256=rxsqdev8WeqC_nKJNwetjVYXA1-qCzWmaHlMvSaVRGk,559
+scipy/io/arff/tests/data/test8.arff,sha256=c34srlkU8hkXYpdKXVozEutiPryR8bf_5qEmiGQBoG4,429
+scipy/io/arff/tests/data/test9.arff,sha256=ZuXQQzprgmTXxENW7we3wBJTpByBlpakrvRgG8n7fUk,311
+scipy/io/arff/tests/test_arffread.py,sha256=7L9m9tLfHz8moV8wJyLs1ob_gxFBCBr3SDpZXW1fgng,13104
+scipy/io/harwell_boeing.py,sha256=6cNioakGH8vMnjCt-k7W2vM5eq_L6ZMvnwpLB23KBoM,682
+scipy/io/idl.py,sha256=WWbkHVJPlPTH4XBQmts7g4ei1UBlZFvR9fJ79poHwzM,599
+scipy/io/matlab/__init__.py,sha256=YkLznYXgPaXmCNngcs9O9firIXLnM9Ez8iQC5luw2-Y,2028
+scipy/io/matlab/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_byteordercodes.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_mio.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_mio4.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_mio5.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_mio5_params.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_miobase.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/byteordercodes.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio4.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio5.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio5_params.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio5_utils.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio_utils.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/miobase.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/streams.cpython-310.pyc,,
+scipy/io/matlab/_byteordercodes.py,sha256=5mtMzDwNmpSWeEk901SKqwN2tIXSNIN1FBpmZ2Pn3XY,1985
+scipy/io/matlab/_mio.py,sha256=Bb4X8My32gDYfeZiRQuVzdJzjtGHJiwRYOxaQb3Z0Dg,12833
+scipy/io/matlab/_mio4.py,sha256=xSIrZ1BbIoxtoQqa44pu5LgvlCclehfUuoWR4Q1jZ4M,20713
+scipy/io/matlab/_mio5.py,sha256=28C22-ZpH782DqXyrpazkoEI6iCjnTcfXPWHZBstKB8,33580
+scipy/io/matlab/_mio5_params.py,sha256=skRcKG70vOlVMSb1TO67LB5312zuOUSrcOK7mOCcUss,8201
+scipy/io/matlab/_mio5_utils.cpython-310-x86_64-linux-gnu.so,sha256=9tssFfOqsNK1W7t81v3NDf5OjrqyCtX0jy5xVJPG9oQ,264600
+scipy/io/matlab/_mio_utils.cpython-310-x86_64-linux-gnu.so,sha256=STtjkyZ6zE3AnPPRavWcxkjgucxtneKEsk4Z88GQMCU,73280
+scipy/io/matlab/_miobase.py,sha256=xw8D9CU6Aajk6-hXhtAW5GKMkbkSdJxTx17qogpSxCA,12962
+scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so,sha256=gskAgdMqQQLu2ptdArq7apZjG7Q-Riz0xWd2v81EYX8,147488
+scipy/io/matlab/byteordercodes.py,sha256=TP6lKr_4_0aUVqX5flFI_w_NabnJF3xvbm6xK4qWIws,611
+scipy/io/matlab/mio.py,sha256=imPlshqcGZNEuWlzpYW-Y_JzUqcwdI9Z1SE3gjCzTWo,678
+scipy/io/matlab/mio4.py,sha256=53boJCNzXr3bRewVn5xtBqp_gFvb1fEUZobx-cbxpqY,983
+scipy/io/matlab/mio5.py,sha256=tcfrucXyoBq5OOSQWLpQvmlABq0ZhgKnnLK_-0ld-LQ,1217
+scipy/io/matlab/mio5_params.py,sha256=bPjuNDH79SW5p-L4RFEXFiokiynE1rqolR26-qVH0RE,1294
+scipy/io/matlab/mio5_utils.py,sha256=BrUSxwpJ2d32lW6Gjuuh5Sk7SeMQv-MS1r0sc-ZcaBo,661
+scipy/io/matlab/mio_utils.py,sha256=JZP2mnyDKjHzABKHAZ5Nmxt9FdnlM1lUV-Qe4Uju2yk,558
+scipy/io/matlab/miobase.py,sha256=JKUwT3HNlPzLFiigr3lPj9WB7yBx7mF8xitGuFwWu5E,764
+scipy/io/matlab/streams.py,sha256=sh2KA6Wl-56ghy15v2P2tmIrH-Tb8bGnTp7z22XTx-8,585
+scipy/io/matlab/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/io/matlab/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_byteordercodes.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_mio.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_mio5_utils.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_mio_funcs.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_mio_utils.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_miobase.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_pathological.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_streams.cpython-310.pyc,,
+scipy/io/matlab/tests/data/bad_miuint32.mat,sha256=CVkYHp_U4jxYKRRHSuZ5fREop4tJjnZcQ02DKfObkRA,272
+scipy/io/matlab/tests/data/bad_miutf8_array_name.mat,sha256=V-jfVMkYyy8qRGcOIsNGcoO0GCgTxchrsQUBGBnfWHE,208
+scipy/io/matlab/tests/data/big_endian.mat,sha256=2ttpiaH2B6nmHnq-gsFeMvZ2ZSLOlpzt0IJiqBTcc8M,273
+scipy/io/matlab/tests/data/broken_utf8.mat,sha256=nm8aotRl6NIxlM3IgPegKR3EeevYZoJCrYpV4Sa1T5I,216
+scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat,sha256=X4dvE7K9DmGEF3D6I-48hC86W41jB54H7bD8KTXjtYA,276
+scipy/io/matlab/tests/data/corrupted_zlib_data.mat,sha256=DfE1YBH-pYw-dAaEeKA6wZcyKeo9GlEfrzZtql-fO_w,3451
+scipy/io/matlab/tests/data/japanese_utf8.txt,sha256=rgxiBH7xmEKF91ZkB3oMLrqABBXINEMHPXDKdZXNBEY,270
+scipy/io/matlab/tests/data/little_endian.mat,sha256=FQP_2MNod-FFF-JefN7ZxovQ6QLCdHQ0DPL_qBCP44Y,265
+scipy/io/matlab/tests/data/logical_sparse.mat,sha256=qujUUpYewaNsFKAwGpYS05z7kdUv9TQZTHV5_lWhRrs,208
+scipy/io/matlab/tests/data/malformed1.mat,sha256=DTuTr1-IzpLMBf8u5DPb3HXmw9xJo1aWfayA5S_3zUI,2208
+scipy/io/matlab/tests/data/miuint32_for_miint32.mat,sha256=romrBP_BS46Sl2-pKWsUnxYDad2wehyjq4wwLaVqums,272
+scipy/io/matlab/tests/data/miutf8_array_name.mat,sha256=Vo8JptFr-Kg2f2cEoDg8LtELSjVNyccdJY74WP_kqtc,208
+scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat,sha256=bvdmj6zDDUIpOfIP8J4Klo107RYCDd5VK5gtOYx3GsU,8168
+scipy/io/matlab/tests/data/one_by_zero_char.mat,sha256=Z3QdZjTlOojjUpS0cfBP4XfNQI3GTjqU0n_pnAzgQhU,184
+scipy/io/matlab/tests/data/parabola.mat,sha256=ENWuWX_uwo4Av16dIGOwnbMReAMrShDhalkq8QUI8Rg,729
+scipy/io/matlab/tests/data/single_empty_string.mat,sha256=4uTmX0oydTjmtnhxqi9SyPWCG2I24gj_5LarS80bPik,171
+scipy/io/matlab/tests/data/some_functions.mat,sha256=JA736oG3s8PPdKhdsYK-BndLUsGrJCJAIRBseSIEZtM,1397
+scipy/io/matlab/tests/data/sqr.mat,sha256=3DtGl_V4wABKCDQ0P3He5qfOzpUTC-mINdK73MKS7AM,679
+scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat,sha256=-odiBIQAbOLERg0Vg682QHGfs7C8MaA_gY77OWR8x78,232
+scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat,sha256=G5siwvZ-7Uv5KJ6h7AA3OHL6eiFsd8Lnjx4IcoByzCU,232
+scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat,sha256=EVj1wPnoyWGIdTpkSj3YAwqzTAm27eqZNxCaJAs3pwU,213
+scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat,sha256=S_Sd3sxorDd8tZ5CxD5_J8vXbfcksLWzhUQY5b82L9g,213
+scipy/io/matlab/tests/data/test_empty_struct.mat,sha256=WoC7g7TyXqNr2T0d5xE3IUq5PRzatE0mxXjqoHX5Xec,173
+scipy/io/matlab/tests/data/test_mat4_le_floats.mat,sha256=2xvn3Cg4039shJl62T-bH-VeVP_bKtwdqvGfIxv8FJ4,38
+scipy/io/matlab/tests/data/test_skip_variable.mat,sha256=pJLVpdrdEb-9SMZxaDu-uryShlIi90l5LfXhvpVipJ0,20225
+scipy/io/matlab/tests/data/testbool_8_WIN64.mat,sha256=_xBw_2oZA7u9Xs6GJItUpSIEV4jVdfdcwzmLNFWM6ow,185
+scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat,sha256=OWOBzNpWTyAHIcZABRytVMcABiRYgEoMyF9gDaIkFe4,536
+scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat,sha256=7111TN_sh1uMHmYx-bjd_v9uaAnWhJMhrQFAtAw6Nvk,536
+scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat,sha256=62p6LRW6PbM-Y16aUeGVhclTVqS5IxPUtsohe7MjrYo,283
+scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat,sha256=NkTA8UW98hIQ0t5hGx_leG-MzNroDelYwqx8MPnO63Q,283
+scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat,sha256=AeNaog8HUDCVrIuGICAXYu9SGDsvV6qeGjgvWHrVQho,568
+scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat,sha256=Gl4QA0yYwGxjiajjgWS939WVAM-W2ahNIm9wwMaT5oc,568
+scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat,sha256=CUGtkwIU9CBa0Slx13mbaM67_ec0p-unZdu8Z4YYM3c,228
+scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat,sha256=TeTk5yjl5j_bcnmIkpzuYHxGGQXNu-rK6xOsN4t6lX8,228
+scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat,sha256=WOwauWInSVUFBuOJ1Bo3spmUQ3UWUIlsIe4tYGlrU7o,176
+scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat,sha256=GpAEccizI8WvlrBPdvlKUv6uKbZOo_cjUK3WVVb2lo4,352
+scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat,sha256=3MEbf0zJdQGAO7x-pzFCup2QptfYJHQG59z0vVOdxl4,352
+scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat,sha256=VNHV2AIEkvPuhae1kKIqt5t8AMgUyr0L_CAp-ykLxt4,247
+scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat,sha256=8rWGf5bqY7_2mcd5w5gTYgMkXVePlLL8qT7lh8kApn0,247
+scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat,sha256=MzT7OYPEUXHYNPBrVkyKEaG5Cas2aOA0xvrO7l4YTrQ,103
+scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat,sha256=DpB-mVKx1gsjl-3IbxfxHNuzU5dnuku-MDQCA8kALVI,272
+scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat,sha256=4hY5VEubavNEv5KvcqQnd7MWWvFUzHXXpYIqUuUt-50,272
+scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat,sha256=N2QOOIXPyy0zPZZ_qY7xIDaodMGrTq3oXNBEHZEscw0,232
+scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat,sha256=TrkJ4Xx_dC9YrPdewlsOvYs_xag7gT3cN4HkDsJmT8I,232
+scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat,sha256=g96Vh9FpNhkiWKsRm4U6KqeKd1hNAEyYSD7IVzdzwsU,472
+scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat,sha256=2Zw-cMv-Mjbs2HkSl0ubmh_htFUEpkn7XVHG8iM32o0,472
+scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat,sha256=t5Ar8EgjZ7fkTUHIVpdXg-yYWo_MBaigMDJUGWEIrmU,218
+scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat,sha256=5PPvfOoL-_Q5ou_2nIzIrHgeaOZGFXGxAFdYzCQuwEQ,218
+scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat,sha256=ScTKftENe78imbMc0I5ouBlIMcEEmZgu8HVKWAMNr58,381
+scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat,sha256=ZoVbGk38_MCppZ0LRr6OE07HL8ZB4rHXgMj9LwUBgGg,4168
+scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat,sha256=14YMiKAN9JCPTqSDXxa58BK6Un7EM4hEoSGAUuwKWGQ,151
+scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat,sha256=ZdjNbcIE75V5Aht5EVBvJX26aabvNqbUH0Q9VBnxBS4,216
+scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat,sha256=OB82QgB6SwtsxT4t453OVSj-B777XrHGEGOMgMD1XGc,216
+scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat,sha256=-TYB0kREY7i7gt5x15fOYjXi410pXuDWUFxPYuMwywI,193
+scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat,sha256=l9psDc5K1bpxNeuFlyYIYauswLnOB6dTX6-jvelW0kU,193
+scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat,sha256=2914WYQajPc9-Guy3jDOLU3YkuE4OXC_63FUSDzJzX0,38
+scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat,sha256=2X2fZKomz0ktBvibj7jvHbEvt2HRA8D6hN9qA1IDicw,200
+scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat,sha256=i364SgUCLSYRjQsyygvY1ArjEaO5uLip3HyU-R7zaLo,200
+scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat,sha256=gtYNC9_TciYdq8X9IwyGEjiw2f1uCVTGgiOPFOiQbJc,184
+scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat,sha256=eXcoTM8vKuh4tQnl92lwdDaqssGB6G9boSHh3FOCkng,184
+scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat,sha256=Zhyu2KCsseSJ5NARdS00uwddCs4wmjcWNP2LJFns2-Q,240
+scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat,sha256=KI3H58BVj6k6MFsj8icSbjy_0Z-jOesWN5cafStLPG8,276
+scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat,sha256=Yr4YKCP27yMWlK5UOK3BAEOAyMr-m0yYGcj8v1tCx-I,276
+scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat,sha256=kzLxy_1o1HclPXWyA-SX5gl6LsG1ioHuN4eS6x5iZio,800
+scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat,sha256=dq_6_n0v7cUz9YziXn-gZFNc9xYtNxZ8exTsziWIM7s,672
+scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat,sha256=3z-boFw0SC5142YPOLo2JqdusPItVzjCFMhXAQNaQUQ,306
+scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat,sha256=5OwLTMgCBlxsDfiEUzlVjqcSbVQG-X5mIw5JfW3wQXA,306
+scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat,sha256=BCvppGhO19-j-vxAvbdsORIiyuJqzCuQog9Ao8V1lvA,40
+scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat,sha256=ThppTHGJFrUfal5tewS70DL00dSwk1otazuVdJrTioE,200
+scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat,sha256=SBfN6e7Vz1rAdi8HLguYXcHUHk1viaXTYccdEyhhob4,200
+scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat,sha256=m8W9GqvflfAsizkhgAfT0lLcxuegZIWCLNuHVX69Jac,184
+scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat,sha256=t9ObKZOLy3vufnER8TlvQcUkd_wmXbJSdQoG4f3rVKY,184
+scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat,sha256=5LX9sLH7Y6h_N_a1XRN2GuMgp_P7ECpPsXGDOypAJg0,194
+scipy/io/matlab/tests/data/testsimplecell.mat,sha256=Aoeh0PX2yiLDTwkxMEyZ_CNX2mJHZvyfuFJl817pA1c,220
+scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat,sha256=dFUcB1gunfWqexgR4YDZ_Ec0w0HffM1DUE1C5PVfDDc,223
+scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat,sha256=9Sgd_SPkGNim7ZL0xgD71qml3DK0yDHYC7VSNLNQEXA,280
+scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat,sha256=jp1ILNxLyV6XmCCGxAz529XoZ9dhCqGEO-ExPH70_Pg,328
+scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat,sha256=k8QuQ_4Zu7FWTzHjRnHCVZ9Yu5vwNP0WyNzu6TuiY-4,229
+scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat,sha256=QbZOCqIvnaK0XOH3kaSXBe-m_1_Rb33psq8E-WMSBTU,229
+scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat,sha256=QMVoBXVyl9RBGvAjLoiW85kAXYJ-hHprUMegEG69A5w,294
+scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat,sha256=WfEroAT5YF4HGAKq3jTJxlFrKaTCh3rwlSlKu__VjwA,304
+scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat,sha256=e0s6cyoKJeYMArdceHpnKDvtCVcw7XuB44OBDHpoa6U,400
+scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat,sha256=kgHcuq-deI2y8hfkGwlMOkW7lntexdPHfuz0ar6b3jo,241
+scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat,sha256=rYCaWNLXK7f_jjMc6_UvZz6ZDuMCuVRmJV5RyeXiDm8,241
+scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat,sha256=hnNV6GZazEeqTXuA9vcOUo4xam_UnKRYGYH9PUGTLv8,219
+scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat,sha256=cAhec51DlqIYfDXXGaumOE3Hqb3cFWM1UsUK3K_lDP8,375
+scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat,sha256=ciFzNGMO7gjYecony-E8vtOwBY4vXIUhyug6Euaz3Kg,288
+scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat,sha256=yrJrpLiwLvU_LI1D6rw1Pk1qJK1YlC7Cmw7lwyJVLtw,288
+scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat,sha256=zo7sh-8dMpGqhoNxLEnfz3Oc7RonxiY5j0B3lxk0e8o,224
+scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat,sha256=igL_CvtAcNEa1nxunDjQZY5wS0rJOlzsUkBiDreJssk,224
+scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat,sha256=pRldk-R0ig1k3ouvaR9oVtBwZsQcDW_b4RBEDYu1-Vk,156
+scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat,sha256=B9IdaSsyb0wxjyYyHOj_GDO0laAeWDEJhoEhC9xdm1E,232
+scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat,sha256=t4tKGJg2NEg_Ar5MkOjCoQb2hVL8Q_Jdh9FF4TPL_4g,232
+scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat,sha256=lpYkBZX8K-c4FO5z0P9DMfYc7Y-yzyg11J6m-19uYTU,203
+scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat,sha256=lG-c7U-5Bo8j8xZLpd0JAsMYwewT6cAw4eJCZH5xf6E,203
+scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat,sha256=3GJbA4O7LP57J6IYzmJqTPeSJrEaiNSk-rg7h0ANR1w,608
+scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat,sha256=fRbqAnzTeOU3dTQx7O24MfMVFr6pM5u594FRrPPkYJE,552
+scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat,sha256=mCtI_Yot08NazvWHvehOZbTV4bW_I4-D5jBgJ6T9EbI,314
+scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat,sha256=52qaF4HRCtPl1jE6ljbkEl2mofZVAPpmBxrm-J5OTTI,314
+scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat,sha256=vneCpWBwApBGfeKzdZcybyajxjR-ZYf64j0l08_hU84,528
+scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat,sha256=gqhRpSfNNB5SR9sCp-wWrvokr5VV_heGnvco6dmfOvY,472
+scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat,sha256=6VDU0mtTBEG0bBHqKP1p8xq846eMhSZ_WvBZv8MzE7M,246
+scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat,sha256=ejtyxeeX_W1a2rNrEUUiG9txPW8_UtSgt8IaDOxE2pg,246
+scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat,sha256=sbi0wUwOrbU-gBq3lyDwhAbvchdtOJkflOR_MU7uGKA,496
+scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat,sha256=uTkKtrYBTuz4kICVisEaG7V5C2nJDKjy92mPDswTLPE,416
+scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat,sha256=o4F2jOhYyNpJCo-BMg6v_ITZQvjenXfXHLq94e7iwRo,252
+scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat,sha256=CNXO12O6tedEuMG0jNma4qfbTgCswAbHwh49a3uE3Yk,252
+scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat,sha256=KV97FCW-1XZiXrwXJoZPbgyAht79oIFHa917W1KFLwE,357
+scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat,sha256=9-8xzACZleBkMjZnbr8t4Ncs9B6mbzrONDblPnteBPU,357
+scipy/io/matlab/tests/data/testvec_4_GLNX86.mat,sha256=GQzR3mBVS266_NBfrRC9X0dLgmeu8Jl4r4ZYMOrn1V0,93
+scipy/io/matlab/tests/test_byteordercodes.py,sha256=FCHBAxeQZlhvTXw-AO-ukwTWvpN7NzmncBEDJ1P4de4,938
+scipy/io/matlab/tests/test_mio.py,sha256=BcQlSLmQqqNv7CQa1HcLJYVp6OtlMig9FeliyRTc98Q,44810
+scipy/io/matlab/tests/test_mio5_utils.py,sha256=eacgGg0TaQXOkG7iaeYovtWyjPgYCY50mHPoPjnHMTI,5389
+scipy/io/matlab/tests/test_mio_funcs.py,sha256=fSDaeVPvCRBFzqjWtXR5xIv9UQ_yv6Y_Nl5D5u0HIGo,1392
+scipy/io/matlab/tests/test_mio_utils.py,sha256=GX85RuLqr2HxS5_f7ZgrxbhswJy2GPQQoQbiQYg0s14,1594
+scipy/io/matlab/tests/test_miobase.py,sha256=xH4ZOR_b25TJLyIGqYQdeSASpTi8j-oIkRcO4D-R4us,1464
+scipy/io/matlab/tests/test_pathological.py,sha256=-Efeq2x2yAaLK28EKpai1vh4HsZTCteF_hY_vEGWndA,1055
+scipy/io/matlab/tests/test_streams.py,sha256=dcirMJ5slCA3eIjB9VRcGG3U2htTtXL8BiYOLvHCfds,7406
+scipy/io/mmio.py,sha256=jT06sWGxdylPF_jBjbrqV2H5TXVUa04R-38OGrN8DZs,569
+scipy/io/netcdf.py,sha256=iDIpKlQcPWf2u-jIoYsqYx3a5oqWCy-54AcFW_muzU0,880
+scipy/io/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/io/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_idl.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_paths.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc,,
+scipy/io/tests/data/Transparent Busy.ani,sha256=vwoK3ysYo87-TwzvjerHjFjSPIGpw83jjiMDXcHPWjA,4362
+scipy/io/tests/data/array_float32_1d.sav,sha256=A_xXWkfS1sQCxP4ONezeEZvlKEXwZ1TPG2rCCFdmBNM,2628
+scipy/io/tests/data/array_float32_2d.sav,sha256=qJmN94pywXznXMHzt-L6DJgaIq_FfruVKJl_LMaI8UU,3192
+scipy/io/tests/data/array_float32_3d.sav,sha256=U7P6As7Nw6LdBY1pTOaW9C-O_NlXLXZwSgbT3H8Z8uk,13752
+scipy/io/tests/data/array_float32_4d.sav,sha256=Tl6erEw_Zq3dwVbVyPXRWqB83u_o4wkIVFOe3wQrSro,6616
+scipy/io/tests/data/array_float32_5d.sav,sha256=VmaBgCD854swYyLouDMHJf4LL6iUNgajEOQf0pUjHjg,7896
+scipy/io/tests/data/array_float32_6d.sav,sha256=lb7modI0OQDweJWbDxEV2OddffKgMgq1tvCy5EK6sOU,19416
+scipy/io/tests/data/array_float32_7d.sav,sha256=pqLWIoxev9sLCs9LLwxFlM4RCFwxHC4Q0dEEz578mpI,3288
+scipy/io/tests/data/array_float32_8d.sav,sha256=R8A004f9XLWvF6eKMNEqIrC6PGP1vLZr9sFqawqM8ZA,13656
+scipy/io/tests/data/array_float32_pointer_1d.sav,sha256=sV7qFNwHK-prG5vODa7m5HYK7HlH_lqdfsI5Y1RWDyg,2692
+scipy/io/tests/data/array_float32_pointer_2d.sav,sha256=b0brvK6xQeezoRuujmEcJNw2v6bfASLM3FSY9u5dMSg,3256
+scipy/io/tests/data/array_float32_pointer_3d.sav,sha256=a_Iyg1YjPBRh6B-N_n_BGIVjFje4K-EPibKV-bPbF7E,13816
+scipy/io/tests/data/array_float32_pointer_4d.sav,sha256=cXrkHHlPyoYstDL_OJ15-55sZOOeDNW2OJ3KWhBv-Kk,6680
+scipy/io/tests/data/array_float32_pointer_5d.sav,sha256=gRVAZ6jeqFZyIQI9JVBHed9Y0sjS-W4bLseb01rIcGs,7960
+scipy/io/tests/data/array_float32_pointer_6d.sav,sha256=9yic-CQiS0YR_ow2yUA2Nix0Nb_YCKMUsIgPhgcJT1c,19480
+scipy/io/tests/data/array_float32_pointer_7d.sav,sha256=Rp1s8RbW8eoEIRTqxba4opAyY0uhTuyy3YkwRlNspQU,3352
+scipy/io/tests/data/array_float32_pointer_8d.sav,sha256=Wk3Dd2ClAwWprXLKZon3blY7aMvMrJqz_NXzK0J5MFY,13720
+scipy/io/tests/data/example_1.nc,sha256=EkfC57dWXeljgXy5sidrJHJG12D1gmQUyPDK18WzlT4,1736
+scipy/io/tests/data/example_2.nc,sha256=wywMDspJ2QT431_sJUr_5DHqG3pt9VTvDJzfR9jeWCk,272
+scipy/io/tests/data/example_3_maskedvals.nc,sha256=P9N92jCJgKJo9VmNd7FeeJSvl4yUUFwBy6JpR4MeuME,1424
+scipy/io/tests/data/fortran-3x3d-2i.dat,sha256=oYCXgtY6qqIqLAhoh_46ob_RVQRcV4uu333pOiLKgRM,451
+scipy/io/tests/data/fortran-mixed.dat,sha256=zTi7RLEnyAat_DdC3iSEcSbyDtAu0aTKwUT-tExjasw,40
+scipy/io/tests/data/fortran-sf8-11x1x10.dat,sha256=KwaOrZOAe-wRhuxvmHIK-Wr59us40MmiA9QyWtIAUaA,888
+scipy/io/tests/data/fortran-sf8-15x10x22.dat,sha256=5ohvjjOUcIsGimSqDhpUUKwflyhVsfwKL5ElQe_SU0I,26408
+scipy/io/tests/data/fortran-sf8-1x1x1.dat,sha256=Djmoip8zn-UcxWGUPKV5wzKOYOf7pbU5L7HaR3BYlec,16
+scipy/io/tests/data/fortran-sf8-1x1x5.dat,sha256=Btgavm3w3c9md_5yFfq6Veo_5IK9KtlLF1JEPeHhZoU,48
+scipy/io/tests/data/fortran-sf8-1x1x7.dat,sha256=L0r9yAEMbfMwYQytzYsS45COqaVk-o_hi6zRY3yIiO4,64
+scipy/io/tests/data/fortran-sf8-1x3x5.dat,sha256=c2LTocHclwTIeaR1Pm3mVMyf5Pl_imfjIFwi4Lpv0Xs,128
+scipy/io/tests/data/fortran-si4-11x1x10.dat,sha256=OesvSIGsZjpKZlZsV74PNwy0Co0KH8-3gxL9-DWoa08,448
+scipy/io/tests/data/fortran-si4-15x10x22.dat,sha256=OJcKyw-GZmhHb8REXMsHDn7W5VP5bhmxgVPIAYG-Fj4,13208
+scipy/io/tests/data/fortran-si4-1x1x1.dat,sha256=1Lbx01wZPCOJHwg99MBDuc6QZKdMnccxNgICt4omfFM,12
+scipy/io/tests/data/fortran-si4-1x1x5.dat,sha256=L1St4yiHTA3v91JjnndYfUrdKfT1bWxckwnnrscEZXc,28
+scipy/io/tests/data/fortran-si4-1x1x7.dat,sha256=Dmqt-tD1v2DiPZkghGGZ9Ss-nJGfei-3yFXPO5Acpk4,36
+scipy/io/tests/data/fortran-si4-1x3x5.dat,sha256=3vl6q93m25jEcZVKD0CuKNHmhZwZKp-rv0tfHoPVP88,68
+scipy/io/tests/data/invalid_pointer.sav,sha256=JmgoISXC4r5fSmI5FqyapvmzQ4qpYLf-9N7_Et1p1HQ,1280
+scipy/io/tests/data/null_pointer.sav,sha256=P_3a_sU614F3InwM82jSMtWycSZkvqRn1apwd8XxbtE,2180
+scipy/io/tests/data/scalar_byte.sav,sha256=dNJbcE5OVDY_wHwN_UBUtfIRd13Oqu-RBEO74g5SsBA,2076
+scipy/io/tests/data/scalar_byte_descr.sav,sha256=DNTmDgDWOuzlQnrceER6YJ0NutUUwZ9tozVMBWQmuuY,2124
+scipy/io/tests/data/scalar_complex32.sav,sha256=NGd-EvmFZgt8Ko5MP3T_TLwyby6yS0BXM_OW8197hpU,2076
+scipy/io/tests/data/scalar_complex64.sav,sha256=gFBWtxuAajazupGFSbvlWUPDYK-JdWgZcEWih2-7IYU,2084
+scipy/io/tests/data/scalar_float32.sav,sha256=EwWQw2JTwq99CHVpDAh4R20R0jWaynXABaE2aTRmXrs,2072
+scipy/io/tests/data/scalar_float64.sav,sha256=iPcDlgF1t0HoabvNLWCbSiTPIa9rvVEbOGGmE_3Ilsk,2076
+scipy/io/tests/data/scalar_heap_pointer.sav,sha256=JXZbPmntXILsNOuLIKL8qdu8gDJekYrlN9DQxAWve0E,2204
+scipy/io/tests/data/scalar_int16.sav,sha256=kDBLbPYGo2pzmZDhyl8rlDv0l6TMEWLIoLtmgJXDMkk,2072
+scipy/io/tests/data/scalar_int32.sav,sha256=IzJwLvEoqWLO5JRaHp8qChfptlauU-ll3rb0TfDDM8Y,2072
+scipy/io/tests/data/scalar_int64.sav,sha256=-aSHQRiaE3wjAxINwuLX33_8qmWl4GUkTH45elTkA-8,2076
+scipy/io/tests/data/scalar_string.sav,sha256=AQ7iZ8dKk9QfnLdP9idKv1ojz0M_SwpL7XAUmbHodDQ,2124
+scipy/io/tests/data/scalar_uint16.sav,sha256=928fmxLsQM83ue4eUS3IEnsLSEzmHBklDA59JAUvGK8,2072
+scipy/io/tests/data/scalar_uint32.sav,sha256=X3RbPhS6_e-u-1S1gMyF7s9ys7oV6ZNwPrJqJ6zIJsk,2072
+scipy/io/tests/data/scalar_uint64.sav,sha256=ffVyS2oKn9PDtWjJdOjSRT2KZzy6Mscgd4u540MPHC4,2076
+scipy/io/tests/data/struct_arrays.sav,sha256=TzH-Gf0JgbP_OgeKYbV8ZbJXvWt1VetdUr6C_ziUlzg,2580
+scipy/io/tests/data/struct_arrays_byte_idl80.sav,sha256=oOmhTnmKlE60-JMJRRMv_zfFs4zqioMN8QA0ldlgQZo,1388
+scipy/io/tests/data/struct_arrays_replicated.sav,sha256=kXU8j9QI2Q8D22DVboH9fwwDQSLVvuWMJl3iIOhUAH8,2936
+scipy/io/tests/data/struct_arrays_replicated_3d.sav,sha256=s3ZUwhT6TfiVfk4AGBSyxYR4FRzo4sZQkTxFCJbIQMI,4608
+scipy/io/tests/data/struct_inherit.sav,sha256=4YajBZcIjqMQ4CI0lRUjXpYDY3rI5vzJJzOYpjWqOJk,2404
+scipy/io/tests/data/struct_pointer_arrays.sav,sha256=fkldO6-RO2uAN_AI9hM6SEaBPrBf8TfiodFGJpViaqg,2408
+scipy/io/tests/data/struct_pointer_arrays_replicated.sav,sha256=eKVerR0LoD9CuNlpwoBcn7BIdj3-8x56VNg--Qn7Hgc,2492
+scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav,sha256=vsqhGpn3YkZEYjQuI-GoX8Jg5Dv8A2uRtP0kzQkq4lg,2872
+scipy/io/tests/data/struct_pointers.sav,sha256=Zq6d5V9ZijpocxJpimrdFTQG827GADBkMB_-6AweDYI,2268
+scipy/io/tests/data/struct_pointers_replicated.sav,sha256=aIXPBIXTfPmd4IaLpYD5W_HUoIOdL5Y3Hj7WOeRM2sA,2304
+scipy/io/tests/data/struct_pointers_replicated_3d.sav,sha256=t1jhVXmhW6VotQMNZ0fv0sDO2pkN4EutGsx5No4VJQs,2456
+scipy/io/tests/data/struct_scalars.sav,sha256=LYICjERzGJ_VvYgtwJ_Up2svQTv8wBzNcVD3nsd_OPg,2316
+scipy/io/tests/data/struct_scalars_replicated.sav,sha256=lw3fC4kppi6BUWAd4n81h8_KgoUdiJl5UIt3CvJIuBs,2480
+scipy/io/tests/data/struct_scalars_replicated_3d.sav,sha256=xVAup6f1dSV_IsSwBQC3KVs0eLEZ6-o5EaZT9yUoDZI,3240
+scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav,sha256=gjv__ng9xH_sm34hyxCbCgO4AP--PZAfDOArH5omkjM,3586
+scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav,sha256=H0LLyv2lc2guzYGnx4DWXU6vB57JrRX-G9Dd4qGh0hM,3586
+scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav,sha256=KKz9SXv_R3gX_AVeED2vyhYnj4BvD1uyDiKpCT3ulZ0,17720
+scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav,sha256=YX1g8qdCOAG16vX9G6q4SsfCj2ZVk199jzDQ8S0zWYI,72
+scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav,sha256=bFrsRqw0QXmsaDtjD6TFP8hZ5jEYMyaCmt-ka_C6GNk,1024
+scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav,sha256=zMnhvZvrP4kyOWKVKfbBneyv03xvzgqXYhHNxsAxDJ4,13
+scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav,sha256=9qTCvpgdz3raecVN1ViggHPnQjBf47xmXod9iCDsEik,17720
+scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav,sha256=EqYBnEgTxTKvaTAtdA5HIl47CCFIje93y4hawR6Pyu0,7792
+scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav,sha256=hGYchxQFjrtvZCBo0ULi-xdZ8krqXcKdTl3NSUfqe8k,90
+scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav,sha256=h8CXsW5_ShKR197t_d-TUTlgDqOZ-7wK_EcVGucR-aY,74
+scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav,sha256=BoUCDct3GiY_JJV_HoghF3mzAebT18j02c-MOn19KxU,70
+scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav,sha256=R6EJshvQp5YVR4GB9u4Khn5HM1VMfJUj082i8tkBIJ8,1644
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav,sha256=t2Mgri3h6JLQDekrwIhDBOaG46OUzHynUz0pKbvOpNU,90
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav,sha256=yCv0uh-ux_skJsxeOjzog0YBk3ZQO_kw5HJHMqtVyI0,90
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav,sha256=oiMVsQV9-qGBz_ZwsfAkgA9BZXNjXbH4zxCGvvdT0RY,120
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav,sha256=e97XoPrPGJDIh8nO6mii__ViY5yVlmt4OnPQoDN1djs,134
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav,sha256=wbonKlzvzQ_bQYyBsj-GwnihZOhn0uxfKhL_nENCGNc,150
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav,sha256=Uu5QPQcbtnFlnxOd4zFGxpiTC4wgdp6JOoYJ2VMZIU0,164
+scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav,sha256=1F67h8tr2xz0C5K21T9y9gspcGA0qnSOzsl2vjArAMs,116
+scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav,sha256=TJvGU7GpgXdCrdrjzMlDtpieDMnDK-lWMMqlWjT23BY,89
+scipy/io/tests/data/various_compressed.sav,sha256=H-7pc-RCQx5y6_IbHk1hB6OfnhvuPyW6EJq4EwI9iMc,1015
+scipy/io/tests/test_fortran.py,sha256=U8BS4PZxbnIzg8-GHYTXMDpHlKcDhu6-8GCbX6PVqho,7531
+scipy/io/tests/test_idl.py,sha256=Q1ekSAxQdXN-MailSNDqaKHAQvyP9BxtOwGM3NpYyrw,20511
+scipy/io/tests/test_mmio.py,sha256=GXrcNLv-2roKPaisWRyf6i9hG-EmmNkKqOX4HPx29WA,27874
+scipy/io/tests/test_netcdf.py,sha256=8BpKkEm-G0zymAjpvMS5doLLORwhnX35nzPaod4vMxM,19404
+scipy/io/tests/test_paths.py,sha256=3ewh_1yXujx3NIZ3deUjepFJgJDa5IHIugxupLDhHoU,3178
+scipy/io/tests/test_wavfile.py,sha256=LLYFtOeL4vPdk7221TcQ_J3aVPVe9IfV16GyHCSoeAo,15647
+scipy/io/wavfile.py,sha256=Jgz3Qi_6RXNphZVx6riCGK4qovdBbcnzI4726a0ex4I,26625
+scipy/linalg.pxd,sha256=0MlO-o_Kr8gg--_ipXEHFGtB8pZdHX8VX4wLYe_UzPg,53
+scipy/linalg/__init__.py,sha256=UOFZX4GCusrQjcaPB6NNNerhsVDe707BvlfE7XB8KzU,7517
+scipy/linalg/__pycache__/__init__.cpython-310.pyc,,
+scipy/linalg/__pycache__/_basic.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_cholesky.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_cossin.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_ldl.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_polar.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_qz.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_schur.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_svd.cpython-310.pyc,,
+scipy/linalg/__pycache__/_expm_frechet.cpython-310.pyc,,
+scipy/linalg/__pycache__/_interpolative_backend.cpython-310.pyc,,
+scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc,,
+scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc,,
+scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc,,
+scipy/linalg/__pycache__/_misc.cpython-310.pyc,,
+scipy/linalg/__pycache__/_procrustes.cpython-310.pyc,,
+scipy/linalg/__pycache__/_sketches.cpython-310.pyc,,
+scipy/linalg/__pycache__/_solvers.cpython-310.pyc,,
+scipy/linalg/__pycache__/_special_matrices.cpython-310.pyc,,
+scipy/linalg/__pycache__/_testutils.cpython-310.pyc,,
+scipy/linalg/__pycache__/basic.cpython-310.pyc,,
+scipy/linalg/__pycache__/blas.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_cholesky.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_lu.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_qr.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_schur.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc,,
+scipy/linalg/__pycache__/interpolative.cpython-310.pyc,,
+scipy/linalg/__pycache__/lapack.cpython-310.pyc,,
+scipy/linalg/__pycache__/matfuncs.cpython-310.pyc,,
+scipy/linalg/__pycache__/misc.cpython-310.pyc,,
+scipy/linalg/__pycache__/special_matrices.cpython-310.pyc,,
+scipy/linalg/_basic.py,sha256=bG3YlFR2vgoF8ijCkedBmEw4x0iAS_5-orpUdDxcE78,68914
+scipy/linalg/_blas_subroutines.h,sha256=3nanVNwivmwbWRd42BNZB4G2lH7i5nYnsvO3gEohZQE,18134
+scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so,sha256=4QlArUqYu3kYG1frieTVJ27tDzvSomXNPsRMq1NXQHY,633088
+scipy/linalg/_cythonized_array_utils.pxd,sha256=OlWTbJt3gmdrfRFyx_Vz7GTmDTjr8dids5HA4TfC6R0,890
+scipy/linalg/_cythonized_array_utils.pyi,sha256=HZWXvJdpXGcydTEjkaL_kXIcxpcMqBBfFz7ZhscsRNo,340
+scipy/linalg/_decomp.py,sha256=ta_h9p6FoKFEe1pzV759Cinnrj00GsaHmGil6XIOf0Y,62177
+scipy/linalg/_decomp_cholesky.py,sha256=aOKQKj0WG6j-UBUifPwoSx6NFmUa5RftayITRrD_tAw,11815
+scipy/linalg/_decomp_cossin.py,sha256=N1TCrFf_-umaWn035E4CtxOBCkHROaFEhSqZLITLB3M,8973
+scipy/linalg/_decomp_ldl.py,sha256=HYzVUNZgEyuC2ZoFOGneas8ZkhhOFzUGcapL3Pos_cE,12535
+scipy/linalg/_decomp_lu.py,sha256=6KMcxOyCxLNFmzqh-DPmti8ck0gWQtSRdZmXUMMzzEs,12588
+scipy/linalg/_decomp_lu_cython.cpython-310-x86_64-linux-gnu.so,sha256=ens7MPKv-1i_5o3E4blXB-Cu1giUKhA3bJR2nz5RAXM,270816
+scipy/linalg/_decomp_lu_cython.pyi,sha256=EASCkhrbJcBHo4zMYCUl1qRJDvPrvCqxd1TfqMWEd_U,291
+scipy/linalg/_decomp_polar.py,sha256=arzJ40FP1-TFsRvXPCP1qdNTsT60lkBcKBHfhB2JxxY,3578
+scipy/linalg/_decomp_qr.py,sha256=n9241Aj2DY7RALMK4E22zApBppIMc-BV5P8mBOpML5g,13776
+scipy/linalg/_decomp_qz.py,sha256=uH93in1ikPR-Wgi1g49EPm2XXuhKOWBzPUJEahCotx8,16330
+scipy/linalg/_decomp_schur.py,sha256=yUUR-4mtWG0qjtz6UMhj5L0PMNGKLH5m12KElks4Gtk,10419
+scipy/linalg/_decomp_svd.py,sha256=Egoy9LMjsNsykHqPp584LT43sVAyHS8LEWM1wUF7LDg,15616
+scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so,sha256=oOVzbfeQa7g-7aZYEy-kjcFOW8yxSnW8Ntn9PjHIAJU,372704
+scipy/linalg/_expm_frechet.py,sha256=efAQwON5vV4D_8NAe3EAM1NMNibQUlNZHjFmmp48Bs4,12328
+scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so,sha256=PokAppTl9GvdssvRY8iy2pc50081fxdPeHQx9BJx154,642017
+scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so,sha256=RWX6DQ3EQNB0GTqJdwxQWioXRj3P1jRug9oIWPIvGx4,2066281
+scipy/linalg/_interpolative.cpython-310-x86_64-linux-gnu.so,sha256=SdF0-ostVysuUhGw1RBMBrMKsrBio_hnp78RSnOq0IE,457113
+scipy/linalg/_interpolative_backend.py,sha256=yycf_ceX0dgf7Usjvtaxmkm_cT-2jmEMBuWY6tJST2g,45192
+scipy/linalg/_lapack_subroutines.h,sha256=E4T9vai7YJAJZ9HBMyGRpCm36NEufmTTdZDjWe-DwNA,239303
+scipy/linalg/_matfuncs.py,sha256=oD7Ni2R7EQsJNRiQRt_LvM6cz-DCWOYEzUeOm1e5pUE,24331
+scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so,sha256=jsajvaJRa66ms_qSo2IGaE19WDhYecbXFz3NcXhmxhk,525696
+scipy/linalg/_matfuncs_expm.pyi,sha256=GCTnQ9X_CNNpadcYhDFhjL2WBhzfdnt0mkW1ms34cjY,187
+scipy/linalg/_matfuncs_inv_ssq.py,sha256=THG87Ac9olliQ9tKjshCo1NRzb1QfgGHOOUomedP4eE,28059
+scipy/linalg/_matfuncs_sqrtm.py,sha256=ijwi8Kqx8n4EIbTThMcyyJfDjjK51B_dCBM27tZdQLQ,6820
+scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so,sha256=azWjVeOW-0rLYFs294LEGV9D4TADoni1RRcytPqTEV8,276432
+scipy/linalg/_misc.py,sha256=3IPq-LIQcxV7ELbtcgZK8Ri60YWbhpN_y7UYe6BKEgA,6283
+scipy/linalg/_procrustes.py,sha256=aa5KcFwCM0wcwnLhwwBq_pWIMhfZoB5wIHY2ocS7Xc0,2763
+scipy/linalg/_sketches.py,sha256=n6PEJILrFpzWhdf-sKFgGN-0elEwqvBlI0Z3H54tk0c,6145
+scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so,sha256=0aT_Xi4m-LM8tsmyxOueU3sr70Eb2RSgJPl1H-M6MGM,300152
+scipy/linalg/_solvers.py,sha256=q-bHb_WR4D3a_uOWpiD2zclBhotdxwPO8OwC4V0KGM4,28342
+scipy/linalg/_special_matrices.py,sha256=NieLFLp1O_6BlgAx_fVRr2bVrqaFFS5VySRVNBFnIbc,36865
+scipy/linalg/_testutils.py,sha256=oUEc8_lllXP18Ugrv3KlEcibTL6Mem5iEAyZJg4hNwE,1753
+scipy/linalg/basic.py,sha256=0uMJev4ZSqcrZ4FEV50FQyzf1U39QAhTu8gI_s_0R90,797
+scipy/linalg/blas.py,sha256=WcuILhaA_wqcz2NJRl8gNabzec8Xi-kj4HeRS-EJhYY,11697
+scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so,sha256=PG3NCTAQ1ZeRITbysRu-U4IljmguzLvjMmknDd91fH0,348849
+scipy/linalg/cython_blas.pxd,sha256=DCPBxNWP-BvdT_REj6_a4TjUrNaf6sCq_XoxU3pEbfc,15592
+scipy/linalg/cython_blas.pyx,sha256=DFCT-H2mDlf-KtVcTB4DQyCRSIIQjd1zB3r8NSUafrY,64918
+scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so,sha256=8vkROhZcpQmId9eNfAcaJgSN2zrXtU6UXSrFXg-39pQ,837713
+scipy/linalg/cython_lapack.pxd,sha256=Ld5hPwcYxpOPahFNsfNomsp0_DY8BfG-W8TmZxh-iYM,204556
+scipy/linalg/cython_lapack.pyx,sha256=dLADFnGKlafqoLZOE7OqVmj2pzhWDNut0KJMzh_i9w4,706982
+scipy/linalg/decomp.py,sha256=imZLuEFtV2WakBzX1DPiWCgUw00t4bEXyMyjtyQu_B4,838
+scipy/linalg/decomp_cholesky.py,sha256=LfsMeb0QgOX2nLKgCsZCpi-mXBxGT596kPYVeRALok8,688
+scipy/linalg/decomp_lu.py,sha256=1KQnoczngZjaNxs_CAP6-eUcyw2igK1PrmNHm1vhRlk,614
+scipy/linalg/decomp_qr.py,sha256=QRjlkvSPo65naiTUDK823r6DnrcxDucOma6Z_DTLG0I,579
+scipy/linalg/decomp_schur.py,sha256=6GtwTodRgqTY9tsmPpdKtIIgOGSEYub4_F2tmCYChvw,660
+scipy/linalg/decomp_svd.py,sha256=HrJqbmgde7d7EWxCsa9XkS9QuWgPYMFOHiF4NcAL_Qg,631
+scipy/linalg/interpolative.py,sha256=tPB5mfxVk_g0VSP1Y6YG4cqUkCSNYg7eomlu5KzhiO0,32251
+scipy/linalg/lapack.py,sha256=1-XWvhL1N7R6vXQTturAC9CLEzoJSq0ata_molM_R2c,15667
+scipy/linalg/matfuncs.py,sha256=G21MOYFXuqlDzWdBWC6FQ_nh5Hv0QwZaDDJ3PTwtHmY,883
+scipy/linalg/misc.py,sha256=uxpR80jJ5w5mslplWlL6tIathas8mEXvRIwDXYMcTOk,592
+scipy/linalg/special_matrices.py,sha256=tLbqSB71b5ucf8nFIAmkKmnFLEZbZk8IXYl4zZs_30g,771
+scipy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_cython_lapack.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_cythonized_array_utils.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_cholesky.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_ldl.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_lu.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_polar.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_update.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_lapack.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_matmul_toeplitz.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_procrustes.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_sketches.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_solve_toeplitz.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_solvers.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc,,
+scipy/linalg/tests/data/carex_15_data.npz,sha256=E_PhSRqHa79Z1-oQrSnB-bWZaiq5khbzHVv81lkBLB4,34462
+scipy/linalg/tests/data/carex_18_data.npz,sha256=Wfg5Rn8nUrffb7bUCUOW7dMqWSm3ZPf_oeZmZDHmysY,161487
+scipy/linalg/tests/data/carex_19_data.npz,sha256=OOj8ewQd8LI9flyhXq0aBl5kZ2Ee-ahIzH25P4Ct_Yc,34050
+scipy/linalg/tests/data/carex_20_data.npz,sha256=FOIi00pxGMcoShZ1xv7O7ne4TflRpca6Kl7p_zBU-h0,31231
+scipy/linalg/tests/data/carex_6_data.npz,sha256=GyoHNrVB6_XEubTADW2rKB5zyfuZE8biWBp4Gze2Avk,15878
+scipy/linalg/tests/data/gendare_20170120_data.npz,sha256=o9-rRR2dXCAkPg7YXNi2yWV2afuaD4O1vhZVhXg9VbU,2164
+scipy/linalg/tests/test_basic.py,sha256=zia60-ir6RMT_f3dUwKZ32czTQR0GjmRQriQ7YBewfk,69951
+scipy/linalg/tests/test_blas.py,sha256=_egnuCdKf89WuIkm45pl_02wMoHV3c4mvZ3uUV4NoWA,40842
+scipy/linalg/tests/test_cython_blas.py,sha256=0Y2w1Btw6iatfodZE7z0lisJJLVCr70DAW-62he_sz4,4087
+scipy/linalg/tests/test_cython_lapack.py,sha256=McSFDUU4kgCavU1u3-uqBGlzUZiLGxM5qPfBFgPTqdE,796
+scipy/linalg/tests/test_cythonized_array_utils.py,sha256=O1EKWxsYt6k1zMWjFlQhTndQVOhHsJlSm-bHfPMny1U,3840
+scipy/linalg/tests/test_decomp.py,sha256=i_Yzs6RMKM1VdSPCSOGeYzueKO2iKbh0Ph8RBRItIaY,106420
+scipy/linalg/tests/test_decomp_cholesky.py,sha256=FKAGOFEcx3Bh8NvZHoUjaDov-a6VpLdjSAswaxjACLY,7857
+scipy/linalg/tests/test_decomp_cossin.py,sha256=Z9QpHHszBuZ-OorqILNK0Oly7sMvXNhbYLTZHNKd3YI,5955
+scipy/linalg/tests/test_decomp_ldl.py,sha256=9h96PmHpoXIbjzc5nPxA3Dzw4575IelqxXw2aiNjabo,4944
+scipy/linalg/tests/test_decomp_lu.py,sha256=i7K4zDx3PocMSPYJzaS0IiZuVRphC_CXzLreK1FNkIE,11186
+scipy/linalg/tests/test_decomp_polar.py,sha256=5x5vz9rJE2U2nvo0kx6xMX5Z9OcnqxayPZvAd4dwsUQ,2646
+scipy/linalg/tests/test_decomp_update.py,sha256=kPMpEe2ddl3rdEDhPlj-cdBL4BsPK3CAtf9g5k55vSo,68490
+scipy/linalg/tests/test_fblas.py,sha256=Ykb7LKjbxPXAdJD-IkXMAsbUmXMAkku2FQCr-jlDTUE,18687
+scipy/linalg/tests/test_interpolative.py,sha256=Y9yGVHR1OMZWHgrX_HmBx446TACjkARoxyHwT49iEuw,8969
+scipy/linalg/tests/test_lapack.py,sha256=4dBJoJkgtXWnuof3Xx8UTBqWZ6lrg8h7NUeihxKIgsY,129349
+scipy/linalg/tests/test_matfuncs.py,sha256=6b5wMGDvMI2PeimrjWastS3pZSE4f1-ETezFeJeyz6E,39926
+scipy/linalg/tests/test_matmul_toeplitz.py,sha256=Wd9T03zZRwX3M3ppkhYJiJbkWZ_xop4VKj57TjeozUs,3870
+scipy/linalg/tests/test_misc.py,sha256=HP9jfKohbJIaKVcBqov9hAOHYk5dZck497-V5DMHe6E,76
+scipy/linalg/tests/test_procrustes.py,sha256=WkNNarBf69izBmlOhu4-u0eWdzkSzYHQuDZh-w89fOU,6758
+scipy/linalg/tests/test_sketches.py,sha256=FVEcNV43JteZZU7GDdBjtl-_alYDimxnjgKvpmtzVsI,3960
+scipy/linalg/tests/test_solve_toeplitz.py,sha256=KuTAYh-8MRWjaHclgQuIaBBx8IBTGEzXgZnhM_gjWxo,4010
+scipy/linalg/tests/test_solvers.py,sha256=degoX4OXSpo_6F59TyHcNdtcY3HCbkkGJRHldDfgdPs,31642
+scipy/linalg/tests/test_special_matrices.py,sha256=7IbOPS0DyTC1zwEXbrjRr3NnctiTGlZsNRVqsJF17hQ,23596
+scipy/misc/__init__.py,sha256=CdX9k6HUYu_cqVF4l2X5h1eqd9xUCuKafO_0aIY5RNE,1726
+scipy/misc/__pycache__/__init__.cpython-310.pyc,,
+scipy/misc/__pycache__/_common.cpython-310.pyc,,
+scipy/misc/__pycache__/common.cpython-310.pyc,,
+scipy/misc/__pycache__/doccer.cpython-310.pyc,,
+scipy/misc/_common.py,sha256=4pb0UjMkG0GBlJ2IgZ4NDiu2vlPCxfL2r0BCOSpOFdE,11153
+scipy/misc/ascent.dat,sha256=6KhJOUhEY6uAUa7cW0CqJiqzOpHWRYps0TxqHK1aAj0,527630
+scipy/misc/common.py,sha256=V67COWNbYuMJwdPMypUiimxSShtUXaq8RSop35sOiuM,619
+scipy/misc/doccer.py,sha256=hUk7LlSlkTY28QjqyHv4HI8cWUDnZyg1PbMLvL3-Yso,1458
+scipy/misc/ecg.dat,sha256=8grTNl-5t_hF0OXEi2_mcIE3fuRmw6Igt_afNciVi68,119035
+scipy/misc/face.dat,sha256=nYsLTQgTE-K0hXSMdwRy5ale0XOBRog9hMcDBJPoKIY,1581821
+scipy/misc/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/misc/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/misc/tests/__pycache__/test_common.cpython-310.pyc,,
+scipy/misc/tests/__pycache__/test_config.cpython-310.pyc,,
+scipy/misc/tests/__pycache__/test_doccer.cpython-310.pyc,,
+scipy/misc/tests/test_common.py,sha256=0h_qT7hwQnqx4Oc6ccvM-U79EkbXPq5LNlC3QSvR88M,833
+scipy/misc/tests/test_config.py,sha256=j1Ppp6DCZy9wMxTmBEGxq4MScvsQXTQk7268EnNnPFQ,1244
+scipy/misc/tests/test_doccer.py,sha256=V1B5Z-XfIQFiSyRNo3PXG-AQfToFmoQ1oOBGjxK2zmo,3738
+scipy/ndimage/__init__.py,sha256=2dI3Sj1jF2AR1xSghzX4E5NFYxN9Z3-qd0a6YDRpPE4,4989
+scipy/ndimage/__pycache__/__init__.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_filters.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_fourier.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_measurements.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_morphology.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc,,
+scipy/ndimage/__pycache__/filters.cpython-310.pyc,,
+scipy/ndimage/__pycache__/fourier.cpython-310.pyc,,
+scipy/ndimage/__pycache__/interpolation.cpython-310.pyc,,
+scipy/ndimage/__pycache__/measurements.cpython-310.pyc,,
+scipy/ndimage/__pycache__/morphology.cpython-310.pyc,,
+scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so,sha256=h98uh-F0_Ywmq7sQkE-zVgPCuj5JX3uZqeFVBgpYS0A,17008
+scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so,sha256=foCM32TOb2d_cAnZNdKGZfl4ZDontGMixm5kiuVIQk4,90984
+scipy/ndimage/_filters.py,sha256=tF-yf0az51r2dPkhK2CatkGNc1vDUnQHWF1BHXi8l70,65695
+scipy/ndimage/_fourier.py,sha256=X-Y0EP59mH5ogqts58SpDhxA0dfqplwZQ8T0G6DzPos,11385
+scipy/ndimage/_interpolation.py,sha256=xtG_a3pksNFF1tm7gl-2v36Zy8fxN4iPn2-j348Obdw,37023
+scipy/ndimage/_measurements.py,sha256=7yn0c2ygTZm12oKUapXHT4r8MZ263ennI_qpEzXC8YM,56097
+scipy/ndimage/_morphology.py,sha256=HKKP__gdrLNYDtp6J1qIzrcmpq7MYO7DpGHYAgyHMrk,94913
+scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so,sha256=riAtjLhu1Lpfxlq9tu_l6HVBP0P9LS2wWGWGe35yvOI,147184
+scipy/ndimage/_ni_docstrings.py,sha256=Pxf50i8Wzrm2M70NkUrbdv901hsJ5XcRHVwyxHmXQJk,8505
+scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so,sha256=zE9mnBeiXgM-J-pGm-deH4TK1949x5XHUJUyA0WHaVs,428200
+scipy/ndimage/_ni_support.py,sha256=rO5ihuExCyN0o5mFUqU1ckg3pprTPpj8a1EZfIIdwqY,4646
+scipy/ndimage/filters.py,sha256=cAv2zezrTJEm9JzKPV_pmXzZcgczCK_VaYJ4mdNW3FM,976
+scipy/ndimage/fourier.py,sha256=gnifi4S_Epyu4DpNsebz4A5BKzBWoGf11FkXWeXsoqY,599
+scipy/ndimage/interpolation.py,sha256=KzQNWvuqSrUfGcfe7gFSX9bHo7jVy76fErfjnpqbIaM,680
+scipy/ndimage/measurements.py,sha256=xdSs52Y5RjURLP710iGURXWQFeS3ok4WjoYufKh9OeA,788
+scipy/ndimage/morphology.py,sha256=yFWSo7o_7PuYq61WGQOCIgMppneNLxqhJocyN0bMsVA,965
+scipy/ndimage/tests/__init__.py,sha256=LUFQT_tCLZ6noa1Myz-TwTfwRaSZ96zqJJUWNyMfb_k,395
+scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_datatypes.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc,,
+scipy/ndimage/tests/data/label_inputs.txt,sha256=JPbEnncwUyhlAAv6grN8ysQW9w9M7ZSIn_NPopqU7z4,294
+scipy/ndimage/tests/data/label_results.txt,sha256=Cf2_l7FCWNjIkyi-XU1MaGzmLnf2J7NK2SZ_10O-8d0,4309
+scipy/ndimage/tests/data/label_strels.txt,sha256=AU2FUAg0WghfvnPDW6lhMB1kpNdfv3coCR8blcRNBJ8,252
+scipy/ndimage/tests/dots.png,sha256=sgtW-tx0ccBpTT6BSNniioPXlnusFr-IUglK_qOVBBQ,2114
+scipy/ndimage/tests/test_c_api.py,sha256=wZv9LUefK1Fnq__xemuxW2GDdRMdNN7gCqhWkdqZLZQ,3730
+scipy/ndimage/tests/test_datatypes.py,sha256=tpCXBY_MH-NcCuytUVVnLbDy1q_3NN7hH245cpqhvsI,2827
+scipy/ndimage/tests/test_filters.py,sha256=IisrzOqjhMwwRjxw05pUBqAHh_OSwLNla9_p1nZWlGo,93325
+scipy/ndimage/tests/test_fourier.py,sha256=DlD_Eb1jZF_3y2wxi1IJaXI3566da9fnbY7jVtUZ42o,6664
+scipy/ndimage/tests/test_interpolation.py,sha256=3kTKe5U76lDnEGTAWW9SzHyCnkbcr2KM1CluN_nUicc,54771
+scipy/ndimage/tests/test_measurements.py,sha256=vgGx-V5jTigVaKxE-dasZ5w9fUfRuzD0QszQV4lOM04,48181
+scipy/ndimage/tests/test_morphology.py,sha256=0qFGtsQkCn20vY9c4C10eeg44R4leNYO4F0BHAWSaNU,106687
+scipy/ndimage/tests/test_ni_support.py,sha256=kuf8otEyIlGVPzcEPekRK7lfXI8bVEvB2_YF6ko7jzg,2472
+scipy/ndimage/tests/test_splines.py,sha256=4dXpWNMKwb2vHMdbNc2jEvAHzStziq8WRh4PTUkoYpQ,2199
+scipy/odr/__init__.py,sha256=CErxMJ0yBfu_cvCoKJMu9WjqUaohLIqqf228Gm9XWJI,4325
+scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so,sha256=SaDHNH3mKMIGrcVtc9ZgnwXi-rudrDM_oUq-_05ubdQ,222969
+scipy/odr/__pycache__/__init__.cpython-310.pyc,,
+scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc,,
+scipy/odr/__pycache__/_models.cpython-310.pyc,,
+scipy/odr/__pycache__/_odrpack.cpython-310.pyc,,
+scipy/odr/__pycache__/models.cpython-310.pyc,,
+scipy/odr/__pycache__/odrpack.cpython-310.pyc,,
+scipy/odr/_add_newdocs.py,sha256=GeWL4oIb2ydph_K3qCjiIbPCM3QvpwP5EZwEJVOzJrQ,1128
+scipy/odr/_models.py,sha256=tfOLgqnV4LR3VKi7NAg1g1Jp_Zw8lG_PA5BHwU_pTH0,7800
+scipy/odr/_odrpack.py,sha256=SaYqOX4MwAOAGBxK8ICbu1wH6vaBJCqF1RQoqCTIoiM,42401
+scipy/odr/models.py,sha256=Fcdj-P9rJ_B-Ct8bh3RrusnapeHLysVaDsM26Q8fHFo,590
+scipy/odr/odrpack.py,sha256=OlRlBxKlzp5VDi2fnnA-Jdl6G0chDt95JNCvJYg2czs,632
+scipy/odr/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/odr/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc,,
+scipy/odr/tests/test_odr.py,sha256=ajJfXACR24a5cEqG7BiwAdoDYpAmvS1I6L7U3Gm-zL4,21011
+scipy/optimize.pxd,sha256=kFYBK9tveJXql1KXuOkKGvj4Fu67GmuyRP5kMVkMbyk,39
+scipy/optimize/README,sha256=FChXku722u0youZGhUoQg7VzDq0kOJ6MCohYcSQWSrg,3221
+scipy/optimize/__init__.py,sha256=YUWDGxYsG4UmFsjNTMi5yWxB3mdLQUh9wbcnz4ATG0g,13108
+scipy/optimize/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/__pycache__/_basinhopping.cpython-310.pyc,,
+scipy/optimize/__pycache__/_bracket.cpython-310.pyc,,
+scipy/optimize/__pycache__/_chandrupatla.cpython-310.pyc,,
+scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_constraints.cpython-310.pyc,,
+scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc,,
+scipy/optimize/__pycache__/_differentiable_functions.cpython-310.pyc,,
+scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc,,
+scipy/optimize/__pycache__/_differentiate.cpython-310.pyc,,
+scipy/optimize/__pycache__/_direct_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_dual_annealing.cpython-310.pyc,,
+scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc,,
+scipy/optimize/__pycache__/_isotonic.cpython-310.pyc,,
+scipy/optimize/__pycache__/_lbfgsb_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linesearch.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_doc.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_highs.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_ip.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_simplex.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_util.cpython-310.pyc,,
+scipy/optimize/__pycache__/_milp.cpython-310.pyc,,
+scipy/optimize/__pycache__/_minimize.cpython-310.pyc,,
+scipy/optimize/__pycache__/_minpack_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_nnls.cpython-310.pyc,,
+scipy/optimize/__pycache__/_nonlin.cpython-310.pyc,,
+scipy/optimize/__pycache__/_numdiff.cpython-310.pyc,,
+scipy/optimize/__pycache__/_optimize.cpython-310.pyc,,
+scipy/optimize/__pycache__/_qap.cpython-310.pyc,,
+scipy/optimize/__pycache__/_remove_redundancy.cpython-310.pyc,,
+scipy/optimize/__pycache__/_root.cpython-310.pyc,,
+scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc,,
+scipy/optimize/__pycache__/_shgo.cpython-310.pyc,,
+scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_spectral.cpython-310.pyc,,
+scipy/optimize/__pycache__/_tnc.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion_ncg.cpython-310.pyc,,
+scipy/optimize/__pycache__/_tstutils.cpython-310.pyc,,
+scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/cobyla.cpython-310.pyc,,
+scipy/optimize/__pycache__/lbfgsb.cpython-310.pyc,,
+scipy/optimize/__pycache__/linesearch.cpython-310.pyc,,
+scipy/optimize/__pycache__/minpack.cpython-310.pyc,,
+scipy/optimize/__pycache__/minpack2.cpython-310.pyc,,
+scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc,,
+scipy/optimize/__pycache__/nonlin.cpython-310.pyc,,
+scipy/optimize/__pycache__/optimize.cpython-310.pyc,,
+scipy/optimize/__pycache__/slsqp.cpython-310.pyc,,
+scipy/optimize/__pycache__/tnc.cpython-310.pyc,,
+scipy/optimize/__pycache__/zeros.cpython-310.pyc,,
+scipy/optimize/_basinhopping.py,sha256=ej5TxpHfW8-mH7rIsYtsaW9WGOj6FWmQUWab2YVlSNY,30691
+scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so,sha256=1n87nMOoMjskAlODyPZ3OF_N1On4IjO905OKEjYPWfE,364200
+scipy/optimize/_bracket.py,sha256=o-ZowrYRDTItOlCut9k0B60sjRbGH6R4bv5ScG0_Q14,28614
+scipy/optimize/_chandrupatla.py,sha256=SoGJwgIk3oWmRHG9EDgcG773fPdF1Z9SNJu2I3Hu2yA,23222
+scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so,sha256=cdQ0MuG9r-_thseq4yu11Vbzb3oKLhR9w3SHBzKL154,100545
+scipy/optimize/_cobyla_py.py,sha256=bLw81_uD6zBTLybEfJUA46_OMdnTmXObhGZcvgBARss,10869
+scipy/optimize/_constraints.py,sha256=_xlt1pkOpxXVJEj-yd_vkPfv20Pxt-us2yxlICngiY0,22854
+scipy/optimize/_dcsrch.py,sha256=D5I9G4oH5kFD2Rrb61gppXFMwwz6JiQBYPvW3vbR5Gs,25235
+scipy/optimize/_differentiable_functions.py,sha256=g-i-tnlS0RcWj6z8PF5cbNeYu_AjRjSbHmuewNN2juc,23665
+scipy/optimize/_differentialevolution.py,sha256=wCLdSrATmzlpyOn3oeoIx-GR2malvM3QZYkhRMgroqo,83206
+scipy/optimize/_differentiate.py,sha256=1cO7QcbxIs0g7gDl9Bo40X_c2PG13wWcYm4OpUHCGh8,30870
+scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so,sha256=eCBbO6L80921dGsv6Vc-JtNPd3nlmc3uzijfhTvxa0c,43480
+scipy/optimize/_direct_py.py,sha256=ShNGJHCdN02zGTQbBL5oEwxZ9yGH8dczXTsmnt1WJIg,11798
+scipy/optimize/_dual_annealing.py,sha256=23UWd8CkGU02s5TaYoiu8h3Tv4GZmaVKgvGFo685Wlc,30346
+scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so,sha256=BuL-HIRz4V5Jvpel1Ptz0xslrgMeUKSyXo3z9ynv4-U,96016
+scipy/optimize/_hessian_update_strategy.py,sha256=PBnp8tf7hHcXb7uOz-GLJpoB79TCmdQM2IIOVX6ubI0,15862
+scipy/optimize/_highs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so,sha256=-sV4_-lkmk5KrdpTsJzCnJXX7OUS8lXC5tn-KP9aaMw,36072
+scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so,sha256=nvY4f9awwbBFeIOnAVaUPK-DE4qhtV7IEYXybbMkv-4,4045920
+scipy/optimize/_highs/src/cython/HConst.pxd,sha256=ipav35Vt3T5POWpL3X0kGkXGMuDjfA8A61FPahnrRxI,5511
+scipy/optimize/_highs/src/cython/Highs.pxd,sha256=1fwhSznVl2Vl_XyXyUTmX8ajygpeJKSgWbkpHiH6QZo,2147
+scipy/optimize/_highs/src/cython/HighsIO.pxd,sha256=cnPDpEfuETXVLGdb4wgyVtQtKh5M2dd0rX9WidZG77U,705
+scipy/optimize/_highs/src/cython/HighsInfo.pxd,sha256=TKvi5wZQ5DH4trIw29PhGWHmMnb8Cz_zjrTBDoodtCM,735
+scipy/optimize/_highs/src/cython/HighsLp.pxd,sha256=ECXgv0gFOP2X12DPi1YWd_uybSAJ9hIll2SMUJ1DZjo,1106
+scipy/optimize/_highs/src/cython/HighsLpUtils.pxd,sha256=eEFgoY_td38M5baXYvvlyFM72x2b1VU_lMFV3Y7HL-8,289
+scipy/optimize/_highs/src/cython/HighsModelUtils.pxd,sha256=FzpoHqKLeMjwJCqM3qHWsxIZb69LNgfO9HsdwcbahZA,335
+scipy/optimize/_highs/src/cython/HighsOptions.pxd,sha256=XsDO_rR9Y-0yxKSstRuv6VffEKh6tqIxIuef1UuanuI,3160
+scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd,sha256=MzjcGCorYJ9NbroJIyZDOM_v8RU4a1kjl1up4DPUicA,261
+scipy/optimize/_highs/src/cython/HighsStatus.pxd,sha256=_pXo59wMcXeIw9mvZSwe9N77w3TaCVALe8ZghhPCF2M,339
+scipy/optimize/_highs/src/cython/SimplexConst.pxd,sha256=hLhOZdBa0qfy_d8ZrXHbQiTfPx11V2xAiH-TGfTClEo,5018
+scipy/optimize/_highs/src/cython/highs_c_api.pxd,sha256=LssK9RFO3D9eGRy2YjdncfnJQfKJ_cRHT6IxS9iV3lw,332
+scipy/optimize/_isotonic.py,sha256=g4puoNqjJyDrJRoC0kvfG_I-0KNjeEfGpfZM5-Ltn48,6054
+scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so,sha256=aY78uxs5PK45b1DhJJF32YcixvQ7CKBhs0vi4muElRI,125145
+scipy/optimize/_lbfgsb_py.py,sha256=AR6PWfz5xgHBT6GEG_V5e7S9wqN8CKYDe9C_ShpT_uA,20718
+scipy/optimize/_linesearch.py,sha256=-OwKJ52xl-pKeRM1kiCVgHGFkGrXW8BEGxUOiGcfEYc,27282
+scipy/optimize/_linprog.py,sha256=EE4T4NoZoTtTbGvERlKilCLQs2uxxt65TgWnRSuUQME,29719
+scipy/optimize/_linprog_doc.py,sha256=ejVGlwlW7xF5T7UkBbRpJ9-dBm6rcEAjXPbz-gWtdLA,61945
+scipy/optimize/_linprog_highs.py,sha256=QbrJwka_Kz3xbpOZymQcm7NteXmzT9yxCskefrZNL58,17573
+scipy/optimize/_linprog_ip.py,sha256=t43a8xJd9Ms8PSIFmdzmT6Pggner7l-Y5bkubWhlAI8,45785
+scipy/optimize/_linprog_rs.py,sha256=5PhSblTUv5bgI9yW5BN1Rmy09gjZFA1tg1BXWxAKOQQ,23146
+scipy/optimize/_linprog_simplex.py,sha256=I3hKTW_BFX0URJkByvqFL6bVBP5X84bq9ilXa2NxViY,24716
+scipy/optimize/_linprog_util.py,sha256=3i_IjuXNBnz-F25qdW6VJLF8bKbG9_kOXCPwb1u2IHo,62749
+scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so,sha256=_QJFQFXt3QnvlJeKxmEZilxTId4zEShI6oL3PteBWVI,27072
+scipy/optimize/_lsq/__init__.py,sha256=Yk4FSVEqe1h-qPqVX7XSkQNBYDtZO2veTmMAebCxhIQ,172
+scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc,,
+scipy/optimize/_lsq/bvls.py,sha256=7u5B8LfUbv3ZRZ8DAZKuDTSNRfDEBmTsn25VZtMMsKk,5195
+scipy/optimize/_lsq/common.py,sha256=nSiCudLnGfw1fWXXnsl5G7BslkYCMAMoC91QZOoVjq0,20523
+scipy/optimize/_lsq/dogbox.py,sha256=97htRlr-Yt-u4Ob3ks7avAMdnjJsO83uHUMjMYrhyjc,11682
+scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so,sha256=Lz_f0muMyegWsHnqYCrZ-wfpLYZMM_sLqMfGJaJ3XlI,235776
+scipy/optimize/_lsq/least_squares.py,sha256=XiGlnKJod4UV2YYXXuiNe4TJoh270b7fOFLjs8txxMY,39672
+scipy/optimize/_lsq/lsq_linear.py,sha256=0Zpy7C0jdGLOE00NBohsu2iWq8hXMMI0FeA6oruZ-Co,15180
+scipy/optimize/_lsq/trf.py,sha256=ElVHnB2Un3eaQ4jJ8KHHp-hwXfYHMypnSthfRO33P90,19477
+scipy/optimize/_lsq/trf_linear.py,sha256=jIs7WviOu_8Kpb7sTln8W7YLgkcndv0eGIP15g_mC4g,7642
+scipy/optimize/_milp.py,sha256=7Giiq-GsySyJzPQmWjwmbuSJyI4ZLPOmzkCbC2AHy9o,15187
+scipy/optimize/_minimize.py,sha256=bGnVzGLCcPHNRgFeBhuvIeCRUo6rRkatHTcYijtv6_E,48221
+scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so,sha256=BnjnQf-LaqWZ0pzhKBp1bJTibafj_afBFZCYzEN6NZM,78312
+scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so,sha256=XcH5OO0wa5JhtHmGgi_RD1xL5osseBLRA0bWRHRIWpA,61008
+scipy/optimize/_minpack_py.py,sha256=0lCQ_b1U8gFoaGs_6v_Mjq0QURPwyvS3L6x2LZWkOAA,44671
+scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so,sha256=WoQvywZTkiSxsfyZbtTHUZpNgpZO3N-hWsmqUaMjlH8,152168
+scipy/optimize/_nnls.py,sha256=0iAi7_xT306p9r674t0Yf5w-Czvzu7ki8hHTbKJZvk8,5484
+scipy/optimize/_nonlin.py,sha256=Om_vN7pckkm9wk_uBgmE5eQnv1Wk5RQ8Vk8P-aBH0rE,49821
+scipy/optimize/_numdiff.py,sha256=BEZjmEEVCv34UHth_JvDTICwhlJWKY6UdGcE0YVOgnc,28720
+scipy/optimize/_optimize.py,sha256=eOBZsdU17C6CwVEjjRMPEJiTBbv55Ts3VQ6F0_RY-Co,146575
+scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so,sha256=0KDFzcxQz_AgLD-mrZZOpb2wSeDK5805FuHDsoY5Eno,223832
+scipy/optimize/_qap.py,sha256=hFSa41-SnDya8Lro7UKViyx2Yz7ZpRfMKoaBTGNVqck,27831
+scipy/optimize/_remove_redundancy.py,sha256=JqaQo5XclDpilSzc1BFv4Elxr8CXlFlgV45ypUwALyc,18769
+scipy/optimize/_root.py,sha256=tsNdnGNqBlrXvzHR9yFYAebIX4C7Wwjwwx_sGXDcW0Y,27810
+scipy/optimize/_root_scalar.py,sha256=baTVT1Vi5ZeXLGxbxhbLkx4bRGA91uHfBzeiwcHUQpM,19595
+scipy/optimize/_shgo.py,sha256=bVUz409huFf-M6q5Rdyiap-NPusAdWyCHbo0rBZoDoQ,62257
+scipy/optimize/_shgo_lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc,,
+scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc,,
+scipy/optimize/_shgo_lib/_complex.py,sha256=yzBQt3YjTcpw1PK4c_VJmi4CF94BZAiMMGDaTO1ai-8,50259
+scipy/optimize/_shgo_lib/_vertex.py,sha256=I2TAqEEdTK66Km6UIkrDm2-tKpeJUuFX7DAfTk3XvUg,13996
+scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so,sha256=pdoC9sJ2ip4zkjsokCcxbNDq245eZugiG3sU76FwNlA,86592
+scipy/optimize/_slsqp_py.py,sha256=cHOtSPw8AP50yoTCc2yl3EzkDKW-wa5XYdkRwaBRdm4,19088
+scipy/optimize/_spectral.py,sha256=cgBoHOh5FcTqQ0LD5rOx4K7ECc7sbnODvcrn15_QeTI,8132
+scipy/optimize/_tnc.py,sha256=Y6rzgteDEKU0sxJ9UOcEsgzTQ3PD6x0WNg4k2IBO-r0,16908
+scipy/optimize/_trlib/__init__.py,sha256=cNGWE1VffijqhPtSaqwagtBJvjJK-XrJ6K80RURLd48,524
+scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so,sha256=aFyKo2r0iTeqcsCasZwCdN5JXyPY4XLtEdjMKL1WVmE,380865
+scipy/optimize/_trustregion.py,sha256=r4CGiKYFqNKWDFA_XT23_d4oqscIm5eSnWQNyno85Ps,10801
+scipy/optimize/_trustregion_constr/__init__.py,sha256=c8J2wYGQZr9WpLIT4zE4MUgEj4YNbHEWYYYsFmxAeXI,180
+scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/canonical_constraint.py,sha256=690VxTb7JJ9RzGwa-LN2hASKlqQPmulyEDZA7I-XyLY,12538
+scipy/optimize/_trustregion_constr/equality_constrained_sqp.py,sha256=5NiEruWnhYL2zhhgZsuLMn-yb5NOFs_bX3sm5giG7I8,8592
+scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py,sha256=mWneWXy1bmte2nH_rq6VYPKXh9YlNIkiu3IG9uvRTck,25744
+scipy/optimize/_trustregion_constr/projections.py,sha256=EO0uHULrNw8pm99vY-gd3pOFQEqrqk_13lVde9iUjTA,13169
+scipy/optimize/_trustregion_constr/qp_subproblem.py,sha256=EtAhRcEtSnGsEeEZ2HGEzm-7r0pnXMCgl9NemKWvdzg,22592
+scipy/optimize/_trustregion_constr/report.py,sha256=_6b3C2G18tAgTstQSvqJbZVFYRxWKuUXFA1SAz95Y6k,1818
+scipy/optimize/_trustregion_constr/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py,sha256=zVPxZDa0WkG_tw9Fm_eo_JzsQ8rQrUJyQicq4J12Nd4,9869
+scipy/optimize/_trustregion_constr/tests/test_projections.py,sha256=-UrTi0-lWm4hANoytCmyImSJUH9Ed4x3apHDyRdJg5o,8834
+scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py,sha256=7tapj8clx8M7K5imwnTA4t-_Jh_cAYeu6efbGg4PbSU,27723
+scipy/optimize/_trustregion_constr/tests/test_report.py,sha256=lbr947QQxz681HxTXEZZ0B6_2VNKiN85Inkz7XYhe4A,1070
+scipy/optimize/_trustregion_constr/tr_interior_point.py,sha256=HPyAfUzwu704yvplRMMMMvUKqBtC56gGUBvg218t-Zo,13798
+scipy/optimize/_trustregion_dogleg.py,sha256=HS783IZYHE-EEuF82c4rkFp9u3MNKUdCeynZ6ap8y8s,4389
+scipy/optimize/_trustregion_exact.py,sha256=s-X20WMrJhO36x3YEtxYepLqyxm1Chl7v8MjirrftUw,15555
+scipy/optimize/_trustregion_krylov.py,sha256=KGdudJsoXXROXAc82aZ8ACojD3rimvyx5PYitbo4UzQ,3030
+scipy/optimize/_trustregion_ncg.py,sha256=y7b7QjFBfnB1wDtbwnvKD9DYpz7y7NqVrJ9RhNPcipw,4580
+scipy/optimize/_tstutils.py,sha256=Q5dZTgMzvonIb2ggCU9a35M8k_iV6v8hK4HDdKE20PQ,33910
+scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so,sha256=8wEb0RyMpKpbOPVfUTFDfN-vRonegrTDLDToQjlta0Y,21648
+scipy/optimize/_zeros_py.py,sha256=FLSkeAm2CoRkjLx37lKS6pMEvmlsZ8agt_ahA_rtwcM,52190
+scipy/optimize/cobyla.py,sha256=6FcM--HbgtHfOZt5QzGCcmyH2wRmDA73UxN8tO8aIqE,619
+scipy/optimize/cython_optimize.pxd,sha256=ecYJEpT0CXN-2vtaZfGCChD-oiIaJyRDIsTHE8eUG5M,442
+scipy/optimize/cython_optimize/__init__.py,sha256=eehEQNmLGy3e_XjNh6t5vQIC9l_OREeE4tYRRaFZdNs,4887
+scipy/optimize/cython_optimize/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/cython_optimize/_zeros.cpython-310-x86_64-linux-gnu.so,sha256=lLKeI281vqnvLx3x-3XyEbDUgwdzZxXMlKwwFK3DrYw,115552
+scipy/optimize/cython_optimize/_zeros.pxd,sha256=anyu-MgWhq24f1bywI4TlohvJjOnpNpkCtSzpKBJSSo,1239
+scipy/optimize/cython_optimize/c_zeros.pxd,sha256=6Gc0l1q-1nlCO9uKrYeXFiHsbimRZzU3t6EoTa8MVvA,1118
+scipy/optimize/lbfgsb.py,sha256=VHujkuUaSo6g_uQ2k5MqY1tvWUZrs9eqoZTAWCpRMY0,708
+scipy/optimize/linesearch.py,sha256=HKsTaTIl0eE3ZZbPNf3T_ulRpsQVzj4MuQ3BROvBU14,781
+scipy/optimize/minpack.py,sha256=I559Oh_EXey3U0Ixtz4lajjZeexPHMwnXS0aGX1qkY8,1054
+scipy/optimize/minpack2.py,sha256=-GBMcSUKuDdYiS9JmGvwXMnzshmCErFE0E8G66nc9Bw,547
+scipy/optimize/moduleTNC.py,sha256=qTEQ4IWtv_LT6fH3-iYmYNwrtrjG1gS4KFbZ73iDcd0,507
+scipy/optimize/nonlin.py,sha256=Soe0x_9z4QyXdOGJxZ98pksET4H-mqauonpZk49WF-A,1200
+scipy/optimize/optimize.py,sha256=uydjzFbjWgAN_lDMfOwjyGD7FEEhEbZIx3gBiUGKlL0,1240
+scipy/optimize/slsqp.py,sha256=K9gVnto2Ol-0wzGisZXR9MxlGGFhjKIdhPfkUwkWLic,809
+scipy/optimize/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__basinhopping.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc,,
+scipy/optimize/tests/test__basinhopping.py,sha256=QrDpRjbRnxgIDevxSovYFjC1UUrEr7g-goyzJHcFZms,18897
+scipy/optimize/tests/test__differential_evolution.py,sha256=sVjn7FKKbMUq64fkTDgmjVNvidMxhvh_hEogG8biVrQ,68844
+scipy/optimize/tests/test__dual_annealing.py,sha256=syotN4J2XhMSdTZaC95mlBRvzkh3Lce3mGtG05nH8dU,15173
+scipy/optimize/tests/test__linprog_clean_inputs.py,sha256=9HFrqlU1OHGTHCgy_R9w2rJ5A5xlu_3QpGbnzQezqXM,11678
+scipy/optimize/tests/test__numdiff.py,sha256=n0qb2yClsrDMNgrjvXqKZX_ww162ZF8C8_jbqvLrTiQ,31351
+scipy/optimize/tests/test__remove_redundancy.py,sha256=gwakPkJo8Y8aRL4son1bp8USfwc9uMrLLnZFrDmfvxY,6799
+scipy/optimize/tests/test__root.py,sha256=MvAzGJkaon4Hfk2BznRvFIVK05ezxezjvwmkEiEZFh8,4211
+scipy/optimize/tests/test__shgo.py,sha256=mUOxM4itGBJ025EOjzlbA1I_ncj3WDkO0j1MRxlptvM,40291
+scipy/optimize/tests/test__spectral.py,sha256=xh-4SMIAWkx_ND2nt7rGACy3ckfw_votfyfxMpQ8m2I,6664
+scipy/optimize/tests/test_bracket.py,sha256=tzlXzMl_36yeDtQV_oU5YH8IBzAJWPfss9QLc6JuqIs,30579
+scipy/optimize/tests/test_chandrupatla.py,sha256=04LrZHxJDpsSNDiZQg_0etOr1pChB-lP4_qLypTxJcA,30108
+scipy/optimize/tests/test_cobyla.py,sha256=PcQCKsaEsyEqgEzZ_T-eC5kTtSdfNekvapf6LeoZPJU,5271
+scipy/optimize/tests/test_constraint_conversion.py,sha256=vp-PUJNne1gnnvutl9mujO7HxnVcSMf5Ix3ti3AwDTI,11887
+scipy/optimize/tests/test_constraints.py,sha256=03SN10ubXpgrNq9Z4DEpPSC6hTXznW-YUF-nxdaxSQ4,9408
+scipy/optimize/tests/test_cython_optimize.py,sha256=n-HccBWoUmmBWq_OsNrAVnt4QrdssIYm4PWG29Ocias,2638
+scipy/optimize/tests/test_differentiable_functions.py,sha256=UtUepS5cJTIHZrSrX8g-74lP-aodwwgGRU0ShbBwf5E,27019
+scipy/optimize/tests/test_differentiate.py,sha256=Ide6nFAUD8KcWyQlV0SpF3PsmpZSPDlk7LI5LA1FEAs,15530
+scipy/optimize/tests/test_direct.py,sha256=dUfsmTx9phFmlwv93UYgjYBoHh-iuWUrdc_KBn7jGlY,13152
+scipy/optimize/tests/test_hessian_update_strategy.py,sha256=czoYotEPSbAfcKhjjf3a9BNJ7i78c4pWzBKCNifuPAY,10115
+scipy/optimize/tests/test_isotonic_regression.py,sha256=_qLmTpd3O9jI4qfFLYLxGiXAf3W5ON1xxro77Jr-GEM,7006
+scipy/optimize/tests/test_lbfgsb_hessinv.py,sha256=rpJbiCUfgJrjp-xVe4JiXjVNe6-l8-s8uPqzKROgmJQ,1137
+scipy/optimize/tests/test_lbfgsb_setulb.py,sha256=44caMVc_OSIthB1SLFPK-k2m0mMWxN4pMiJ-cDnqnLU,3599
+scipy/optimize/tests/test_least_squares.py,sha256=Ho5mgEuNB_t6Jj-M--wdN5e7SfgYnzXdZZZ3wOKETGQ,33951
+scipy/optimize/tests/test_linear_assignment.py,sha256=84d4YHCf9RzjYDKUujQe2GbudkP8dtlSpZtMBwCf_Oc,4085
+scipy/optimize/tests/test_linesearch.py,sha256=DVr7k5tkVpt2XkXwX2edFpRp1x15nfdcXFDK_Mb9XMk,10916
+scipy/optimize/tests/test_linprog.py,sha256=eizplKYRvUKzcXzmvA5n6wNoFN7wzQpCGxowmJl7TTY,96989
+scipy/optimize/tests/test_lsq_common.py,sha256=alCLPPQB4mrxLIAo_rn7eg9xrCEH7DerNBozSimOQRA,9500
+scipy/optimize/tests/test_lsq_linear.py,sha256=E41vtYzwf9Px1QZpm1ShC9GU_sU2X-Cn9apfn5pku6M,10861
+scipy/optimize/tests/test_milp.py,sha256=RDJe1CiL8-UMD8xqe4n2aVWp8qBe1hYufRx8qvad4wU,14553
+scipy/optimize/tests/test_minimize_constrained.py,sha256=c6_cxRer5aG0cXpBH7MwOfIjkPeyG7d5-bVnn9y_IjM,26520
+scipy/optimize/tests/test_minpack.py,sha256=EAarG7t3ucqklW4VWooF_7epPQcYdsocUmN5rjpuDMU,41341
+scipy/optimize/tests/test_nnls.py,sha256=McLnzzUcdj7qANpv1Ui3QQ4XPJfZvvhPtVSDOxU7zFU,19194
+scipy/optimize/tests/test_nonlin.py,sha256=IK7AjY9sWxEb6xwzE9IPnRi4VwQaCfTd9Wv0Pr7_lcs,18493
+scipy/optimize/tests/test_optimize.py,sha256=Qe1JeRz6sxM6Ndcoou_EvxPSzdB0TY3X3BhsYJcHRPs,123372
+scipy/optimize/tests/test_quadratic_assignment.py,sha256=zXttKYFREnrDhMExvBFNKzYb_77tFFsDlOPf-FP5XrA,16307
+scipy/optimize/tests/test_regression.py,sha256=CSg8X-hq6-6jW8vki6aVfEFYRUGTWOg58silM1XNXbU,1077
+scipy/optimize/tests/test_slsqp.py,sha256=KtqXxnMWsxI25GY-YT9BEZtgK9EkdLs_f5CRpXquiMQ,23258
+scipy/optimize/tests/test_tnc.py,sha256=ahSwu8F1tUcPV09l1MsbacUXXi1avQHzQNniYhZRf4s,12700
+scipy/optimize/tests/test_trustregion.py,sha256=HJtCc8Gdjznkzyn7Ei3XByBM_10pqv7VXgXBR9kCc8k,4701
+scipy/optimize/tests/test_trustregion_exact.py,sha256=DnuS71T8CyVKWOP6ib7jB2PQEjNf3O5r1DQ4fQCJSi0,12951
+scipy/optimize/tests/test_trustregion_krylov.py,sha256=DA169NkSqKMHdtDztMnlsrMZC3fnVlqkoKADMzGSWPg,6634
+scipy/optimize/tests/test_zeros.py,sha256=UzJWUB9wBdKpOAN0IQEMm3sYjANg9xtpQzqs_NV4Saw,35691
+scipy/optimize/tnc.py,sha256=5FKObWi_WSt7nFbOrt6MVkJQxZzCxZy_aStpnDV7okY,920
+scipy/optimize/zeros.py,sha256=cL-uiCpCIb28_C5a2O8oGOGC_5t836mICzkKDoMMgZY,789
+scipy/signal/__init__.py,sha256=Qi1hDJ8z3Zw5bdh3HK_Pj4H5aRgz7RML28_EqVC8ytY,13983
+scipy/signal/__pycache__/__init__.cpython-310.pyc,,
+scipy/signal/__pycache__/_arraytools.cpython-310.pyc,,
+scipy/signal/__pycache__/_bsplines.cpython-310.pyc,,
+scipy/signal/__pycache__/_czt.cpython-310.pyc,,
+scipy/signal/__pycache__/_filter_design.cpython-310.pyc,,
+scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc,,
+scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc,,
+scipy/signal/__pycache__/_ltisys.cpython-310.pyc,,
+scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc,,
+scipy/signal/__pycache__/_peak_finding.cpython-310.pyc,,
+scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc,,
+scipy/signal/__pycache__/_short_time_fft.cpython-310.pyc,,
+scipy/signal/__pycache__/_signaltools.cpython-310.pyc,,
+scipy/signal/__pycache__/_spectral_py.cpython-310.pyc,,
+scipy/signal/__pycache__/_upfirdn.cpython-310.pyc,,
+scipy/signal/__pycache__/_waveforms.cpython-310.pyc,,
+scipy/signal/__pycache__/_wavelets.cpython-310.pyc,,
+scipy/signal/__pycache__/bsplines.cpython-310.pyc,,
+scipy/signal/__pycache__/filter_design.cpython-310.pyc,,
+scipy/signal/__pycache__/fir_filter_design.cpython-310.pyc,,
+scipy/signal/__pycache__/lti_conversion.cpython-310.pyc,,
+scipy/signal/__pycache__/ltisys.cpython-310.pyc,,
+scipy/signal/__pycache__/signaltools.cpython-310.pyc,,
+scipy/signal/__pycache__/spectral.cpython-310.pyc,,
+scipy/signal/__pycache__/spline.cpython-310.pyc,,
+scipy/signal/__pycache__/waveforms.cpython-310.pyc,,
+scipy/signal/__pycache__/wavelets.cpython-310.pyc,,
+scipy/signal/_arraytools.py,sha256=k3kHbl9RzcqsyftIYSFJZvJFL4zlcMAHyaRFUkFxOXY,8294
+scipy/signal/_bsplines.py,sha256=84tNZ2SuCWbh810Xu4Q084zsLvBptHU7fNGV_gZTYhY,15731
+scipy/signal/_czt.py,sha256=t5P1kRCM3iw3eCaL9hTgctMfQKezkqnjbghLjCkffQE,19445
+scipy/signal/_filter_design.py,sha256=JgYGAcpX4uhomSfJU5zQ-25bomkD9PqnXfMovbg32Ps,186602
+scipy/signal/_fir_filter_design.py,sha256=lcCVdgZytsIhVE1GdzksJ5sD2YPmD1D7EwvYOO52BIo,49381
+scipy/signal/_lti_conversion.py,sha256=GDo7lUK9QLv7PCKoblyvHXaEVtYbuKTwAmJ3OAuy4Tw,16142
+scipy/signal/_ltisys.py,sha256=g1c1oPuplyaQY0tfGGbq3XKfPUHNP0PW_G2AHoqJSLY,116354
+scipy/signal/_max_len_seq.py,sha256=8QkMWoYY3qy3bCKfsuXaS93Bnb2zd-ue6j5i5-3_hi0,5060
+scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so,sha256=1zYLmm8tjUSq5ZFYuzXmlGhNu6vh7MooPWSp8CXLEw4,77848
+scipy/signal/_peak_finding.py,sha256=d4y3__VSe9hPIueLZ_9xRKB9EnonvUOS6g1xp_WuxAY,48892
+scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so,sha256=junXT0_tsKF9WfjvK4TtoudFYKUKr1RPqrgfWnM034k,305464
+scipy/signal/_savitzky_golay.py,sha256=mnltOfknWRlNiZmNLLy-zKTCrw6nZSdJPEvpGi0kv8E,13417
+scipy/signal/_short_time_fft.py,sha256=jSd8xQrvHrJFyOVhcPJPduCThBvKXPLPuKcQDrOw5pE,73463
+scipy/signal/_signaltools.py,sha256=38oXczH1v4GT4pGVuI1WIYzOFYLHhO66C-SxGbV5ums,157590
+scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so,sha256=_DTzyCLkXWw51j738x5PMSR4c4BVozpShS8nQ6e_QOk,109008
+scipy/signal/_sosfilt.cpython-310-x86_64-linux-gnu.so,sha256=bHwx8VcEnIY45Mw6fbe3xsj6SZNPcHSRqWyKxCZ_N-M,303376
+scipy/signal/_spectral.cpython-310-x86_64-linux-gnu.so,sha256=ZK9JCPq7mK4YRg6O7TC2VKUUI14nQYrpP23uhjYPzk0,78176
+scipy/signal/_spectral_py.py,sha256=xRwdztzKYeYv0xIGYfGdxVeW3-DN5L0XJYFlWZjWm7o,78406
+scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so,sha256=Mz_jv3AkR6uNPMxR2jemmXQq1CJGLyOBvG8QLRdGW3A,85280
+scipy/signal/_upfirdn.py,sha256=ODSw2x1KHXN0vdKHm4vnovZxkoafcwIdUek0N8Edu5g,7882
+scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so,sha256=wApFc2zPuMZHuqDuY21pR1X8iNVhbe5GaJ4LlIy4DVM,394672
+scipy/signal/_waveforms.py,sha256=Bm5WOBhk1nXwK0A6yFVTY7tCCv6trdrUjje_xmM878Y,20523
+scipy/signal/_wavelets.py,sha256=NzmN785S0xFdgFhC4Lv52BKrvw3q3wtyVZdCditpDG8,16095
+scipy/signal/bsplines.py,sha256=xpwI33IQDzkH6S5o8ZxDtNj40dDD1G_tkpG4MaMMxQ4,738
+scipy/signal/filter_design.py,sha256=TRo01JzmAh6zpgVgZi_8pHLPM2DKo9fA9yDXpU5AOCM,1471
+scipy/signal/fir_filter_design.py,sha256=m74z7fwTgiYFfHdYd0NYVfpUnDIkNRVCG8nBaOoPVZ8,766
+scipy/signal/lti_conversion.py,sha256=fhyTsetZE9Pe57f9DeBdOIZwc71Nxw7j2Ovn6m7w2W0,707
+scipy/signal/ltisys.py,sha256=E5t7vHjsj09EYmpd27aqtRvT8E8sDpH-5YOgcmeqypI,1146
+scipy/signal/signaltools.py,sha256=ZnV0ARj_8YPUZ7cIxpM2Ko5yuOkW7Ic-JxN5uLmGcj8,1179
+scipy/signal/spectral.py,sha256=m_Q-gzRpT6e_w2kIBFKPBLuDVj5If5zfVWbAViAQJsk,723
+scipy/signal/spline.py,sha256=iisoUmgbyuuEukQjBz99HM3SYao7j1ZsXXmtE-wo5cU,810
+scipy/signal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/signal/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/_scipy_spectral_test_shim.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/mpsig.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_array_tools.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_bsplines.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_cont2discrete.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_czt.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_dltisys.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_filter_design.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_fir_filter_design.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_ltisys.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_max_len_seq.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_peak_finding.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_short_time_fft.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_spectral.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_upfirdn.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_waveforms.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_wavelets.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_windows.cpython-310.pyc,,
+scipy/signal/tests/_scipy_spectral_test_shim.py,sha256=qkEcaCK7_jPHA7sellidJJs6rS6wo9xO9f5YkFdqBOQ,19995
+scipy/signal/tests/mpsig.py,sha256=DHB3eHB0KYA-E0SBebKG36YLk-T5egbwwryne3RwIHM,3308
+scipy/signal/tests/test_array_tools.py,sha256=J9Mr5DtqmhiTReWvsk3YclL6Cnv32bDuklBnw2zprJY,3632
+scipy/signal/tests/test_bsplines.py,sha256=7nnnsABF-uwKj13_Vq-CSbZJeIqx22j4yYySw83Q40o,8855
+scipy/signal/tests/test_cont2discrete.py,sha256=3IkRfgGlgnX7X0bERpExPAxAkcGK0h6Ovy6GyrhnYS8,14605
+scipy/signal/tests/test_czt.py,sha256=3HxxWwOWIrIc0GC-K5h6f0NRjkLrWRA5OhoB5y0zbw0,6993
+scipy/signal/tests/test_dltisys.py,sha256=f4wDe0rF_FATRWHkHddbPDOsFGV-Kv2Unz8QeOUUs-k,21558
+scipy/signal/tests/test_filter_design.py,sha256=whn5g9GR7BcsFjSMJyCMQFkrSWJoGSr9bhwEwmOGKP8,193782
+scipy/signal/tests/test_fir_filter_design.py,sha256=77Dt5heM2m9QTQ9VUZTeeSWnTi9cOjFbL-51CfNX-_8,29941
+scipy/signal/tests/test_ltisys.py,sha256=MbFugdbcNFZuzxcpjcVldhpaR64E0AaOg0qEWgPSMQQ,45208
+scipy/signal/tests/test_max_len_seq.py,sha256=X9oyCvW0Ny8hOAVX22HmKaMgi2oioe1cZWO3PTgPOgw,3106
+scipy/signal/tests/test_peak_finding.py,sha256=03S223wQ6xcJ_VyO6WCxthrFjWgatAmGKm6uTIZOlfk,33863
+scipy/signal/tests/test_result_type.py,sha256=25ha15iRfFZxy3nDODyOuvaWequyBpA42YNiiU43iAc,1627
+scipy/signal/tests/test_savitzky_golay.py,sha256=hMD2YqRw3WypwzVQlHwAwa3s6yJHiujXd_Ccspk1yNs,12424
+scipy/signal/tests/test_short_time_fft.py,sha256=h1xMjXJKr9HO1FEElm-D60uKPjPOckL7XOWhGH-fKtY,34474
+scipy/signal/tests/test_signaltools.py,sha256=rW7rMh50nQxlBWeQW104HUQWI8x6z9Me4C3Eruh0tk8,141443
+scipy/signal/tests/test_spectral.py,sha256=9IwUmrhRIynmcuCr-24LMH3HN9rcf2-49tP6bixkFEg,63775
+scipy/signal/tests/test_upfirdn.py,sha256=i3EjQKnwS6FRRRPPzwl1B_zWsQ20Dfa_6WUUYH8I3xM,11240
+scipy/signal/tests/test_waveforms.py,sha256=sTT0DeOER5U9h8Xp54VGvGlbtcxhp_wjGNQXw1yOaGM,11975
+scipy/signal/tests/test_wavelets.py,sha256=BurB2_FZ9rnLVJVhItmaueAUqlnmXR2POtFAJ-h3FLU,6721
+scipy/signal/tests/test_windows.py,sha256=tLnQi4VyekCfhV3Bn1mCY9pCVcDH6TbuYa7yiUI8rak,40990
+scipy/signal/waveforms.py,sha256=HHwdsb-_WPvMhFLAUohMBByHP_kgCL3ZJPY7IZuwprA,672
+scipy/signal/wavelets.py,sha256=ItCm-1UJc8s9y-_wMECmVUePpjW8LMSJVtZB-lFwVao,612
+scipy/signal/windows/__init__.py,sha256=BUSXzc_D5Agp59RacDdG6EE9QjkXXtlcfQrTop_IJwo,2119
+scipy/signal/windows/__pycache__/__init__.cpython-310.pyc,,
+scipy/signal/windows/__pycache__/_windows.cpython-310.pyc,,
+scipy/signal/windows/__pycache__/windows.cpython-310.pyc,,
+scipy/signal/windows/_windows.py,sha256=F-9DNB-71WE3WQOxVfNESgmc4gG21rDFgD631Y9-E78,83607
+scipy/signal/windows/windows.py,sha256=OztcTMqgFMLguY9-hVUvSSPMYY4GYkbrFvtsRcktxC8,879
+scipy/sparse/__init__.py,sha256=WClFuFd1byUOWhYZ6ZrjBsnKTwXEvjUJpVoMzbAvvv4,9272
+scipy/sparse/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/__pycache__/_base.cpython-310.pyc,,
+scipy/sparse/__pycache__/_bsr.cpython-310.pyc,,
+scipy/sparse/__pycache__/_compressed.cpython-310.pyc,,
+scipy/sparse/__pycache__/_construct.cpython-310.pyc,,
+scipy/sparse/__pycache__/_coo.cpython-310.pyc,,
+scipy/sparse/__pycache__/_csc.cpython-310.pyc,,
+scipy/sparse/__pycache__/_csr.cpython-310.pyc,,
+scipy/sparse/__pycache__/_data.cpython-310.pyc,,
+scipy/sparse/__pycache__/_dia.cpython-310.pyc,,
+scipy/sparse/__pycache__/_dok.cpython-310.pyc,,
+scipy/sparse/__pycache__/_extract.cpython-310.pyc,,
+scipy/sparse/__pycache__/_index.cpython-310.pyc,,
+scipy/sparse/__pycache__/_lil.cpython-310.pyc,,
+scipy/sparse/__pycache__/_matrix.cpython-310.pyc,,
+scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc,,
+scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc,,
+scipy/sparse/__pycache__/_sputils.cpython-310.pyc,,
+scipy/sparse/__pycache__/base.cpython-310.pyc,,
+scipy/sparse/__pycache__/bsr.cpython-310.pyc,,
+scipy/sparse/__pycache__/compressed.cpython-310.pyc,,
+scipy/sparse/__pycache__/construct.cpython-310.pyc,,
+scipy/sparse/__pycache__/coo.cpython-310.pyc,,
+scipy/sparse/__pycache__/csc.cpython-310.pyc,,
+scipy/sparse/__pycache__/csr.cpython-310.pyc,,
+scipy/sparse/__pycache__/data.cpython-310.pyc,,
+scipy/sparse/__pycache__/dia.cpython-310.pyc,,
+scipy/sparse/__pycache__/dok.cpython-310.pyc,,
+scipy/sparse/__pycache__/extract.cpython-310.pyc,,
+scipy/sparse/__pycache__/lil.cpython-310.pyc,,
+scipy/sparse/__pycache__/sparsetools.cpython-310.pyc,,
+scipy/sparse/__pycache__/spfuncs.cpython-310.pyc,,
+scipy/sparse/__pycache__/sputils.cpython-310.pyc,,
+scipy/sparse/_base.py,sha256=yXHwyNvhZYQ4JN7AxHwOR2zZPRzjBPzet_8Lv5WeKVE,52557
+scipy/sparse/_bsr.py,sha256=miltBWH6omnM8vuBeZqD9VoJ6xybgzRoz0F1xLLlbEs,30154
+scipy/sparse/_compressed.py,sha256=ul9lnyyKN2yaLKVs54CWIJYQYTlD6Seiftp_UXhxnok,53089
+scipy/sparse/_construct.py,sha256=S8avkP1bHGA5Hrufj2IldPqYXK1ls0GRUBdIRBpGfWw,47179
+scipy/sparse/_coo.py,sha256=A_6Le4-yfn20cx8rjSlzP1P-x6v7dptmNu-makDJoRk,31757
+scipy/sparse/_csc.py,sha256=oMNfti0VZ-OKJi-5THPcQCrj-vWFS3heJoGWUCyJ-EM,11057
+scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so,sha256=i0Ju-q3-Rgwi4xEtHjyg4wov_ZDYUktLgO-SsOUC2ko,823376
+scipy/sparse/_csr.py,sha256=nM2lgWRujXz_PhoinsooCfn0iqkzGS9aNm-Mapi3bus,15675
+scipy/sparse/_data.py,sha256=CbZVzMgio3OLAlLl2_1SlGHO7A2oXcdpAhKu1VgTlTI,17219
+scipy/sparse/_dia.py,sha256=cihl_869L2DSqjslBanJGhBgCYmnezBC8ZSdEAkZD8I,18755
+scipy/sparse/_dok.py,sha256=rL11rshTMrbm-SxkTa4XD90JSjnRCjdU48WPLSNExH8,22220
+scipy/sparse/_extract.py,sha256=iIRSqqVMiXfiacfswDCWXTjZCFfRvOz1NFicLUMHSl4,4987
+scipy/sparse/_index.py,sha256=c_Wt3XdFl9Zd6bAnfZ-pOCYHZ6VaB1a1duIh9xvYO50,13279
+scipy/sparse/_lil.py,sha256=zMhN5b7M0Yk1j1M5CS1hUcq7mt1x5POGHPAuxQkfoo4,20521
+scipy/sparse/_matrix.py,sha256=cT7Piq0NYzvRouy3HksG7d063HTjRlauBheAAT9PzCI,3081
+scipy/sparse/_matrix_io.py,sha256=dHzwMMqkdhWA8YTonemaZmVT66i3GiG46FBcsIDBbAY,6005
+scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so,sha256=7vpfiypFLkZVGMKWMGJu4XxaXv8CLhZVIDgs_ysZZqg,4466608
+scipy/sparse/_spfuncs.py,sha256=lDVTp6CiQIuMxTfSzOi3-k6p97ayXJxdKPTf7j_4GWc,1987
+scipy/sparse/_sputils.py,sha256=o3u434vbhJaoOE0ixhQQXJ_0T7ZqC-hmt5RmgFPm048,14545
+scipy/sparse/base.py,sha256=8Yx-QLKSRu9LJjgG-y8VqsRnsjImB2iKoJFxTgKGFsI,791
+scipy/sparse/bsr.py,sha256=CsYirxoLqHwBiEyNbOgGdZMx4Lt3adKZ-7uVv1gpzCY,811
+scipy/sparse/compressed.py,sha256=rbaz4AoTJvNnfnwEx4ocDXlkHJPOxe9DzqxCcJoHY2g,1009
+scipy/sparse/construct.py,sha256=i9lHBSRsDkvoNCbF9b7mZ0C2fHCjKU5CKCE30c-CxMc,925
+scipy/sparse/coo.py,sha256=VRF6kaYsVtyprwYrEuy1gRcCU5G7xsKyY0L1zJ_9JiQ,844
+scipy/sparse/csc.py,sha256=EV_LxYjPiRsTV6-J8kUefNna-R0tdI5uBt9Fj_XWlwc,609
+scipy/sparse/csgraph/__init__.py,sha256=VbNYhqSQ5ZPIPjU3Q9Q9MKTH1umiVu11GOjXNa1Cx68,7753
+scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc,,
+scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc,,
+scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so,sha256=LAliLFRkNi9po0xA86_zsGl4OIgtrAOSG6d6RLZY4gw,344976
+scipy/sparse/csgraph/_laplacian.py,sha256=n5iodxzmVtvbpcFLld-y-ZG3539uebImpMfIfnMhMck,18209
+scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so,sha256=2U1wR2L9IvxCUd8wwwA_WnUGmbZsJNGuItwVbmT8mrQ,347976
+scipy/sparse/csgraph/_min_spanning_tree.cpython-310-x86_64-linux-gnu.so,sha256=mnQ3Ijgjil1ybq7RD4a8KSBAavBONxvpcWMAttrdkac,259472
+scipy/sparse/csgraph/_reordering.cpython-310-x86_64-linux-gnu.so,sha256=0nvEhVx41cCZZZ3oeF_BunnkG7EeAY46T_6nBRA8fdA,331936
+scipy/sparse/csgraph/_shortest_path.cpython-310-x86_64-linux-gnu.so,sha256=myjTgnUn1FHAJRZjAD7AdIf4J9TSY-VEEgzWxRkvLwE,484824
+scipy/sparse/csgraph/_tools.cpython-310-x86_64-linux-gnu.so,sha256=ugHhgQ1RoYT6EXuEtxbEA9q2-531YUdVoir4Pgfy5qU,205312
+scipy/sparse/csgraph/_traversal.cpython-310-x86_64-linux-gnu.so,sha256=s_0NkoNPI5Hs13oLyDHZqsxzbCjeOauQ8__QIIMJdg8,658864
+scipy/sparse/csgraph/_validation.py,sha256=VQl6Aj3ns7AhLe3BDKp0-tRUXSzXOeD32wQ1eN7xnek,2476
+scipy/sparse/csgraph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/test_connected_components.py,sha256=a2HZjm7HsC0STqiDnhN6OJL4yIMcM28VNVtMXDI2BqE,3948
+scipy/sparse/csgraph/tests/test_conversions.py,sha256=KJ6jEAYl5C8APyH_WE5I1M8qGgxOyjGtNPf9rt4RYCo,1856
+scipy/sparse/csgraph/tests/test_flow.py,sha256=BXhx0qBT3Ijy9all5OhNVNVzMbdTPySQuaZ1ajK6DTs,7420
+scipy/sparse/csgraph/tests/test_graph_laplacian.py,sha256=6fDEldaGM_gEZk-NMHaeQMKjZRnz3J7R5kWqHhfchY0,10990
+scipy/sparse/csgraph/tests/test_matching.py,sha256=MkSKU_9_IIhRnhp5sbRbB8RYqVe_keS4xqhDVvV3EhM,11944
+scipy/sparse/csgraph/tests/test_pydata_sparse.py,sha256=eoiFT4O_myDq2hVHM3A2qkwL5t8hv3XwRLhXwC4ZmHE,3601
+scipy/sparse/csgraph/tests/test_reordering.py,sha256=by-44sshHL-yaYE23lDp1EqnG-72MRbExi_HYSMJEz8,2613
+scipy/sparse/csgraph/tests/test_shortest_path.py,sha256=RmRAk_RxMo3C9do0f01DsHSPyDUVEUZXuq4h6aALrDo,14441
+scipy/sparse/csgraph/tests/test_spanning_tree.py,sha256=7Zcbj_87eeAkm6RetgeO0wVp1EOIEjGxJLuGtw_H9qc,2168
+scipy/sparse/csgraph/tests/test_traversal.py,sha256=UNTZXJ9bjDHcji_vUa1Ye5Kbp6xLfyHBG9LusToGUSY,2840
+scipy/sparse/csr.py,sha256=9UrWUoq5-hSl9bcaVeWxN4tmPJisTQ_6JiISCyrlMCw,658
+scipy/sparse/data.py,sha256=qGDAuAvTASgQ7wXXZ9t2JPp0rNBNVxObTTzXNHDRSEo,573
+scipy/sparse/dia.py,sha256=0y5_QfvVeU5doVbngvf8G36qVGU-FlnUxRChQ43e1aU,689
+scipy/sparse/dok.py,sha256=LMnaLFd266EZ3p4D1ZgOICGRZkY6s7YM0Wvlr6ylRn0,733
+scipy/sparse/extract.py,sha256=6qT2PNOilsEhDWl6MhmgpveIuQr4QCs3LATwIrBroOQ,567
+scipy/sparse/lil.py,sha256=BbnMgvzMi33OqmBNYF_VDPeju2RcRs9OyZUUU3aZHcc,734
+scipy/sparse/linalg/__init__.py,sha256=_2NSGBqWo-MaV_ZiFDzXRYTM9eW8RfmtSWVp4WMESyw,3999
+scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/__init__.py,sha256=YxlWZfj2dxiZrFLL6Oj6iWKEuC6OHXdRVRf9xCU_Zoo,1991
+scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/_add_newdocs.py,sha256=ASCr6jhvN8hgJCEg9Qq685LXKJuGTvFQCZtUwzWphDk,3912
+scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so,sha256=UOaqh-gu9w_8RGj1Pb4ZdPXjHOM9JvPy5o4vSWn42BI,378961
+scipy/sparse/linalg/_dsolve/linsolve.py,sha256=Iro6NQavwUGTmib9d3UOPBQAXXCVpplzfCiqRDS6nh0,26486
+scipy/sparse/linalg/_dsolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/tests/test_linsolve.py,sha256=632NbRmJm2-8vbQ6g9pFiMsApZ01tIGveNfP0BUjVXo,27784
+scipy/sparse/linalg/_eigen/__init__.py,sha256=SwNho3iWZu_lJvcdSomA5cQdcDU8gocKbmRnm6Bf9-0,460
+scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/_svds.py,sha256=3N36RCFHqkYraaY7Fc7WoN-w9_7c1cG0QnlWYAJaroA,20239
+scipy/sparse/linalg/_eigen/_svds_doc.py,sha256=3_mPNg5idszebdDr-3z_39dX3KBmX2ui1PCCP_hPF24,15605
+scipy/sparse/linalg/_eigen/arpack/COPYING,sha256=CSZWb59AYXjRIU-Mx5bhZrEhPdfAXgxbRhqLisnlC74,1892
+scipy/sparse/linalg/_eigen/arpack/__init__.py,sha256=zDxf9LokyPitn3_0d-PUXoBCh6tWK0eUSvsAj6nkXI0,562
+scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so,sha256=BLVsy5NYNQUJA_GGLA0Xt6Ovliza3b3pBham9OOqN28,486441
+scipy/sparse/linalg/_eigen/arpack/arpack.py,sha256=BSkXtfwvmUtmBejugJkE2LOPeGtV-Ms7TxXHIpD_Rx8,67401
+scipy/sparse/linalg/_eigen/arpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py,sha256=R5FfNhm1CZNVMiP_ldOp5x_0pzpwCJlO68FPW_pR8vw,23750
+scipy/sparse/linalg/_eigen/lobpcg/__init__.py,sha256=E5JEPRoVz-TaLrj_rPm5LP3jCwei4XD-RxbcxYwf5lM,420
+scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py,sha256=CdmO8VQrARiE1i8VJvE4O0tYytbzQCzDIf3eo1sWq6g,41905
+scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py,sha256=TVAhSqfKVm-T05Nx-eIJfMMyf8P-XEyZv_r9YSrHuZo,23813
+scipy/sparse/linalg/_eigen/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/tests/test_svds.py,sha256=0fxAvOZN6Jet3H8dAlq0je1MS5THhGv0l4dv1ZYoUFw,36157
+scipy/sparse/linalg/_expm_multiply.py,sha256=enIS-h-6F6UQ6SQeR57bH8MYbM4XzwQv5dVqlWVqhJU,26312
+scipy/sparse/linalg/_interface.py,sha256=drcxlR1TUiZ1sEat2ke6bh62DPIe888Xd1QagqHMlq8,27979
+scipy/sparse/linalg/_isolve/__init__.py,sha256=Z_eQUYbe6RWMSNi09T9TfPEWm8RsVxcIKYAlihM-U-c,479
+scipy/sparse/linalg/_isolve/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/iterative.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/utils.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/_gcrotmk.py,sha256=j2JVJBMs8u72hwF0jueRIfkJlS4ZtUZHt0TXYzWXcUY,16212
+scipy/sparse/linalg/_isolve/iterative.py,sha256=T2ebi650XYuxLcE90_vvNhnmDKNn4yCMEHy2fQyqFMM,35768
+scipy/sparse/linalg/_isolve/lgmres.py,sha256=_HXq4vrLuoo2cvjZIgJ9_NJPQnpaQNoGcrUFQdhgQto,9159
+scipy/sparse/linalg/_isolve/lsmr.py,sha256=ej51ykzoqpWvyksTFISRN-lXce7InPpqyDT4N42QEpY,15653
+scipy/sparse/linalg/_isolve/lsqr.py,sha256=mJADMPk_aL_lf57tkaTydK4lYhkszmHf2-4jHJEe8Vs,21214
+scipy/sparse/linalg/_isolve/minres.py,sha256=lz5MBEKkTIjhiBnWoJ6WhNXGkKiYRKnt2FAI2MNvsmM,11611
+scipy/sparse/linalg/_isolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_lgmres.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_utils.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py,sha256=M5lrn0JBRUmo6ug2p1SgDtm7PAbU6potiJzRy-wT68Q,5413
+scipy/sparse/linalg/_isolve/tests/test_iterative.py,sha256=g2dEqDPRJUuesDn8FrTOQxkZ2wMaOWGao7z7lShV1Ng,25626
+scipy/sparse/linalg/_isolve/tests/test_lgmres.py,sha256=hAjJLuBtyLMCCqK_uZbTVGnsFACsLZHgtiHdUABRO3Q,7064
+scipy/sparse/linalg/_isolve/tests/test_lsmr.py,sha256=6bQA3WdneycfXx6aZyFdPjWRUSXm_Smjh9YcJo8R-4E,6365
+scipy/sparse/linalg/_isolve/tests/test_lsqr.py,sha256=IG6FaJjYU_0QYYCBC4yjNiZldi1ZafIITDKnESTScCo,3754
+scipy/sparse/linalg/_isolve/tests/test_minres.py,sha256=7h3A3dzQV9_jqYrNdulAAJnzZ5icw_HBnTXNXnUdUto,2435
+scipy/sparse/linalg/_isolve/tests/test_utils.py,sha256=VlmvctRaQtjuYvQuoe2t2ufib74Tua_7qsiVrs3j-p0,265
+scipy/sparse/linalg/_isolve/tfqmr.py,sha256=SpMqzbNeYBgMU6DYgQyV2SbGlnal6d1iMysAILQj_pI,6689
+scipy/sparse/linalg/_isolve/utils.py,sha256=I-Fjco_b83YKUtZPVdobTjPyY41-2SHruVvKZVOIXaU,3598
+scipy/sparse/linalg/_matfuncs.py,sha256=wib0cFQFGX9CylfenGMGdDskE5XJ_LTC_OWpLJcfIZY,29385
+scipy/sparse/linalg/_norm.py,sha256=y4J98m4JBfHI67lZNsF93SUIiy4JHwhFElFjuZE_twg,6067
+scipy/sparse/linalg/_onenormest.py,sha256=47p9H_75GVy3AobAmpgYQQI3Nm7owHVil6ezu42PHsQ,15486
+scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so,sha256=SlYCZgv47DhHlVImmktzifsuFrQjbVrwSNNJnJoVeeo,158281
+scipy/sparse/linalg/_propack/_dpropack.cpython-310-x86_64-linux-gnu.so,sha256=POglN_81l6zE-ZLoBfFLvRzAplvMi448eie1-liotTQ,133633
+scipy/sparse/linalg/_propack/_spropack.cpython-310-x86_64-linux-gnu.so,sha256=h3dW6gZeeshDj26Qmz6aNcCZeWxo82isE4lkBtIxhyM,133633
+scipy/sparse/linalg/_propack/_zpropack.cpython-310-x86_64-linux-gnu.so,sha256=y9SyEYpI32lN02fWyYrRH_8jtGxPmFvc_Yru01nAJUI,150089
+scipy/sparse/linalg/_special_sparse_arrays.py,sha256=7jnMobVkXaYQeHODLmaTFwAL-uC-LVda5D1vz-vpz3A,34298
+scipy/sparse/linalg/_svdp.py,sha256=3_w6ECB1W0LiFoS400LCtx0NXwKPJETmoF9X1JZ07uI,11415
+scipy/sparse/linalg/dsolve.py,sha256=iR9kBE3U5eVFBVJW8bpEGEhFFfR6PiI-NIbqKzLT8U4,697
+scipy/sparse/linalg/eigen.py,sha256=SItXs6TCDv9zJFnj8_KyBzJakRC2oeIGDqVEs0sHmzQ,664
+scipy/sparse/linalg/interface.py,sha256=JHIM0cIQUEzMmUqhkU69hTy6seeG648_l2XI39nmLvs,682
+scipy/sparse/linalg/isolve.py,sha256=BWvUveL2QGKFxqVGDFq2PpGEggkq204uPYs5I83lzgY,671
+scipy/sparse/linalg/matfuncs.py,sha256=zwrqI0IwAPhQt6IIJ-oK5W_ixhGMGcYVGcSr2qU6lFI,697
+scipy/sparse/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_expm_multiply.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_interface.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_norm.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_onenormest.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_propack.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_pydata_sparse.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_special_sparse_arrays.cpython-310.pyc,,
+scipy/sparse/linalg/tests/propack_test_data.npz,sha256=v-NNmpI1Pgj0APODcTblU6jpHUQRhpE9ObWb-KYnu6M,600350
+scipy/sparse/linalg/tests/test_expm_multiply.py,sha256=EN5HcjT92SgJuTHX89Ebh-OIgrrR0UVxjcrPYmNAN60,13955
+scipy/sparse/linalg/tests/test_interface.py,sha256=MmCzkRdcaIy2DUOYRFRv8px_Hk68AFdepBe8ivbSXLA,17953
+scipy/sparse/linalg/tests/test_matfuncs.py,sha256=gPpXsIUZg97wL_fzHodNMyswgZ0h9nqxTqxFu8_3bL0,21885
+scipy/sparse/linalg/tests/test_norm.py,sha256=8waDQ-csiw4jTIQPz8qlseqgosvjY9OHfAU7lJ8yLxo,6163
+scipy/sparse/linalg/tests/test_onenormest.py,sha256=EYUVD6i7RGiMi_bclm1_4YkLZSAma5CHqRH9YeDvtwM,9227
+scipy/sparse/linalg/tests/test_propack.py,sha256=Tvcx6MliY6i_Px0KlKfGwjFCElH5y2Arekm7WVAhKqI,5539
+scipy/sparse/linalg/tests/test_pydata_sparse.py,sha256=fqGKTw7gnPyHQ47mlWjL5wDEPZ2i8gbzpZvwPHHc5OQ,6213
+scipy/sparse/linalg/tests/test_special_sparse_arrays.py,sha256=2Z7r1LPx7QTekuXNTLcspGOdJ9riRwioGIpxzIa0Kh4,12854
+scipy/sparse/sparsetools.py,sha256=0d2MTFPJIvMWcTfWTSKIzP7AiVyFGS76plzgzWSXGuQ,2168
+scipy/sparse/spfuncs.py,sha256=zcwv-EvwXW-_7kjRJqNm-ZoKbDcxlU4xOuvl3iBWao0,582
+scipy/sparse/sputils.py,sha256=coz-V4p4Vg2eT1yc3sZF6_7FXKvj2ZuP7QKhPF4UEb0,973
+scipy/sparse/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_array_api.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_common1d.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_construct.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_coo.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_csc.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_csr.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_deprecations.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_dok.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_extract.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_matrix_io.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_minmax1d.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_sparsetools.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_spfuncs.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_sputils.cpython-310.pyc,,
+scipy/sparse/tests/data/csc_py2.npz,sha256=usJ_Gj6x_dEC2uObfdYc6D6C8JY4jjROFChQcZhNAfo,846
+scipy/sparse/tests/data/csc_py3.npz,sha256=axuEMVxwd0F-cgUS0IalpiF8KHW4GNJ3BK6bcjfGnf4,851
+scipy/sparse/tests/test_array_api.py,sha256=OWXlJJzLgz9LdbLyJ8PrOaAdDRR8-xJs067jY37AwqI,14465
+scipy/sparse/tests/test_base.py,sha256=ns97Qb0d96Bkts3VnayHqYg8K9t7qYQBvmvq6UP2vpM,190463
+scipy/sparse/tests/test_common1d.py,sha256=uMbeYmB-FcSE2gQ8tC4CvptRalUDOPNy3amXTDQ34EQ,15613
+scipy/sparse/tests/test_construct.py,sha256=6J4zV_rbj-eO7rLiR4kF_3nxf1sf82lzxOzHFif91iM,33414
+scipy/sparse/tests/test_coo.py,sha256=opa1NGLbCzMDMIbuxS1nn7kFhFx1cu1WLQTJg8SZe04,8477
+scipy/sparse/tests/test_csc.py,sha256=rB2cBXznxPdQbMZpdQyQitUdCdEeO6bWt7tQ_LBGGDw,2958
+scipy/sparse/tests/test_csr.py,sha256=efYU3H8Mm3GIB0ZRxXQCZixFo2OB56AR016k-bz33tY,6488
+scipy/sparse/tests/test_deprecations.py,sha256=g4bw2bVauWSGt4e0yvDJ1MMkqDtp97kTl77EXwyDsIs,645
+scipy/sparse/tests/test_dok.py,sha256=iGzGJVnfC-aLW7Ra2GXJv8COW8V-bBc2nphTTcXcDZU,5761
+scipy/sparse/tests/test_extract.py,sha256=4qUPrtCv9H7xd-c9Xs51seQCiIlK45n-9ZEVTDuPiv8,1685
+scipy/sparse/tests/test_matrix_io.py,sha256=sLyFQeZ8QpiSoTM1A735j-LK4K0MV-L7VnWtNaBJhw4,3305
+scipy/sparse/tests/test_minmax1d.py,sha256=HNR0aaPGesVzenx_iXNKTs9bMoGomckk7aeUscjnGx0,2375
+scipy/sparse/tests/test_sparsetools.py,sha256=zKeUESux895mYLdhhW_uM5V1c-djdEKnZ-xURx5fNrw,10543
+scipy/sparse/tests/test_spfuncs.py,sha256=ECs34sgYYhTBWe4hIkx357obH2lLsnJWkh7TfacjThw,3258
+scipy/sparse/tests/test_sputils.py,sha256=h8YJ7QKigGy49OPf_X8KZBF3ZmB5RN3BjghNeMGg3rI,7286
+scipy/spatial/__init__.py,sha256=SOzwiLe2DZ3ymTbCiSaYRG81hJfeqSFy5PcccZ3Cwn0,3697
+scipy/spatial/__pycache__/__init__.cpython-310.pyc,,
+scipy/spatial/__pycache__/_geometric_slerp.cpython-310.pyc,,
+scipy/spatial/__pycache__/_kdtree.cpython-310.pyc,,
+scipy/spatial/__pycache__/_plotutils.cpython-310.pyc,,
+scipy/spatial/__pycache__/_procrustes.cpython-310.pyc,,
+scipy/spatial/__pycache__/_spherical_voronoi.cpython-310.pyc,,
+scipy/spatial/__pycache__/ckdtree.cpython-310.pyc,,
+scipy/spatial/__pycache__/distance.cpython-310.pyc,,
+scipy/spatial/__pycache__/kdtree.cpython-310.pyc,,
+scipy/spatial/__pycache__/qhull.cpython-310.pyc,,
+scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so,sha256=EDFGE2PDdyqaMgMGkzQbESCT_5ieq249BCE9aHPCNdk,1027824
+scipy/spatial/_ckdtree.pyi,sha256=rt73FClv4b7Ua0TcIj4gLWWfiNrETMlCFnyqTXzeAQM,5892
+scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so,sha256=OVLqkiGCWJfBu2EupE192IeoHgdh2IIy-vr1Y9eWW18,641232
+scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so,sha256=oB_9nLLv1_nIJWpVmTOtfKC9wwFJHopahlk-IEz96Ok,113256
+scipy/spatial/_geometric_slerp.py,sha256=WdTteqZuTzrW-ZMXTKehWTplaOJrtqQimAIWWAaW5vM,7981
+scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so,sha256=M_21smS0-G27W9YwYHvZx2mu-x50yCyHVXO6bvmWtF4,250088
+scipy/spatial/_kdtree.py,sha256=9k5hOuUrM7vnVTUp4_IKCJAjaKekCB378inhmYgeBQQ,33443
+scipy/spatial/_plotutils.py,sha256=hESt827uWjj14yGCsRCLrpa_oMUMwGJZ0DNRNDPGTfo,7259
+scipy/spatial/_procrustes.py,sha256=oj1TnlLsBxlLVXvn7zG5nymeHxQkRMSDzgjsLZGg-9A,4429
+scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so,sha256=0qF76iZb6kg6ppU9G_dfTyg9XKPJNUfvzchCQsrVS6o,1163696
+scipy/spatial/_qhull.pyi,sha256=dmvze3QcaoA_Be6H8zswajVatOPwtJFIFxoZFE9qR-A,5969
+scipy/spatial/_spherical_voronoi.py,sha256=x3TrK6tTkKwfSSSWcdkBOZ9i042t1Hn21oom4aES15U,13539
+scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so,sha256=FMRMxVzZ-dLVhUHOfaBHSoCoCP2TmyHLM7FFigsxbHE,241008
+scipy/spatial/_voronoi.pyi,sha256=aAOiF4fvHz18hmuSjieKkRItssD443p2_w1ggXOIs1g,126
+scipy/spatial/ckdtree.py,sha256=uvC-phcjhzmGLLcE_tKHPn6zrTTjGwVSren0M4jSPng,645
+scipy/spatial/distance.py,sha256=QVH_K3qK3MvElGaoMimK3VNyFmwnuGdq0MvoRumsKRw,91483
+scipy/spatial/distance.pyi,sha256=f9eGCqRUYrQt7gI37JnARDn1FkIVsKRlinx2onMshZQ,5273
+scipy/spatial/kdtree.py,sha256=Wlqqnd9uwGZ1t7UoL4uIzUhSYo247jaOpokehDGj66o,655
+scipy/spatial/qhull.py,sha256=aFE-KscuINt6QIhFC2dqhwFCYu3HSBkVXDH5exHH71s,622
+scipy/spatial/qhull_src/COPYING.txt,sha256=NNsMDE-TGGHXIFVcnNei4ijRKQuimvDy7oDEG7IDivs,1635
+scipy/spatial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/spatial/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test__plotutils.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test__procrustes.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_distance.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_hausdorff.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_kdtree.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_qhull.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_slerp.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_spherical_voronoi.cpython-310.pyc,,
+scipy/spatial/tests/data/cdist-X1.txt,sha256=ULnYAgX2_AwOVF-VE7XfnW5S0pzhx7UAoocxSnXMaWs,5750
+scipy/spatial/tests/data/cdist-X2.txt,sha256=_IJVjXsp3pvd8NNPNTLmVbHOrzl_RiEXz7cb86NfvZ4,11500
+scipy/spatial/tests/data/degenerate_pointset.npz,sha256=BIq8Hd2SS_LU0fIWAVVS7ZQx-emVRvvzgnaO2lh4gXU,22548
+scipy/spatial/tests/data/iris.txt,sha256=k19QSfkqhMmByqNMzwWDmM6wf5dt6whdGyfAyUO3AW0,15000
+scipy/spatial/tests/data/pdist-boolean-inp.txt,sha256=5Z9SMsXrtmzeUwJlVmGkrPDC_Km7nVpZIbBl7p3Hdc0,50000
+scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt,sha256=Yerj1wqIzcdyULlha-q02WBNGyS2Q5o2wAr0XVEkzis,178801
+scipy/spatial/tests/data/pdist-chebyshev-ml.txt,sha256=NEd2b-DONqUMV9f8gJ2yod17C_5fXGHHZ38PeFsXkyw,3041
+scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt,sha256=UCWZJeMkMajbpjeG0FW60b0q-4r1geAyguNY6Chx5bM,178801
+scipy/spatial/tests/data/pdist-cityblock-ml.txt,sha256=8Iq7cF8oMJjpqd6qsDt_mKPQK0T8Ldot2P8C5rgbGIU,3041
+scipy/spatial/tests/data/pdist-correlation-ml-iris.txt,sha256=l2kEAu0Pm3OsFJsQtHf9Qdy5jnnoOu1v3MooBISnjP0,178801
+scipy/spatial/tests/data/pdist-correlation-ml.txt,sha256=S4GY3z-rf_BGuHmsnColMvR8KwYDyE9lqEbYT_a3Qag,3041
+scipy/spatial/tests/data/pdist-cosine-ml-iris.txt,sha256=hQzzoZrmw9OXAbqkxC8eTFXtJZrbFzMgcWMLbJlOv7U,178801
+scipy/spatial/tests/data/pdist-cosine-ml.txt,sha256=P92Tm6Ie8xg4jGSP7k7bmFRAP5MfxtVR_KacS73a6PI,3041
+scipy/spatial/tests/data/pdist-double-inp.txt,sha256=0Sx5yL8D8pyYDXTIBZAoTiSsRpG_eJz8uD2ttVrklhU,50000
+scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt,sha256=3-UwBM7WZa4aCgmW_ZAdRSq8KYMq2gnkIUqU73Z0OLI,178801
+scipy/spatial/tests/data/pdist-euclidean-ml.txt,sha256=rkQA2-_d7uByKmw003lFXbXNDjHrUGBplZ8nB_TU5pk,3041
+scipy/spatial/tests/data/pdist-hamming-ml.txt,sha256=IAYroplsdz6n7PZ-vIMIJ4FjG9jC1OSxc3-oVJdSFDM,3041
+scipy/spatial/tests/data/pdist-jaccard-ml.txt,sha256=Zb42SoVEnlTj_N_ndnym3_d4RNZWeHm290hTtpp_zO8,3041
+scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt,sha256=L7STTmlRX-z-YvksmiAxEe1UoTmDnQ_lnAjZH53Szp0,172738
+scipy/spatial/tests/data/pdist-jensenshannon-ml.txt,sha256=-sZUikGMWskONojs6fJIMX8VEWpviYYg4u1vipY6Bak,2818
+scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt,sha256=N5L5CxRT5yf_vq6pFjorJ09Sr-RcnrAlH-_F3kEsyUU,178801
+scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt,sha256=DRgzqxRtvQVzFnpFAjNC9TDNgRtk2ZRkWPyAaeOx3q4,3041
+scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt,sha256=jz7SGKU8GuJWASH2u428QL9c-G_-8nZvOFSOUlMdCyA,178801
+scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt,sha256=37H01o6GibccR_hKIwwbWxGX0Tuxnb-4Qc6rmDxwwUI,178801
+scipy/spatial/tests/data/pdist-seuclidean-ml.txt,sha256=YmcI7LZ6i-Wg1wjAkLVX7fmxzCj621Pc5itO3PvCm_k,3041
+scipy/spatial/tests/data/pdist-spearman-ml.txt,sha256=IrtJmDQliv4lDZ_UUjkZNso3EZyu7pMACxMB-rvHUj0,3041
+scipy/spatial/tests/data/random-bool-data.txt,sha256=MHAQdE4hPVzgu-csVVbm1DNJ80dP7XthJ1kb2In8ImM,6000
+scipy/spatial/tests/data/random-double-data.txt,sha256=GA8hYrHsTBeS864GJf0X6JRTvGlbpM8P8sJairmfnBU,75000
+scipy/spatial/tests/data/random-int-data.txt,sha256=xTUbCgoT4X8nll3kXu7S9lv-eJzZtwewwm5lFepxkdQ,10266
+scipy/spatial/tests/data/random-uint-data.txt,sha256=8IPpXhwglxzinL5PcK-PEqleZRlNKdx3zCVMoDklyrY,8711
+scipy/spatial/tests/data/selfdual-4d-polytope.txt,sha256=rkVhIL1mupGuqDrw1a5QFaODzZkdoaLMbGI_DbLLTzM,480
+scipy/spatial/tests/test__plotutils.py,sha256=fASbg0i7iLiJIEj5vIkiDuTq3wU0z3mKJY019kzKrFk,3814
+scipy/spatial/tests/test__procrustes.py,sha256=wmmnUHRdw_oID0YLi404IEWPH6vEGhvHXSeGPY_idHo,4974
+scipy/spatial/tests/test_distance.py,sha256=m0lxDXuZWREXE-k_yMHUddKqnmbRKo-g-VoVEE2Xez0,84153
+scipy/spatial/tests/test_hausdorff.py,sha256=n-Qm2gVF0zc11tDSCnXBznt5Mp0E1ekTtzfWXjqG54M,7114
+scipy/spatial/tests/test_kdtree.py,sha256=ZlrKMS1JEdkbwFE8WtEMPI3W5H8ldfPjz1D23fcrsKM,49270
+scipy/spatial/tests/test_qhull.py,sha256=v_GB-IN6UdcNdsOQtQUYDnHKNyGAq_4wYkFicEe4-hQ,43989
+scipy/spatial/tests/test_slerp.py,sha256=hYH-2ROq0iswTsli4c-yBLZfACvQL0QVCKrPWTeBNls,16396
+scipy/spatial/tests/test_spherical_voronoi.py,sha256=Ydof8dYsSoYfII5lVDJ82iVynrruwuBdg0_oESw8YoY,14492
+scipy/spatial/transform/__init__.py,sha256=vkvtowJUcu-FrMMXjEiyfnG94Cqwl000z5Nwx2F8OX0,700
+scipy/spatial/transform/__pycache__/__init__.cpython-310.pyc,,
+scipy/spatial/transform/__pycache__/_rotation_groups.cpython-310.pyc,,
+scipy/spatial/transform/__pycache__/_rotation_spline.cpython-310.pyc,,
+scipy/spatial/transform/__pycache__/rotation.cpython-310.pyc,,
+scipy/spatial/transform/_rotation.cpython-310-x86_64-linux-gnu.so,sha256=yGDxsus_6GKKrlwjSWVsXT-LM8MxDZRTcfX61AKOfYk,987072
+scipy/spatial/transform/_rotation.pyi,sha256=SI2NWoIjma0P-DaicaLVeRtafg8_SUvJeXOry2bVa5A,3080
+scipy/spatial/transform/_rotation_groups.py,sha256=XS-9K6xYnnwWywMMYMVznBYc1-0DPhADHQp_FIT3_f8,4422
+scipy/spatial/transform/_rotation_spline.py,sha256=M2i8qbPQwQ49D3mNtqll31gsCMqfqBJe8vOxMPRlD5M,14083
+scipy/spatial/transform/rotation.py,sha256=eVnQRbOorImPet4qbF0W95z_ptTNR80LSLRT2jBZAc8,612
+scipy/spatial/transform/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/spatial/transform/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/spatial/transform/tests/__pycache__/test_rotation.cpython-310.pyc,,
+scipy/spatial/transform/tests/__pycache__/test_rotation_groups.cpython-310.pyc,,
+scipy/spatial/transform/tests/__pycache__/test_rotation_spline.cpython-310.pyc,,
+scipy/spatial/transform/tests/test_rotation.py,sha256=TEyEEVsT4Qd-14wxSxF1mcUO4smcK6gZgu-GXGGfXqw,61014
+scipy/spatial/transform/tests/test_rotation_groups.py,sha256=V6DiLWvJsrdklhS-GlzcA9qEy0cTQpwaNR-7vkhBt1M,5560
+scipy/spatial/transform/tests/test_rotation_spline.py,sha256=g3prW5afu_yJxevIz2LMdRFYLfe8zq-3b6TMGw06Ads,5105
+scipy/special.pxd,sha256=l9Y21wnx5fZLvrxCeCMUWQvBI5gHx7LBhimDWptxke8,42
+scipy/special/__init__.py,sha256=8RBpMhRlS6fAXj1PH0Rj6KkfdTC4E2skg3vZrZ2Q0cs,31975
+scipy/special/__pycache__/__init__.cpython-310.pyc,,
+scipy/special/__pycache__/_add_newdocs.cpython-310.pyc,,
+scipy/special/__pycache__/_basic.cpython-310.pyc,,
+scipy/special/__pycache__/_ellip_harm.cpython-310.pyc,,
+scipy/special/__pycache__/_lambertw.cpython-310.pyc,,
+scipy/special/__pycache__/_logsumexp.cpython-310.pyc,,
+scipy/special/__pycache__/_mptestutils.cpython-310.pyc,,
+scipy/special/__pycache__/_orthogonal.cpython-310.pyc,,
+scipy/special/__pycache__/_sf_error.cpython-310.pyc,,
+scipy/special/__pycache__/_spfun_stats.cpython-310.pyc,,
+scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc,,
+scipy/special/__pycache__/_support_alternative_backends.cpython-310.pyc,,
+scipy/special/__pycache__/_testutils.cpython-310.pyc,,
+scipy/special/__pycache__/add_newdocs.cpython-310.pyc,,
+scipy/special/__pycache__/basic.cpython-310.pyc,,
+scipy/special/__pycache__/orthogonal.cpython-310.pyc,,
+scipy/special/__pycache__/sf_error.cpython-310.pyc,,
+scipy/special/__pycache__/specfun.cpython-310.pyc,,
+scipy/special/__pycache__/spfun_stats.cpython-310.pyc,,
+scipy/special/_add_newdocs.py,sha256=cWyckQIFsSlIkK6swKC0OcWx0ZKlLtlC4D-bLVx-6h4,398483
+scipy/special/_basic.py,sha256=CKWvRFOjr4EhKlzbUf6S0xqolq6yZNC0FgfwupXmxIY,103790
+scipy/special/_cdflib.cpython-310-x86_64-linux-gnu.so,sha256=1L-npBimaXutX3FF_gXvaDaqPtjqgMfqvXbZyFlgc-E,187520
+scipy/special/_comb.cpython-310-x86_64-linux-gnu.so,sha256=NAq1jPghJ33K5HTGHQaFRef2kD1eA5cOP57hXpAgvdM,63456
+scipy/special/_ellip_harm.py,sha256=YHHFZXMtzdJxyjZXKsy3ocIsV-eg6ne3Up79BuFl9P8,5382
+scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so,sha256=Yg4gDMzAzxYplmCKBME9ZJtaY3khUhmdORc6DiTIeSk,138121
+scipy/special/_lambertw.py,sha256=-oSEnHFQWZiUZXMamxPWjfntWq5tt0rzHmI13DxGHBY,3962
+scipy/special/_logsumexp.py,sha256=2MyHR5PWo83qt5RrEnXWRCcWS55gy2s5UWDu30LUvaQ,9027
+scipy/special/_mptestutils.py,sha256=Yl_tYnFW1j2DbH6I-2MBNjjqt4WiDO-phVWyNj1Hpfw,14441
+scipy/special/_orthogonal.py,sha256=jcOgiGPDzhAsxeEmoYhTSDHZ_uSE5TNiG1yTvAliuXI,74558
+scipy/special/_orthogonal.pyi,sha256=XATMiU9ri9e39B5YANXPyQkMqWtfu5rDIP4NA7WSQTU,8304
+scipy/special/_precompute/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/special/_precompute/__pycache__/__init__.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/cosine_cdf.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/expn_asy.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/gammainc_asy.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/gammainc_data.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/lambertw.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/loggamma.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/struve_convergence.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/utils.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/wright_bessel.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/wright_bessel_data.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/wrightomega.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/zetac.cpython-310.pyc,,
+scipy/special/_precompute/cosine_cdf.py,sha256=ZGSeDDpLRsapyx2GbIrqqYR98fvaEQrLn7IE-fuodhE,354
+scipy/special/_precompute/expn_asy.py,sha256=JAz0hY1gBJu3Q_dvscQrSJdgKuwpjqFZVwz-sOQQ21w,1265
+scipy/special/_precompute/gammainc_asy.py,sha256=P5OFRcPkkpjGQeYCaMZ8SFSUmZG_CjrEHv8OLwgcGFc,2502
+scipy/special/_precompute/gammainc_data.py,sha256=Y5taFAdCE3W14bavUACTA3XoCxyh7_Z2NHcs-DKS75E,4077
+scipy/special/_precompute/lambertw.py,sha256=7f4F3ivouVNZwuvVX8TAi2lPB7LirPS8IfN5lEw9zI0,1961
+scipy/special/_precompute/loggamma.py,sha256=iq7ZBrUmk8pXYZwO_wINI4u8ENsLbL9VUShGjGO0Pt0,1094
+scipy/special/_precompute/struve_convergence.py,sha256=z7R0Q5_Ye-EqLI9g-yARdl_j5FooofXMRXPLVrIFJQQ,3624
+scipy/special/_precompute/utils.py,sha256=JXJuI07Jlm4bDHJFVtj0jHq05p-V1ofeXZB16Y05kzI,887
+scipy/special/_precompute/wright_bessel.py,sha256=7z2W3spGANZO31r_xauMA6hIQ0eseRlXx-zJW6du5tU,12868
+scipy/special/_precompute/wright_bessel_data.py,sha256=f1id2Gk5TPyUmSt-Evhoq2_hfRgLUU7Qu_mELKtaXGg,5647
+scipy/special/_precompute/wrightomega.py,sha256=YpmLwtGJ4qazMDY0RXjhnQiuRAISI-Pr9MwKc7pZlhc,955
+scipy/special/_precompute/zetac.py,sha256=LmhJP7JFg7XktHvfm-DgzuiWZFtVdpvYzzLOB1ePG1Q,591
+scipy/special/_sf_error.py,sha256=q_Rbfkws1ttgTQKYLt6zFTdY6DFX2HajJe_lXiNWC0c,375
+scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so,sha256=mTQWpR9jY-Fi9mWZxGtYurMHKXpepRk7xit6hdcMd2I,301592
+scipy/special/_spfun_stats.py,sha256=IjK325nhaTa7koQyvlVaeCo01TN9QWRpK6mDzkuuAq0,3779
+scipy/special/_spherical_bessel.py,sha256=XbbMLs_0qsmbuM7hIb0v6LPn5QrKLwhwAQYl5PtZYjc,10420
+scipy/special/_support_alternative_backends.py,sha256=SYomM7-qPmsMO_0UYzfpVAAdaU9Y9gPb6F6g0xBOnOo,2294
+scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so,sha256=5VlK0NlIAUTjGPoeBcSe8LNZ9CxHI7_udhCjxNZzhM0,289544
+scipy/special/_test_internal.pyi,sha256=BI0xSfTmREV92CPzaHbBo6LikARpqb9hubAQgTT0W6w,338
+scipy/special/_testutils.py,sha256=pnEE50AZrNe2FJ92fM1rsEcTY7lR-zYBE2paEPhI-wk,12027
+scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so,sha256=LcurBfEhiyqJLoEYUYU5SBgt4gAiBGwd-QjVsQy_s_g,1572553
+scipy/special/_ufuncs.pyi,sha256=Bop_e3jGG-wWIrCehOwR7Aa_qEuk-TfWi0C2Phkknmc,8937
+scipy/special/_ufuncs.pyx,sha256=yM5T3uRffyZS1vCfdBke-Kpdd9Y8GE0a0Ozpifl-EDw,890803
+scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so,sha256=i_UufGMbl4CilLlhfuk35wEIKfVAMFDJa3NiVVNMfnI,654984
+scipy/special/_ufuncs_cxx.pxd,sha256=xBBTzhemAneLScqm5Tf3Ufz64gfrMVoeKfE5-EpZmXM,1951
+scipy/special/_ufuncs_cxx.pyx,sha256=uwWM8H7h3Os4NvGdN6fE8OmWi5rN_rZZlnBN15eTvIU,10940
+scipy/special/_ufuncs_cxx_defs.h,sha256=Qi71Kwn1-fg0klmk8fBuGq0x7-DoolwkoJzaH4gyc34,2972
+scipy/special/_ufuncs_defs.h,sha256=Yhew1gtfnDeBLn6aQr0ysVmJwehm2R_4PqxlJAFAl7E,9216
+scipy/special/add_newdocs.py,sha256=np1hD4g1B2jNT4SOMq-6PUkTsGMBEucT5IuL3kcflCg,469
+scipy/special/basic.py,sha256=LRU8rIxXx42O4eVZv21nFwswAu7JFtQ42_4xT5BwYpE,1582
+scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so,sha256=G_YMrpbFbCOckbykBxkjvwnPxP9tqAuog6KMM8tWi_0,3161976
+scipy/special/cython_special.pxd,sha256=OzvZ0di3svc0wvTDEkufTwHCDiDU-F1GygJvsy_Kq0o,16349
+scipy/special/cython_special.pyi,sha256=BQVUCzV8lCylnmLCtnN0Yz_ttlqyzcLc-BZx2KPXPzM,58
+scipy/special/cython_special.pyx,sha256=E7lNHH4Jq07mM3keMhgxLmXn6i-qoTh421Ur1OSy2SY,142731
+scipy/special/orthogonal.py,sha256=2uWRTD_Wg83YzaMwYY8BAdyGVy4Z3iEc7ne5rLpdudo,1830
+scipy/special/sf_error.py,sha256=wOZqzX7iipkH39hOHqBlkmretJRbYy-K7PsnZPyaJFU,573
+scipy/special/specfun.py,sha256=bChigh8GnoirH0wQ8j_D_AY77Pl0Pd8ZqGNgjIMAZ84,826
+scipy/special/special/binom.h,sha256=Nbs4PzhKl-3bSs9AaslHgYYkQy3rHtb8ZiTXqqicW80,2359
+scipy/special/special/cephes/beta.h,sha256=V9TjdBG6gRBVykHA3fNL0fQZAdnIWxd2RbEkZ5bQkNA,7012
+scipy/special/special/cephes/const.h,sha256=ITr0sKUAP4CcYicPmmk65M9XFVupRgfF3FiqOewlbAI,2599
+scipy/special/special/cephes/gamma.h,sha256=AsGJQL5c7V9gahXe3B5_dFIfOsEK2KWqK4X8ECY3EHU,10337
+scipy/special/special/cephes/polevl.h,sha256=ClCCS13O-ePqXSxvmsPZNZR_RoyZQW7xMQo0ePSQmDU,4025
+scipy/special/special/cephes/psi.h,sha256=O9ZDjk-CbhsTpbg9jfQI5VxnxJYu9h5KfGUlf2mISxQ,6323
+scipy/special/special/cephes/trig.h,sha256=NvkMCTA1TpscUcqSQ1EIlbs7FYST2SyUdXvG2_EvANE,1304
+scipy/special/special/cephes/zeta.h,sha256=IvdUT0PdHreDUsPpjqiY4Uhvz0kq6tyegbY2CwU2u4w,4386
+scipy/special/special/config.h,sha256=aMf_pNKWE1iAgJNSnaCKqdPNuKK3Zq9uuck8h6f8Ggs,4315
+scipy/special/special/digamma.h,sha256=TG6_ayajnm-RQByvYF1ohZ93TxwDdnJwaAWoiRGDCRU,7303
+scipy/special/special/error.h,sha256=_sd-2bgRyCtPMb4wLD57i8GmfuYOINeP_o40iRRwvgE,1191
+scipy/special/special/evalpoly.h,sha256=E_GM-Idr-dF5WfeRdvhiYCioNtKRZ10kTBMON8wWm08,1131
+scipy/special/special/lambertw.h,sha256=E59hB9vFOQ3cr_jMrbt9xmwJTkXxTY4FGIFBJh-DSms,5205
+scipy/special/special/loggamma.h,sha256=eQFXyU7sOsRySn7GWV2DypOSfrwfEngSgZ3gTFKuC8k,6000
+scipy/special/special/trig.h,sha256=fLojwOOecF_eRJU5H86THXbZq1dK1hjVG98cLzN4WSg,3116
+scipy/special/special/zlog1.h,sha256=uojL5H_Oe7CipENnvenHNjUkDcXXK0qe6ynocDwSYuQ,977
+scipy/special/spfun_stats.py,sha256=fYFGN-9Q3X9zdm9KTyW6t2oixuaZzQwd_h0eyVvfGBk,545
+scipy/special/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/special/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_bdtr.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_cdflib.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_cosine_distr.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_cython_special.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_data.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_dd.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_digamma.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_ellip_harm.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_erfinv.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_exponential_integrals.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_gamma.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_gammainc.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_hyp2f1.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_kolmogorov.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_loggamma.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_logit.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_ndtr.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_ndtri_exp.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_orthogonal.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_orthogonal_eval.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_owens_t.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_pdtr.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_powm1.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_precompute_expn_asy.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_precompute_gammainc.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_precompute_utils.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_round.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_sf_error.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_sici.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_specfun.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_spence.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_sph_harm.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_spherical_bessel.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_support_alternative_backends.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_trig.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_zeta.cpython-310.pyc,,
+scipy/special/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/special/tests/data/__pycache__/__init__.cpython-310.pyc,,
+scipy/special/tests/data/boost.npz,sha256=1z7Lu1FlRSI0K6BHCmJjqWhOYXwrg3RWX-OnlZP0sjE,1270643
+scipy/special/tests/data/gsl.npz,sha256=rKtwAgjLswHuUesfUSyxwn57TnUz_FpfXNXF1qoZfdg,51433
+scipy/special/tests/data/local.npz,sha256=ECuHbCfsTS-AQdWrL7bf78gUcCEzUWD1FUVeU-Bocf8,203438
+scipy/special/tests/test_basic.py,sha256=0F-3SOrg8xzCcl9NT8QOuXltThFVRHlaJfwNnxD1O64,171573
+scipy/special/tests/test_bdtr.py,sha256=QwGyt0tnutuou25mS0u2LjRgDTYI6ohM2cbZ-He6Os4,3231
+scipy/special/tests/test_boxcox.py,sha256=gUrGF7Ql1adxiPl_YxpsGunDfg-B_WpqI9Zghzool7o,2672
+scipy/special/tests/test_cdflib.py,sha256=zWmnQvdBdSbrlHg_kzoYBs5wfsVXiDuVH1N_2B5Ro48,17441
+scipy/special/tests/test_cdft_asymptotic.py,sha256=DBVVLaduZUHSWlKJ5aBXmxgdNm_YjLvWgyiTTcQq04c,1441
+scipy/special/tests/test_cosine_distr.py,sha256=zL7aWLisIEy1oNKjcynqncgsCxcPKvPb9Odr-J5Xa1M,2690
+scipy/special/tests/test_cython_special.py,sha256=3uVOa0p0OdaqxBWeyewQuedpnQtxDJB5kYolf1vRjoA,18838
+scipy/special/tests/test_data.py,sha256=iXTMMdNj-jCaXSVbhw3KTQrzLSk5wNQEdRBEDZ_2Cug,30269
+scipy/special/tests/test_dd.py,sha256=GROHQEkzIAW6KXkj8J3nPcRDAONcf1nCoArcfx30_5s,1974
+scipy/special/tests/test_digamma.py,sha256=Bm7Hh_aETx6MTN3Wu7Sijy4rYGR_1haNGsi3xfzrAKM,1382
+scipy/special/tests/test_ellip_harm.py,sha256=51KiCpQjqmf2uLZEsty-Vmr0FhoABtvMUz4218WR_S0,9640
+scipy/special/tests/test_erfinv.py,sha256=fzdEHd6MxfSyzQDO93qndXukG2jWj-XNY2X4BJRIdBI,3059
+scipy/special/tests/test_exponential_integrals.py,sha256=hlzNhZEXjo5ioPteG0P85qXuMmVD-WVc67e049tvY8Q,3687
+scipy/special/tests/test_faddeeva.py,sha256=YLY3Ylp4u_8zxTGxOb5kxNfXXEW0ld_GP2ceOR2ev_Y,2568
+scipy/special/tests/test_gamma.py,sha256=hb-ZlA2ZNz6gUGvVtMBgXFl_w30HPmthuUEAmNcz0sw,258
+scipy/special/tests/test_gammainc.py,sha256=Avv52EDQ7M8kUpiVU1BVsW_Gj5HDCzAOojLtoFojKbw,3815
+scipy/special/tests/test_hyp2f1.py,sha256=knYs5n6I8DwQEfbEj-CtXin9xPepe71Doqx1vQ3FYb0,78549
+scipy/special/tests/test_hypergeometric.py,sha256=LqbHLHkdsw8RnVeClpulG6rHRykqZsAyP43AUsKSiQI,5596
+scipy/special/tests/test_kolmogorov.py,sha256=0UoQN7q_De8Mx1NEUzhl9KGLNT8fdq6QoX11_vNS3e4,19410
+scipy/special/tests/test_lambertw.py,sha256=vd5G_70CQz3N_U15mcyE0-2KZ_8QYLKmrJ4ZL-RwFXY,4560
+scipy/special/tests/test_log_softmax.py,sha256=JdiC5C1Fm16rNdQHVWRu-FGMVOv24DPWRnguDDd1zEY,3415
+scipy/special/tests/test_loggamma.py,sha256=x6kuJf-bEnn5ECdkDSgvk3An_A-9UxVsZpqa49IwAq8,1992
+scipy/special/tests/test_logit.py,sha256=PvIgcK33vQjcvHE3_3fVarKTjZ0t35-ksZnhvoqKQrA,5540
+scipy/special/tests/test_logsumexp.py,sha256=Y4hPV6_KotWabV-v2OYVzz_tweKRlHXPCRVFqFk_0fY,6545
+scipy/special/tests/test_mpmath.py,sha256=h0rtQEkOubS2J_2DPq55pVn7dQmrDsiF6kemEWPSwNk,72665
+scipy/special/tests/test_nan_inputs.py,sha256=8aIQJ2Xz1O4Lr7cJz9KDjFj5SEVjccu3j8auelQ3lj8,1831
+scipy/special/tests/test_ndtr.py,sha256=-UMxTIi4CaaLoJ5-SGW9THChPIM3e1_fTY0L877ioNA,2680
+scipy/special/tests/test_ndtri_exp.py,sha256=13eabgdbfcL37RReiUH7g9amT9XMsTLOfwxFJXR_2Ww,3708
+scipy/special/tests/test_orthogonal.py,sha256=lPVOwR_LSrShHfCkhTrRMc2yJj0q3d6f54cW3-cwsVY,31538
+scipy/special/tests/test_orthogonal_eval.py,sha256=iT9QWDaz-V0J77mavxktZ-2oBdJ8y2JifOqiO-wGxk8,9491
+scipy/special/tests/test_owens_t.py,sha256=zRbiKje7KrYJ25f1ZuIBfiFSyNtK_bnkIW7dRETIqME,1792
+scipy/special/tests/test_pcf.py,sha256=RNjEWZGFS99DOGZkkPJ8HNqLULko8UkX0nEWFYX26NE,664
+scipy/special/tests/test_pdtr.py,sha256=VmupC2ezUR3p5tgZx0rqXEHAtzsikBW2YgaIxuGwO5A,1284
+scipy/special/tests/test_powm1.py,sha256=9hZeiQVKqV63J5oguYXv_vqolpnJX2XRO1JN0ouLWAM,2276
+scipy/special/tests/test_precompute_expn_asy.py,sha256=bCQikPkWbxVUeimvo79ToVPgwaudzxGC7Av-hPBgIU4,583
+scipy/special/tests/test_precompute_gammainc.py,sha256=6XSz0LTbFRT-k0SlnPhYtpzrlxKHaL_CZbPyDhhfT5E,4459
+scipy/special/tests/test_precompute_utils.py,sha256=MOvdbLbzjN5Z1JQQgtIyjwjuIMPX4s2bTc_kxaX67wc,1165
+scipy/special/tests/test_round.py,sha256=oZdjvm0Fxhv6o09IFOi8UUuLb3msbq00UdD8P_2Jwaw,421
+scipy/special/tests/test_sf_error.py,sha256=iXZ3bCSQ3oa5_PvrJSfpZme4Ymix5drIcE1Ji2Kfwqo,3902
+scipy/special/tests/test_sici.py,sha256=w4anBf8fiq2fmkwMSz3MX0uy35NLXVqfuW3Fwt2Nqek,1227
+scipy/special/tests/test_specfun.py,sha256=4nKU8JoGF8s4hHo0m_mUZpScU4ZkYKVhVLTBcjxVouc,1196
+scipy/special/tests/test_spence.py,sha256=fChPw7xncNCTPMUGb0C8BC-lDKHWoEXSz8Rb4Wv8vNo,1099
+scipy/special/tests/test_spfun_stats.py,sha256=mKJZ2-kLmVK3ZqX3UlDi9Mx4bRQZ9YoXQW2fxrW2kZs,1997
+scipy/special/tests/test_sph_harm.py,sha256=ySUesSgZBb4RN-QES2L6G6k3QGOCdGLt86fjJ-6EYiQ,1106
+scipy/special/tests/test_spherical_bessel.py,sha256=80H9ub9vzX4QomYZAQk-3IkCI8fNgO-dompHI3QtBVg,14311
+scipy/special/tests/test_support_alternative_backends.py,sha256=PHpXGaxGDvJeZS6mcGTxTHHDf1b2HnWh_dX1i0oLKpU,2650
+scipy/special/tests/test_trig.py,sha256=ZlzoL1qKvw2ZCbIYTNYm6QkeKqYUSeE7kUghELXZwzU,2332
+scipy/special/tests/test_wright_bessel.py,sha256=v1yLL6Ki01VuKPj5nfL-9_FaACvwdIlDsarKsm-z9EQ,4155
+scipy/special/tests/test_wrightomega.py,sha256=BW8TS_CuDjR7exA4l6ADnKhXwgFWUYaN1UIopMBJUZY,3560
+scipy/special/tests/test_zeta.py,sha256=IoBUdssBRj7noPjW-xs9xGFFihZ7wvQpPJidgMOFCOs,1367
+scipy/stats/__init__.py,sha256=k9cOA7sGZ_GO0_AbE9ecVlg-zsq2vbM6HBjKh4CjHjM,18163
+scipy/stats/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc,,
+scipy/stats/__pycache__/_binned_statistic.cpython-310.pyc,,
+scipy/stats/__pycache__/_binomtest.cpython-310.pyc,,
+scipy/stats/__pycache__/_bws_test.cpython-310.pyc,,
+scipy/stats/__pycache__/_censored_data.cpython-310.pyc,,
+scipy/stats/__pycache__/_common.cpython-310.pyc,,
+scipy/stats/__pycache__/_constants.cpython-310.pyc,,
+scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc,,
+scipy/stats/__pycache__/_covariance.cpython-310.pyc,,
+scipy/stats/__pycache__/_crosstab.cpython-310.pyc,,
+scipy/stats/__pycache__/_discrete_distns.cpython-310.pyc,,
+scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc,,
+scipy/stats/__pycache__/_distr_params.cpython-310.pyc,,
+scipy/stats/__pycache__/_entropy.cpython-310.pyc,,
+scipy/stats/__pycache__/_fit.cpython-310.pyc,,
+scipy/stats/__pycache__/_generate_pyx.cpython-310.pyc,,
+scipy/stats/__pycache__/_hypotests.cpython-310.pyc,,
+scipy/stats/__pycache__/_kde.cpython-310.pyc,,
+scipy/stats/__pycache__/_ksstats.cpython-310.pyc,,
+scipy/stats/__pycache__/_mannwhitneyu.cpython-310.pyc,,
+scipy/stats/__pycache__/_morestats.cpython-310.pyc,,
+scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc,,
+scipy/stats/__pycache__/_mstats_extras.cpython-310.pyc,,
+scipy/stats/__pycache__/_multicomp.cpython-310.pyc,,
+scipy/stats/__pycache__/_multivariate.cpython-310.pyc,,
+scipy/stats/__pycache__/_odds_ratio.cpython-310.pyc,,
+scipy/stats/__pycache__/_page_trend_test.cpython-310.pyc,,
+scipy/stats/__pycache__/_qmc.cpython-310.pyc,,
+scipy/stats/__pycache__/_qmvnt.cpython-310.pyc,,
+scipy/stats/__pycache__/_relative_risk.cpython-310.pyc,,
+scipy/stats/__pycache__/_resampling.cpython-310.pyc,,
+scipy/stats/__pycache__/_result_classes.cpython-310.pyc,,
+scipy/stats/__pycache__/_rvs_sampling.cpython-310.pyc,,
+scipy/stats/__pycache__/_sampling.cpython-310.pyc,,
+scipy/stats/__pycache__/_sensitivity_analysis.cpython-310.pyc,,
+scipy/stats/__pycache__/_stats_mstats_common.cpython-310.pyc,,
+scipy/stats/__pycache__/_stats_py.cpython-310.pyc,,
+scipy/stats/__pycache__/_survival.cpython-310.pyc,,
+scipy/stats/__pycache__/_tukeylambda_stats.cpython-310.pyc,,
+scipy/stats/__pycache__/_variation.cpython-310.pyc,,
+scipy/stats/__pycache__/_warnings_errors.cpython-310.pyc,,
+scipy/stats/__pycache__/_wilcoxon.cpython-310.pyc,,
+scipy/stats/__pycache__/biasedurn.cpython-310.pyc,,
+scipy/stats/__pycache__/contingency.cpython-310.pyc,,
+scipy/stats/__pycache__/distributions.cpython-310.pyc,,
+scipy/stats/__pycache__/kde.cpython-310.pyc,,
+scipy/stats/__pycache__/morestats.cpython-310.pyc,,
+scipy/stats/__pycache__/mstats.cpython-310.pyc,,
+scipy/stats/__pycache__/mstats_basic.cpython-310.pyc,,
+scipy/stats/__pycache__/mstats_extras.cpython-310.pyc,,
+scipy/stats/__pycache__/mvn.cpython-310.pyc,,
+scipy/stats/__pycache__/qmc.cpython-310.pyc,,
+scipy/stats/__pycache__/sampling.cpython-310.pyc,,
+scipy/stats/__pycache__/stats.cpython-310.pyc,,
+scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so,sha256=_qaK1oLgr9v_wkyB8LZEl35zR4a6WJsMXpKAzwC1lJU,277968
+scipy/stats/_axis_nan_policy.py,sha256=NnZZH10vl4E8UNNosfmMWh-lv8Xr_4LWeuuwQhJw1qI,29107
+scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so,sha256=Pnje7AjKCEaghydHuepyNQGPEY7jnWL0agzckcjxrxw,359512
+scipy/stats/_biasedurn.pxd,sha256=bQC6xG4RH1E5h2jCKXRMADfgGctiO5TgNlJegKrR7DY,1046
+scipy/stats/_binned_statistic.py,sha256=JYbpISuP2vn7U0FD7W5CWffC2dbMwAVeBLIlKJyxy8Q,32712
+scipy/stats/_binomtest.py,sha256=aW6p-vRkv3pSB8_0nTfT3kNAhV8Ip44A39EEPyl9Wlc,13118
+scipy/stats/_boost/__init__.py,sha256=e1_a5N-BBpz7qb0VeLQ7FOEURW9OfQ3tV42_fMDVkOU,1759
+scipy/stats/_boost/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/_boost/beta_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=EKYn1JRW_eTSrQXegBZ0Xp7VWE1yUNhTXoscFT1JRso,204728
+scipy/stats/_boost/binom_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=iJxMfF0bHb11DZxFjoU5_2vUdjvQwhG1Mz7sQfrQfFc,176008
+scipy/stats/_boost/hypergeom_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=xVuzmm0MQF8xsfLaRevOqobV9mFkN5C6OIp_1anhh9U,120848
+scipy/stats/_boost/invgauss_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=yFrVVDJg_-QyLuDscN7WGoO1mlDga75evYoelsY4kuQ,171176
+scipy/stats/_boost/nbinom_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=fuwzokUvIwKP5RBGnKzIwsygJ-_da2yRp2BW84F5be8,180336
+scipy/stats/_boost/ncf_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=naUKrQb-OrawOtL7EnyGBqjaqoMrfEghb6oVdOBAHuA,174120
+scipy/stats/_boost/nct_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=dqECFMhlCux7V0jvyMmgmrtzaYRVjWK8Gycdveh0BOo,223872
+scipy/stats/_boost/ncx2_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=Kdu_108i98ry18M_Bvh-SquZhd0mrYvOy8RqNG4J4pE,174968
+scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=LPhGuDWa6YF1bgUZas4-JlP4Yc4mZWSI497-4ntc9Bs,109096
+scipy/stats/_bws_test.py,sha256=XQMGiLMPKFN3b6O4nD5tkZdcI8D8vggSx8B7XLJ5EGs,7062
+scipy/stats/_censored_data.py,sha256=Ts7GSYYti2z-8yoOJTedj6aCLnGhugLlDRdxZc4rPxs,18306
+scipy/stats/_common.py,sha256=4RqXT04Knp1CoOJuSBV6Uy_XmcmtVr0bImAbSk_VHlQ,172
+scipy/stats/_constants.py,sha256=_afhD206qrU0xVct9aXqc_ly_RFDbDdr0gul9Nz6LCg,962
+scipy/stats/_continuous_distns.py,sha256=sKcoHSKqUAskV8xkIDx26U04wWzZxDZlkA5HFNjauPQ,386328
+scipy/stats/_covariance.py,sha256=vu5OY1tuC5asr3FnwukQKwwJKUDP-Rlp0Kbe1mT36qM,22527
+scipy/stats/_crosstab.py,sha256=f4Sqooh-gPyTjLMHRbmhkVaOT-nhrOZ2NJ-gfPjvyuY,7355
+scipy/stats/_discrete_distns.py,sha256=7Hm_bUNUBM8cgjepOOWLE3se17Jtg8e07W1jL1seBHo,59346
+scipy/stats/_distn_infrastructure.py,sha256=3QkGaXLtQF-AF4KhHamPCJSJQVXekOQmkX2tNpWUTv4,148306
+scipy/stats/_distr_params.py,sha256=odGVYiGgrvM6UFujQZd9K0u6ojIIgHlURtsD7x7kAxU,8732
+scipy/stats/_entropy.py,sha256=b0wlhLQRWEIDZrOTMFfRwx4aPE6HqnJ6HTtBGoGXrpM,15232
+scipy/stats/_fit.py,sha256=_Abj6CcENqRz0z4O27Zp1q002JrXzdnKCo2KL7RjvUg,59771
+scipy/stats/_generate_pyx.py,sha256=gHEsVa0zFLC5CSEpsalRLxA0R6DP1ghV9VPV1_ZxDh8,829
+scipy/stats/_hypotests.py,sha256=-3f22z3TZNK7W_Cu-xmf2vy_gALLXYW3paYw48sNzcI,78852
+scipy/stats/_kde.py,sha256=8eZxz9JkZXUphFb6-ibzvT2fUpMY615kU4KmwRYMu4I,25138
+scipy/stats/_ksstats.py,sha256=Svh0qUd7GI1qmMNRIlv8_AfH0Rf7SmVn9mQ2gQdjd3k,20116
+scipy/stats/_levy_stable/__init__.py,sha256=n6IgB_ZpXpe05d3399bs31shsCZVepUOIrrW7pt149g,45541
+scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/_levy_stable/levyst.cpython-310-x86_64-linux-gnu.so,sha256=AhrBC3lJHLulZ34FL8coumDDtKGT4nMHsA2imYy8YLA,66512
+scipy/stats/_mannwhitneyu.py,sha256=GojWBxRMWgQEGGSJjona90xX18AYiKcSPjJy9rvqtV0,20522
+scipy/stats/_morestats.py,sha256=RwInwxrEuX7q4GORyyVN6AVnXPVLCaoO2t-RZS3dK_k,186567
+scipy/stats/_mstats_basic.py,sha256=2mJYZK1eNIgRcptmSjZgKsRr0DKtNCAbxLEQiwuvRWA,119363
+scipy/stats/_mstats_extras.py,sha256=TeBf3hF0OtcnDk3pTW6iutrzW0H0T7dXx923gHib2pY,16370
+scipy/stats/_multicomp.py,sha256=ae_nYfCQVLduyPb5sRTCcV0MpcymnV4H8SM35u3E8NY,17282
+scipy/stats/_multivariate.py,sha256=ZPrMbYAus8PUyWDWu87ZWf7fdhQUQrqsX8okqlnQmFY,237847
+scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so,sha256=5Blqrk4HPmojSUIAaTA8byccxam5LulYu_OV8C1gfW4,84952
+scipy/stats/_odds_ratio.py,sha256=S_zkibLVH7K8Qj6IO6sTkXtq-lGsp8sj_wIXitgu7Es,17858
+scipy/stats/_page_trend_test.py,sha256=OvisWd3E6CF7rdFRGv46HWOfJlyHalMITt5iJPzE8LI,18987
+scipy/stats/_qmc.py,sha256=ZwXM8sAjx8NfkHXQOC6uEdvIydj-vSfHVks73njFGnY,99365
+scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so,sha256=KnU9jGK3JJX0Jie06f2IRZ36iMXHL5hkETDJx-8Yles,286880
+scipy/stats/_qmc_cy.pyi,sha256=xOpTSlaG_1YDZhkJjQQtukbcgOTAR9FpcRMkU5g9mXc,1134
+scipy/stats/_qmvnt.py,sha256=Mss1xkmWwM3o4Y_Mw78JI-eB4pZBeig47oAVpBcrMMc,18767
+scipy/stats/_rcont/__init__.py,sha256=dUzWdRuJNAxnGYVFjDqUB8DMYti3by1WziKEfBDOlB4,84
+scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so,sha256=82H3m0rDu7M0sfoqoJxPSTGMnXKkAdrnxpxuSjhYV7g,299408
+scipy/stats/_relative_risk.py,sha256=5zeYBMshYwtomiLTkaXc1nmWYD0FsaQNjf0iuDadtSc,9571
+scipy/stats/_resampling.py,sha256=4PzopnEwUUZVMkPZlcBl4fddOu1HCZolna8iOmPenXc,81473
+scipy/stats/_result_classes.py,sha256=_ghuGdpFsCMuEmnfHg1AeorR-fASc77ACXYWEmQzXjI,1085
+scipy/stats/_rvs_sampling.py,sha256=Hz5U8lTHrVPZtGg-OeAKzSA5HW9M51OwH8AU4j2xXVM,2233
+scipy/stats/_sampling.py,sha256=YJ1mG2tkXW4Em-virElY-cNzMXn8lHbOxNxujqDsPY0,46408
+scipy/stats/_sensitivity_analysis.py,sha256=qu5mNpZZhggy0mywqB8jsqcZZagzsH0mICG4FIz7bhM,24745
+scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so,sha256=lD6iGaUNOL4TzPVLWM1MC019odo5DiOj6j3nVz0AXrA,403816
+scipy/stats/_sobol.pyi,sha256=TAywylI75AF9th9QZY8TYfHvIQ1cyM5QZi7eBOAkrbg,971
+scipy/stats/_sobol_direction_numbers.npz,sha256=SFmTEUfULORluGBcsnf5V9mLg50DGU_fBleTV5BtGTs,589334
+scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so,sha256=QzRr7fyOgXBEmeRgMl-9NCBfiXZg8SllcXtv31TYf_8,766320
+scipy/stats/_stats.pxd,sha256=US2p3SKahv_OPhZClWl_h3cZe7UncGZoQJeixoeFOPg,708
+scipy/stats/_stats_mstats_common.py,sha256=ken8kD9hSgUOhmN6biu0d9QNaumzMB5uLb04ZQeib0Y,18593
+scipy/stats/_stats_py.py,sha256=7Ny49fBYXJkDUB4q55MuTm1z4ZPjbZTjZvcbtUtIqnQ,423593
+scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so,sha256=SJVnF2IAScl7diLBUJkDJ3vBcs3HHH5S1L7cNRDzh1Y,158904
+scipy/stats/_survival.py,sha256=a6pNTOpNnkq3XFoGuid1cJrsObuzpgI7psUzP0PU2j0,26005
+scipy/stats/_tukeylambda_stats.py,sha256=eodvo09rCVfcYa1Uh6BKHKvXyY8K5Zg2uGQX1phQ6Ew,6871
+scipy/stats/_unuran/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/stats/_unuran/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so,sha256=-ysKtSG4zPKLCTCJlIraToMgjxPhIc4NIxTGqS4wDiw,1589832
+scipy/stats/_unuran/unuran_wrapper.pyi,sha256=RGAWLNAHrkAtaS-EjIkcTIr7sag9b0Lx_3i7s_keBfk,5551
+scipy/stats/_variation.py,sha256=oHqUpfaL49IxpLmgac1te5Av5MXuScP9XrxRzywJR6I,4375
+scipy/stats/_warnings_errors.py,sha256=MpucxNFYEDytXh7vrZCMqTkRfuXTvvMpQ2W_Ak2OnPk,1196
+scipy/stats/_wilcoxon.py,sha256=wkgJyjir4LkHSeJXWKn1akskHxnNB9_ZGKEZ-8CqfH4,7936
+scipy/stats/biasedurn.py,sha256=kSspd2wFUf85L3FgTYA04jg7oq9ROtqppSMMoPfPm7E,529
+scipy/stats/contingency.py,sha256=8Imh2sKSk_il8o55LaQTC0HMODNnjC4aAv4RW6W0zCk,16275
+scipy/stats/distributions.py,sha256=9Kt2fyTohorJcf6a7M9DYH8Nu4jEU66nKP01cRhKmuE,859
+scipy/stats/kde.py,sha256=_Bawa8xgGYr6hM1c7AM1eKFSZMuV124sA_NIKUqG7Ho,720
+scipy/stats/morestats.py,sha256=q2zUyJucrLoBeADOzPjI8ZeOXvuAzg_wGowBG4EdmMU,1391
+scipy/stats/mstats.py,sha256=aRbrykjrvl-qOBkmGjlFMH4rbWYSqBBQHReanSAomFg,2466
+scipy/stats/mstats_basic.py,sha256=y0qYsc9UjIN6FLUTDGRZSteuDvLsvyDYbru25xfWCKQ,1888
+scipy/stats/mstats_extras.py,sha256=aORMhUJUmlI23msX7BA-GwTH3TeUZg1qRA9IE5X5WWM,785
+scipy/stats/mvn.py,sha256=1vEs5P-H69S2KnQjUiAvA5E3VxyiAOutYPr2npkQ2LE,565
+scipy/stats/qmc.py,sha256=qN3l4emoGfQKZMOAnFgoQaKh2bJGaBzgCGwW1Ba9mU4,11663
+scipy/stats/sampling.py,sha256=Tyd68aXwZV51Fwr5pl41WapJ05OG3XWWcYlsQeg6LgA,1683
+scipy/stats/stats.py,sha256=YPMYFQOjf3NFWt1kkXTZNMe62TpHaaBDa7CjIvQkw24,2140
+scipy/stats/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/stats/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/common_tests.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_axis_nan_policy.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_contingency.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_continuous_basic.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_continuous_fit_censored.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_discrete_distns.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_entropy.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_fit.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_hypotests.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_kdeoth.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_mstats_basic.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_multicomp.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_qmc.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_rank.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_relative_risk.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_resampling.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_sampling.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_sensitivity_analysis.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_survival.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_tukeylambda_stats.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc,,
+scipy/stats/tests/common_tests.py,sha256=buhvK6hFtUkMIu1iKuiqXwbg_IGeVJ0e4Ml66xuzFXg,12288
+scipy/stats/tests/data/__pycache__/_mvt.cpython-310.pyc,,
+scipy/stats/tests/data/__pycache__/fisher_exact_results_from_r.cpython-310.pyc,,
+scipy/stats/tests/data/_mvt.py,sha256=OvFCmMqI74DWIgo32UV55dP1nzvFvYBSyYcmKJes9pI,6905
+scipy/stats/tests/data/fisher_exact_results_from_r.py,sha256=BKxPAi4h3IOebcZYGxCbutYuAX0tlb40P0DEkfEi918,27349
+scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy,sha256=JU0t7kpNVHuTMcYCQ8b8_K_9JsixBNCNT2BFp2RbO7o,4064
+scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy,sha256=zxjB8tZaIyvyxxISgt8xvyqL6Cevr8TtgQ7TdFfuiYo,183728
+scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy,sha256=_umVErq0zMZWm0e5JOSwNOHNurViT6_H4SBki9X3oSg,183688
+scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy,sha256=88cZ7dVDH7nnuey20Z48p6kJUpi9GfImaFsPykDwwHM,9328
+scipy/stats/tests/data/nist_anova/AtmWtAg.dat,sha256=Qdd0i7H4cNhAABfFOZPuplhi_9SCquFpO-hNkyRcMD8,3063
+scipy/stats/tests/data/nist_anova/SiRstv.dat,sha256=x9wJ2g1qnzf4DK_w9F_WiOiDMDEg4td2z6uU77G07xM,1947
+scipy/stats/tests/data/nist_anova/SmLs01.dat,sha256=KdnJedRthF7XLA-w7XkIPIMTgzu89yBAMmZA2H4uQOQ,6055
+scipy/stats/tests/data/nist_anova/SmLs02.dat,sha256=nCPyxRk1dAoSPWiC7kG4dLaXs2GL3-KRXRt2NwgXoIA,46561
+scipy/stats/tests/data/nist_anova/SmLs03.dat,sha256=6yPHiQSk0KI4oURQOk99t-uEm-IZN-8eIPHb_y0mQ1U,451566
+scipy/stats/tests/data/nist_anova/SmLs04.dat,sha256=fI-HpgJF9cdGdBinclhVzOcWCCc5ZJZuXalUwirV-lc,6815
+scipy/stats/tests/data/nist_anova/SmLs05.dat,sha256=iJTaAWUFn7DPLTd9bQh_EMKEK1DPG0fnN8xk7BQlPRE,53799
+scipy/stats/tests/data/nist_anova/SmLs06.dat,sha256=riOkYT-LRgmJhPpCK32x7xYnD38gwnh_Eo1X8OK3eN8,523605
+scipy/stats/tests/data/nist_anova/SmLs07.dat,sha256=QtSS11d-vkVvqaIEeJ6oNwyET1CKoyQqjlfBl2sTOJA,7381
+scipy/stats/tests/data/nist_anova/SmLs08.dat,sha256=qrxQQ0I6gnhrefygKwT48x-bz-8laD8Vpn7c81nITRg,59228
+scipy/stats/tests/data/nist_anova/SmLs09.dat,sha256=qmELOQyNlH7CWOMt8PQ0Z_yxgg9Hxc4lqZOuHZxxWuc,577633
+scipy/stats/tests/data/nist_linregress/Norris.dat,sha256=zD_RTRxfqJHVZTAAyddzLDDbhCzKSfwFGr3hwZ1nq30,2591
+scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy,sha256=7vTccC3YxuMcGMdOH4EoTD6coqtQKC3jnJrTC3u4520,38624
+scipy/stats/tests/data/studentized_range_mpmath_ref.json,sha256=icZGNBodwmJNzOyEki9MreI2lS6nQJNWfnVJiHRNRNM,29239
+scipy/stats/tests/test_axis_nan_policy.py,sha256=pNw12PLiF58FVRUPvFvE-DbNGokYS8AH-QFcyJO-lV0,51478
+scipy/stats/tests/test_binned_statistic.py,sha256=WE5KdJq4zJxZ1LuYp8lv-RMcTEyjuSkjvFHWsGMujkM,18814
+scipy/stats/tests/test_boost_ufuncs.py,sha256=B9lwHkVasspQA78Rz3vtLQESnPRC7Z6R9druZeebs9Q,1825
+scipy/stats/tests/test_censored_data.py,sha256=pAQfSHhmcetcxoS1ZgIHVm1pEbapW7az7I-y_8phb5w,6935
+scipy/stats/tests/test_contingency.py,sha256=fMeGnTldQjLa5CSaaQ6qH90JXzrUivthVD-9DafgQm0,7706
+scipy/stats/tests/test_continuous_basic.py,sha256=-XYuKdMujql8lSh3Xq-vX0UGV32RI0-S0722lmepnkg,41793
+scipy/stats/tests/test_continuous_fit_censored.py,sha256=7hu1sSo9hhh0g9pmPMmjj2BI2rkxvA1h20XdMYZeyog,24188
+scipy/stats/tests/test_crosstab.py,sha256=tvCoZGfVasNIhYxLQIe3dcdMm34s2ykxxPmCRTIOFc0,3882
+scipy/stats/tests/test_discrete_basic.py,sha256=6wVF_k93w1I2ZMtb2kaJ2LK0rygVKoiPRNm87Oue1gE,19924
+scipy/stats/tests/test_discrete_distns.py,sha256=tdrO5avvjTRHi9z1uXIxmqGIZKO8hCCGwgY0cLrnLkI,22684
+scipy/stats/tests/test_distributions.py,sha256=_0zETqAJu1LQi4hqfmlCuR-7L-IMDTCzD860V7kcFII,384266
+scipy/stats/tests/test_entropy.py,sha256=92tO5uF3bpqUoU0gpmn89fInuKjVTatXPf5hwh9Kbns,11281
+scipy/stats/tests/test_fast_gen_inversion.py,sha256=2FV7tIuHWfjLGO4xMDi4j5poA1zBwEs-tpkwSVDaLrs,15889
+scipy/stats/tests/test_fit.py,sha256=GqCiCnEivEGOkloerHmKClzwAzQa-bpvf6-nWVP0Qys,45662
+scipy/stats/tests/test_hypotests.py,sha256=e8FUHEowBTmeixb1g9yTpvs5mZofJeRQJmlxVaqHS1o,80302
+scipy/stats/tests/test_kdeoth.py,sha256=cCEieP06bjuIrS-V5P7q6T7st0z5zG1AR9KyEywvWew,20470
+scipy/stats/tests/test_morestats.py,sha256=leIrk4vutRvjFxgREgs7zVcPDnI96QOh1BNn_nYKNiE,127621
+scipy/stats/tests/test_mstats_basic.py,sha256=4dvTBP06G8tEbqZwimB9y0HxHGdyor_x21AbUHeqn6o,86407
+scipy/stats/tests/test_mstats_extras.py,sha256=CCexzT1lksTG_WvGvHn6-CuWd_ZXoFviNGnBZd_hE7Y,7297
+scipy/stats/tests/test_multicomp.py,sha256=xLlLP54cWsLAbSsfodoTkuJa9FJM1qKnlSrDGE-jRZ0,17826
+scipy/stats/tests/test_multivariate.py,sha256=naPnWGp6fXMS4ALDnqDd4p2oWmTEqYbczxzTQi5494E,153313
+scipy/stats/tests/test_odds_ratio.py,sha256=RIsmgnmUUH3DvynDRZUaS6llCbXm2oWIfPa48IJJ-gI,6705
+scipy/stats/tests/test_qmc.py,sha256=MsZ_hgjfxSXpqLlkKrk8x1FJy8ImmZwF2cVrcc1uiKM,54645
+scipy/stats/tests/test_rank.py,sha256=uxJXitafsPrfI3yrdVOT1Hiz3abzy5vCRafSnpn_KfU,11721
+scipy/stats/tests/test_relative_risk.py,sha256=jzOGNQ2y9_YfFnXiGAiRDrgahy66qQkw6ZkHgygCJMA,3646
+scipy/stats/tests/test_resampling.py,sha256=X8uKrXUDZbKETZrPmv5cmHilyfIzyfwj5OPPm5beUyw,71766
+scipy/stats/tests/test_sampling.py,sha256=EOtDuGLi87801MG0rkDsJ6n7PfIO8f44n4xjdt0vxY4,54513
+scipy/stats/tests/test_sensitivity_analysis.py,sha256=mMifx96zCAx1OOM0Er3ugd_S2I6bih9GF1pir6djNyQ,10134
+scipy/stats/tests/test_stats.py,sha256=yNC3SPq7IPFJWZLJxBAZS4z3n_mn8VzVAL8VV1yug8M,360179
+scipy/stats/tests/test_survival.py,sha256=ky3R88sMfKUkqTs6wXUTjOjK1BzCWpxS16crycohUps,22265
+scipy/stats/tests/test_tukeylambda_stats.py,sha256=6WUBNVoTseVjfrHfWXtU11gTgmRcdnwAPLQOI0y_5U8,3231
+scipy/stats/tests/test_variation.py,sha256=Xnsn0fk4lqtk-ji1VhXxTdDAg9fHv02Q6Uv82-Xx6v4,6292
+scipy/version.py,sha256=JZiJ_CrvfTTDHoKWU40OrVAfmZynLFk9T5-Wy9SNHuo,264
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..4e4c38ae320920b8f083b87f408214cdecd350d2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy-1.13.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: meson
+Root-Is-Purelib: false
+Tag: cp310-cp310-manylinux_2_17_x86_64
+Tag: cp310-cp310-manylinux2014_x86_64
+
diff --git a/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..de6633112c1f9951fd688e1fb43457a1ec11d6d8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2010-2020 Benjamin Peterson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..6d7525c2ebcfe25cb6787579bf5324da1fd6f28a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/METADATA
@@ -0,0 +1,49 @@
+Metadata-Version: 2.1
+Name: six
+Version: 1.16.0
+Summary: Python 2 and 3 compatibility utilities
+Home-page: https://github.com/benjaminp/six
+Author: Benjamin Peterson
+Author-email: benjamin@python.org
+License: MIT
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*
+
+.. image:: https://img.shields.io/pypi/v/six.svg
+   :target: https://pypi.org/project/six/
+   :alt: six on PyPI
+
+.. image:: https://travis-ci.org/benjaminp/six.svg?branch=master
+   :target: https://travis-ci.org/benjaminp/six
+   :alt: six on TravisCI
+
+.. image:: https://readthedocs.org/projects/six/badge/?version=latest
+   :target: https://six.readthedocs.io/
+   :alt: six's documentation on Read the Docs
+
+.. image:: https://img.shields.io/badge/license-MIT-green.svg
+   :target: https://github.com/benjaminp/six/blob/master/LICENSE
+   :alt: MIT License badge
+
+Six is a Python 2 and 3 compatibility library.  It provides utility functions
+for smoothing over the differences between the Python versions with the goal of
+writing Python code that is compatible on both Python versions.  See the
+documentation for more information on what is provided.
+
+Six supports Python 2.7 and 3.3+.  It is contained in only one Python
+file, so it can be easily copied into your project. (The copyright and license
+notice must be retained.)
+
+Online documentation is at https://six.readthedocs.io/.
+
+Bugs can be reported to https://github.com/benjaminp/six.  The code can also
+be found there.
+
+
diff --git a/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..ed0b59873cf4b0e6dcb600c4ebf5db608b49b01f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD
@@ -0,0 +1,8 @@
+__pycache__/six.cpython-310.pyc,,
+six-1.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+six-1.16.0.dist-info/LICENSE,sha256=i7hQxWWqOJ_cFvOkaWWtI9gq3_YPI5P8J2K2MYXo5sk,1066
+six-1.16.0.dist-info/METADATA,sha256=VQcGIFCAEmfZcl77E5riPCN4v2TIsc_qtacnjxKHJoI,1795
+six-1.16.0.dist-info/RECORD,,
+six-1.16.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+six-1.16.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4
+six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549
diff --git a/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..01b8fc7d4a10cb8b4f1d21f11d3398d07d6b3478
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ffe2fce498955b628014618b28c6bcf152466a4a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+six
diff --git a/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f2927f5f8147f137783bb5072794999e04655cfd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2019, threadpoolctl contributors
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of copyright holder nor the names of its contributors
+      may be used to endorse or promote products derived from this software
+      without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..a2aca2c91847b4368d403b04f8335a20a70bf565
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/METADATA
@@ -0,0 +1,399 @@
+Metadata-Version: 2.1
+Name: threadpoolctl
+Version: 3.5.0
+Summary: threadpoolctl
+Home-page: https://github.com/joblib/threadpoolctl
+License: BSD-3-Clause
+Author: Thomas Moreau
+Author-email: thomas.moreau.2010@gmail.com
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+# Thread-pool Controls [![Build Status](https://dev.azure.com/joblib/threadpoolctl/_apis/build/status/joblib.threadpoolctl?branchName=master)](https://dev.azure.com/joblib/threadpoolctl/_build/latest?definitionId=1&branchName=master) [![codecov](https://codecov.io/gh/joblib/threadpoolctl/branch/master/graph/badge.svg)](https://codecov.io/gh/joblib/threadpoolctl)
+
+Python helpers to limit the number of threads used in the
+thread-pools of common native libraries used for scientific
+computing and data science (e.g. BLAS and OpenMP).
+
+Fine control of the underlying thread-pool size can be useful in
+workloads that involve nested parallelism so as to mitigate
+oversubscription issues.
+
+## Installation
+
+- For users, install the last published version from PyPI:
+
+  ```bash
+  pip install threadpoolctl
+  ```
+
+- For contributors, install from the source repository in developer
+  mode:
+
+  ```bash
+  pip install -r dev-requirements.txt
+  flit install --symlink
+  ```
+
+  then run the tests with pytest:
+
+  ```bash
+  pytest
+  ```
+
+## Usage
+
+### Command Line Interface
+
+Get a JSON description of thread-pools initialized when importing python
+packages such as numpy or scipy for instance:
+
+```
+python -m threadpoolctl -i numpy scipy.linalg
+[
+  {
+    "filepath": "/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so",
+    "prefix": "libmkl_rt",
+    "user_api": "blas",
+    "internal_api": "mkl",
+    "version": "2019.0.4",
+    "num_threads": 2,
+    "threading_layer": "intel"
+  },
+  {
+    "filepath": "/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so",
+    "prefix": "libiomp",
+    "user_api": "openmp",
+    "internal_api": "openmp",
+    "version": null,
+    "num_threads": 4
+  }
+]
+```
+
+The JSON information is written on STDOUT. If some of the packages are missing,
+a warning message is displayed on STDERR.
+
+### Python Runtime Programmatic Introspection
+
+Introspect the current state of the threadpool-enabled runtime libraries
+that are loaded when importing Python packages:
+
+```python
+>>> from threadpoolctl import threadpool_info
+>>> from pprint import pprint
+>>> pprint(threadpool_info())
+[]
+
+>>> import numpy
+>>> pprint(threadpool_info())
+[{'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so',
+  'internal_api': 'mkl',
+  'num_threads': 2,
+  'prefix': 'libmkl_rt',
+  'threading_layer': 'intel',
+  'user_api': 'blas',
+  'version': '2019.0.4'},
+ {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so',
+  'internal_api': 'openmp',
+  'num_threads': 4,
+  'prefix': 'libiomp',
+  'user_api': 'openmp',
+  'version': None}]
+
+>>> import xgboost
+>>> pprint(threadpool_info())
+[{'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so',
+  'internal_api': 'mkl',
+  'num_threads': 2,
+  'prefix': 'libmkl_rt',
+  'threading_layer': 'intel',
+  'user_api': 'blas',
+  'version': '2019.0.4'},
+ {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so',
+  'internal_api': 'openmp',
+  'num_threads': 4,
+  'prefix': 'libiomp',
+  'user_api': 'openmp',
+  'version': None},
+ {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libgomp.so.1.0.0',
+  'internal_api': 'openmp',
+  'num_threads': 4,
+  'prefix': 'libgomp',
+  'user_api': 'openmp',
+  'version': None}]
+```
+
+In the above example, `numpy` was installed from the default anaconda channel and comes
+with MKL and its Intel OpenMP (`libiomp5`) implementation while `xgboost` was installed
+from pypi.org and links against GNU OpenMP (`libgomp`) so both OpenMP runtimes are
+loaded in the same Python program.
+
+The state of these libraries is also accessible through the object oriented API:
+
+```python
+>>> from threadpoolctl import ThreadpoolController, threadpool_info
+>>> from pprint import pprint
+>>> import numpy
+>>> controller = ThreadpoolController()
+>>> pprint(controller.info())
+[{'architecture': 'Haswell',
+  'filepath': '/home/jeremie/miniconda/envs/dev/lib/libopenblasp-r0.3.17.so',
+  'internal_api': 'openblas',
+  'num_threads': 4,
+  'prefix': 'libopenblas',
+  'threading_layer': 'pthreads',
+  'user_api': 'blas',
+  'version': '0.3.17'}]
+
+>>> controller.info() == threadpool_info()
+True
+```
+
+### Setting the Maximum Size of Thread-Pools
+
+Control the number of threads used by the underlying runtime libraries
+in specific sections of your Python program:
+
+```python
+>>> from threadpoolctl import threadpool_limits
+>>> import numpy as np
+
+>>> with threadpool_limits(limits=1, user_api='blas'):
+...     # In this block, calls to blas implementation (like openblas or MKL)
+...     # will be limited to use only one thread. They can thus be used jointly
+...     # with thread-parallelism.
+...     a = np.random.randn(1000, 1000)
+...     a_squared = a @ a
+```
+
+The threadpools can also be controlled via the object oriented API, which is especially
+useful to avoid searching through all the loaded shared libraries each time. It will
+however not act on libraries loaded after the instantiation of the
+`ThreadpoolController`:
+
+```python
+>>> from threadpoolctl import ThreadpoolController
+>>> import numpy as np
+>>> controller = ThreadpoolController()
+
+>>> with controller.limit(limits=1, user_api='blas'):
+...     a = np.random.randn(1000, 1000)
+...     a_squared = a @ a
+```
+
+### Restricting the limits to the scope of a function
+
+`threadpool_limits` and `ThreadpoolController` can also be used as decorators to set
+the maximum number of threads used by the supported libraries at a function level. The
+decorators are accessible through their `wrap` method:
+
+```python
+>>> from threadpoolctl import ThreadpoolController, threadpool_limits
+>>> import numpy as np
+>>> controller = ThreadpoolController()
+
+>>> @controller.wrap(limits=1, user_api='blas')
+... # or @threadpool_limits.wrap(limits=1, user_api='blas')
+... def my_func():
+...     # Inside this function, calls to blas implementation (like openblas or MKL)
+...     # will be limited to use only one thread.
+...     a = np.random.randn(1000, 1000)
+...     a_squared = a @ a
+...
+```
+
+### Switching the FlexiBLAS backend
+
+`FlexiBLAS` is a BLAS wrapper for which the BLAS backend can be switched at runtime.
+`threadpoolctl` exposes python bindings for this feature. Here's an example but note
+that this part of the API is experimental and subject to change without deprecation:
+
+```python
+>>> from threadpoolctl import ThreadpoolController
+>>> import numpy as np
+>>> controller = ThreadpoolController()
+
+>>> controller.info()
+[{'user_api': 'blas',
+  'internal_api': 'flexiblas',
+  'num_threads': 1,
+  'prefix': 'libflexiblas',
+  'filepath': '/usr/local/lib/libflexiblas.so.3.3',
+  'version': '3.3.1',
+  'available_backends': ['NETLIB', 'OPENBLASPTHREAD', 'ATLAS'],
+  'loaded_backends': ['NETLIB'],
+  'current_backend': 'NETLIB'}]
+
+# Retrieve the flexiblas controller
+>>> flexiblas_ct = controller.select(internal_api="flexiblas").lib_controllers[0]
+
+# Switch the backend with one predefined at build time (listed in "available_backends")
+>>> flexiblas_ct.switch_backend("OPENBLASPTHREAD")
+>>> controller.info()
+[{'user_api': 'blas',
+  'internal_api': 'flexiblas',
+  'num_threads': 4,
+  'prefix': 'libflexiblas',
+  'filepath': '/usr/local/lib/libflexiblas.so.3.3',
+  'version': '3.3.1',
+  'available_backends': ['NETLIB', 'OPENBLASPTHREAD', 'ATLAS'],
+  'loaded_backends': ['NETLIB', 'OPENBLASPTHREAD'],
+  'current_backend': 'OPENBLASPTHREAD'},
+ {'user_api': 'blas',
+  'internal_api': 'openblas',
+  'num_threads': 4,
+  'prefix': 'libopenblas',
+  'filepath': '/usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.8.so',
+  'version': '0.3.8',
+  'threading_layer': 'pthreads',
+  'architecture': 'Haswell'}]
+
+# It's also possible to directly give the path to a shared library
+>>> flexiblas_controller.switch_backend("/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libmkl_rt.so")
+>>> controller.info()
+[{'user_api': 'blas',
+  'internal_api': 'flexiblas',
+  'num_threads': 2,
+  'prefix': 'libflexiblas',
+  'filepath': '/usr/local/lib/libflexiblas.so.3.3',
+  'version': '3.3.1',
+  'available_backends': ['NETLIB', 'OPENBLASPTHREAD', 'ATLAS'],
+  'loaded_backends': ['NETLIB',
+   'OPENBLASPTHREAD',
+   '/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libmkl_rt.so'],
+  'current_backend': '/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libmkl_rt.so'},
+ {'user_api': 'openmp',
+  'internal_api': 'openmp',
+  'num_threads': 4,
+  'prefix': 'libomp',
+  'filepath': '/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libomp.so',
+  'version': None},
+ {'user_api': 'blas',
+  'internal_api': 'openblas',
+  'num_threads': 4,
+  'prefix': 'libopenblas',
+  'filepath': '/usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.8.so',
+  'version': '0.3.8',
+  'threading_layer': 'pthreads',
+  'architecture': 'Haswell'},
+ {'user_api': 'blas',
+  'internal_api': 'mkl',
+  'num_threads': 2,
+  'prefix': 'libmkl_rt',
+  'filepath': '/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libmkl_rt.so.2',
+  'version': '2024.0-Product',
+  'threading_layer': 'gnu'}]
+```
+
+You can observe that the previously linked OpenBLAS shared object stays loaded by
+the Python program indefinitely, but FlexiBLAS itself no longer delegates BLAS calls
+to OpenBLAS as indicated by the `current_backend` attribute.
+
+### Writing a custom library controller
+
+Currently, `threadpoolctl` has support for `OpenMP` and the main `BLAS` libraries.
+However it can also be used to control the threadpool of other native libraries,
+provided that they expose an API to get and set the limit on the number of threads.
+For that, one must implement a controller for this library and register it to
+`threadpoolctl`.
+
+A custom controller must be a subclass of the `LibController` class and implement
+the attributes and methods described in the docstring of `LibController`. Then this
+new controller class must be registered using the `threadpoolctl.register` function.
+A complete example can be found [here](
+  https://github.com/joblib/threadpoolctl/blob/master/tests/_pyMylib/__init__.py).
+
+### Sequential BLAS within OpenMP parallel region
+
+When one wants to have sequential BLAS calls within an OpenMP parallel region, it's
+safer to set `limits="sequential_blas_under_openmp"` since setting `limits=1` and
+`user_api="blas"` might not lead to the expected behavior in some configurations
+(e.g. OpenBLAS with the OpenMP threading layer
+https://github.com/xianyi/OpenBLAS/issues/2985).
+
+### Known Limitations
+
+- `threadpool_limits` can fail to limit the number of inner threads when nesting
+  parallel loops managed by distinct OpenMP runtime implementations (for instance
+  libgomp from GCC and libomp from clang/llvm or libiomp from ICC).
+
+  See the `test_openmp_nesting` function in [tests/test_threadpoolctl.py](
+  https://github.com/joblib/threadpoolctl/blob/master/tests/test_threadpoolctl.py)
+  for an example. More information can be found at:
+  https://github.com/jeremiedbb/Nested_OpenMP
+
+  Note however that this problem does not happen when `threadpool_limits` is
+  used to limit the number of threads used internally by BLAS calls that are
+  themselves nested under OpenMP parallel loops. `threadpool_limits` works as
+  expected, even if the inner BLAS implementation relies on a distinct OpenMP
+  implementation.
+
+- Using Intel OpenMP (ICC) and LLVM OpenMP (clang) in the same Python program
+  under Linux is known to cause problems. See the following guide for more details
+  and workarounds:
+  https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md
+
+- Setting the maximum number of threads of the OpenMP and BLAS libraries has a global
+  effect and impacts the whole Python process. There is no thread level isolation as
+  these libraries do not offer thread-local APIs to configure the number of threads to
+  use in nested parallel calls.
+
+
+## Maintainers
+
+To make a release:
+
+- Bump the version number (`__version__`) in `threadpoolctl.py` and update the
+  release date in `CHANGES.md`.
+
+- Build the distribution archives:
+
+```bash
+pip install flit
+flit build
+```
+
+and check the contents of `dist/`.
+
+- If everything is fine, make a commit for the release, tag it and push the
+tag to github:
+
+```bash
+git tag -a X.Y.Z
+git push git@github.com:joblib/threadpoolctl.git X.Y.Z
+```
+
+- Upload the wheels and source distribution to PyPI using flit. Since PyPI doesn't
+  allow password authentication anymore, the username needs to be changed to the
+  generic name `__token__`:
+
+```bash
+FLIT_USERNAME=__token__ flit publish
+```
+
+  and a PyPI token has to be passed in place of the password.
+
+- Create a PR for the release on the [conda-forge feedstock](https://github.com/conda-forge/threadpoolctl-feedstock) (or wait for the bot to make it).
+
+- Publish the release on github.
+
+### Credits
+
+The initial dynamic library introspection code was written by @anton-malakhov
+for the smp package available at https://github.com/IntelPython/smp .
+
+threadpoolctl extends this for other operating systems. Contrary to smp,
+threadpoolctl does not attempt to limit the size of Python multiprocessing
+pools (threads or processes) or set operating system-level CPU affinity
+constraints: threadpoolctl only interacts with native libraries via their
+public runtime APIs.
+
diff --git a/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..212ec7792901753594d7be9a9cb24949833db5a4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/RECORD
@@ -0,0 +1,7 @@
+__pycache__/threadpoolctl.cpython-310.pyc,,
+threadpoolctl-3.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+threadpoolctl-3.5.0.dist-info/LICENSE,sha256=gaxhkHUkiwblNmC2UtEOSF9GdfXQrg-X6iI3DaH34js,1507
+threadpoolctl-3.5.0.dist-info/METADATA,sha256=bii6pzPjk7LkchgJA-bH78CUpoWJFgkYNbA9GgRcsIw,13850
+threadpoolctl-3.5.0.dist-info/RECORD,,
+threadpoolctl-3.5.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
+threadpoolctl.py,sha256=8HuaDv5jTRKMgptXrlqMzxSsSyNiA7u9bcb84smbVaM,50023
diff --git a/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/threadpoolctl-3.5.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.9.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/llmeval-env/lib/python3.10/site-packages/zstandard/__init__.py b/llmeval-env/lib/python3.10/site-packages/zstandard/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8588729004b2c22b2e2150679d63120e122579a4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/zstandard/__init__.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2017-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+"""Python interface to the Zstandard (zstd) compression library."""
+
+from __future__ import absolute_import, unicode_literals
+
+# This module serves 2 roles:
+#
+# 1) Export the C or CFFI "backend" through a central module.
+# 2) Implement additional functionality built on top of C or CFFI backend.
+
+import builtins
+import io
+import os
+import platform
+
+from typing import ByteString
+
+# Some Python implementations don't support C extensions. That's why we have
+# a CFFI implementation in the first place. The code here import one of our
+# "backends" then re-exports the symbols from this module. For convenience,
+# we support falling back to the CFFI backend if the C extension can't be
+# imported. But for performance reasons, we only do this on unknown Python
+# implementation. Notably, for CPython we require the C extension by default.
+# Because someone will inevitably want special behavior, the behavior is
+# configurable via an environment variable. A potentially better way to handle
+# this is to import a special ``__importpolicy__`` module or something
+# defining a variable and `setup.py` could write the file with whatever
+# policy was specified at build time. Until someone needs it, we go with
+# the hacky but simple environment variable approach.
# Backend selection is controlled by the PYTHON_ZSTANDARD_IMPORT_POLICY
# environment variable (rationale in the comment block above).
_module_policy = os.environ.get("PYTHON_ZSTANDARD_IMPORT_POLICY", "default")

if _module_policy == "default":
    if platform.python_implementation() in ("CPython",):
        # On CPython the C extension is required; no silent fallback.
        from .backend_c import *  # type: ignore

        backend = "cext"
    elif platform.python_implementation() in ("PyPy",):
        # PyPy is served by the CFFI backend.
        from .backend_cffi import *  # type: ignore

        backend = "cffi"
    else:
        # Unknown implementations: prefer the C extension, fall back to
        # CFFI if it cannot be imported.
        try:
            from .backend_c import *

            backend = "cext"
        except ImportError:
            from .backend_cffi import *

            backend = "cffi"
elif _module_policy == "cffi_fallback":
    # Explicit opt-in to "C extension if available, else CFFI" on any
    # implementation, including CPython.
    try:
        from .backend_c import *

        backend = "cext"
    except ImportError:
        from .backend_cffi import *

        backend = "cffi"
elif _module_policy == "rust":
    from .backend_rust import *  # type: ignore

    backend = "rust"
elif _module_policy == "cext":
    from .backend_c import *

    backend = "cext"
elif _module_policy == "cffi":
    from .backend_cffi import *

    backend = "cffi"
else:
    # NOTE: "rust" is accepted above but deliberately omitted from this
    # message, which lists only the stable policies.
    raise ImportError(
        "unknown module import policy: %s; use default, cffi_fallback, "
        "cext, or cffi" % _module_policy
    )

# Keep this in sync with python-zstandard.h, rust-ext/src/lib.rs, and debian/changelog.
__version__ = "0.22.0"

# File-object states.
# NOTE(review): these constants are not referenced elsewhere in this file;
# presumably retained for backward compatibility — confirm before removing.
_MODE_CLOSED = 0
_MODE_READ = 1
_MODE_WRITE = 2
+
+
def open(
    filename,
    mode="rb",
    cctx=None,
    dctx=None,
    encoding=None,
    errors=None,
    newline=None,
    closefd=None,
):
    """Open a file object with transparent zstd (de)compression.

    Depending on ``mode``, the returned object is a
    :py:class:`ZstdDecompressionReader` (binary read), a
    :py:class:`ZstdCompressionWriter` (binary write), or an
    ``io.TextIOWrapper`` layered over one of those (text mode).

    :param filename:
       Path (``str``, ``bytes``, or ``os.PathLike``) of a file to open,
       or an existing file object exposing ``read()`` / ``write()``.
    :param mode:
       ``str`` open mode. Read modes ``r``/``rb`` and write modes
       ``w``/``wb``/``a``/``ab``/``x``/``xb`` are supported, optionally
       combined with ``t`` for text mode.
    :param cctx:
       ``ZstdCompressor`` used when writing; a default instance is
       created when omitted.
    :param dctx:
       ``ZstdDecompressor`` used when reading; a default instance is
       created when omitted.
    :param encoding:
        ``str`` text encoding used in text mode.
    :param errors:
       ``str`` encoding error handling used in text mode.
    :param newline:
       ``str`` newline handling used in text mode.
    :param closefd:
       ``bool`` whether closing the returned object also closes the
        underlying file. Only honored when a file object is passed;
        files opened from a path are always closed.
    """
    # "t" only selects the TextIOWrapper layer; strip it to classify the mode.
    binary_mode = mode.replace("t", "")

    if binary_mode in ("r", "rb"):
        dctx = dctx or ZstdDecompressor()
        direction = "r"
        raw_open_mode = "rb"
    elif binary_mode in ("w", "wb", "a", "ab", "x", "xb"):
        cctx = cctx or ZstdCompressor()
        direction = "w"
        raw_open_mode = (
            binary_mode if binary_mode.endswith("b") else binary_mode + "b"
        )
    else:
        raise ValueError("Invalid mode: {!r}".format(mode))

    # os.PathLike exists on all supported Pythons, but keep the defensive
    # hasattr() check the original shipped with.
    path_types = (
        (str, bytes, os.PathLike) if hasattr(os, "PathLike") else (str, bytes)
    )

    if isinstance(filename, path_types):  # type: ignore
        inner_fh = builtins.open(filename, raw_open_mode)
        closefd = True
    elif hasattr(filename, "read") or hasattr(filename, "write"):
        inner_fh = filename
        closefd = bool(closefd)
    else:
        raise TypeError(
            "filename must be a str, bytes, file or PathLike object"
        )

    if direction == "r":
        fh = dctx.stream_reader(inner_fh, closefd=closefd)
    elif direction == "w":
        fh = cctx.stream_writer(inner_fh, closefd=closefd)
    else:
        raise RuntimeError("logic error in zstandard.open() handling open mode")

    if "b" in binary_mode:
        return fh

    return io.TextIOWrapper(fh, encoding=encoding, errors=errors, newline=newline)
+
+
def compress(data: ByteString, level: int = 3) -> bytes:
    """One-shot compression of *data* into a zstd frame.

    Convenience wrapper equivalent to
    ``ZstdCompressor(level=level).compress(data)``, using basic/default
    compression settings at the given *level*.

    If you call this in a tight loop, construct a single
    ``ZstdCompressor`` and reuse its ``compress()`` method instead — that
    avoids rebuilding the compression context on every call.
    """
    return ZstdCompressor(level=level).compress(data)
+
+
def decompress(data: ByteString, max_output_size: int = 0) -> bytes:
    """One-shot decompression of a zstd frame back into its original data.

    Convenience wrapper equivalent to
    ``ZstdDecompressor().decompress(data, max_output_size=max_output_size)``,
    using basic/default settings.

    If you call this in a tight loop, construct a single
    ``ZstdDecompressor`` and reuse its ``decompress()`` method instead —
    that avoids rebuilding the decompression context on every call.
    """
    return ZstdDecompressor().decompress(data, max_output_size=max_output_size)
diff --git a/llmeval-env/lib/python3.10/site-packages/zstandard/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/zstandard/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..c95a73e89b9a3bbcf740cc5daf63d16a92472130
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/zstandard/__init__.pyi
@@ -0,0 +1,480 @@
+# Copyright (c) 2016-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import os
+
+from typing import (
+    BinaryIO,
+    ByteString,
+    Generator,
+    IO,
+    Iterable,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    Union,
+)
+
# NOTE(review): the constants below presumably mirror libzstd's ZSTD_*
# constants re-exported by the active backend — confirm against
# backend_c / backend_cffi before relying on specific values.

# Flush modes for ZstdCompressionWriter.flush().
FLUSH_BLOCK: int
FLUSH_FRAME: int

# Flush modes for ZstdCompressionObj.flush().
COMPRESSOBJ_FLUSH_FINISH: int
COMPRESSOBJ_FLUSH_BLOCK: int

# Sentinel values for unknown / erroneous frame content sizes.
CONTENTSIZE_UNKNOWN: int
CONTENTSIZE_ERROR: int

MAX_COMPRESSION_LEVEL: int

# Recommended buffer sizes for streaming (de)compression.
COMPRESSION_RECOMMENDED_INPUT_SIZE: int
COMPRESSION_RECOMMENDED_OUTPUT_SIZE: int

DECOMPRESSION_RECOMMENDED_INPUT_SIZE: int
DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE: int

BLOCKSIZELOG_MAX: int
BLOCKSIZE_MAX: int

# Valid ranges for ZstdCompressionParameters fields.
WINDOWLOG_MIN: int
WINDOWLOG_MAX: int

CHAINLOG_MIN: int
CHAINLOG_MAX: int
HASHLOG_MIN: int
HASHLOG_MAX: int
MINMATCH_MIN: int
MINMATCH_MAX: int
SEARCHLOG_MIN: int
SEARCHLOG_MAX: int
SEARCHLENGTH_MIN: int
SEARCHLENGTH_MAX: int
TARGETLENGTH_MIN: int
TARGETLENGTH_MAX: int
LDM_MINMATCH_MIN: int
LDM_MINMATCH_MAX: int
LDM_BUCKETSIZELOG_MAX: int

# Compression strategy identifiers.
STRATEGY_FAST: int
STRATEGY_DFAST: int
STRATEGY_GREEDY: int
STRATEGY_LAZY: int
STRATEGY_LAZY2: int
STRATEGY_BTLAZY2: int
STRATEGY_BTOPT: int
STRATEGY_BTULTRA: int
STRATEGY_BTULTRA2: int

# Dictionary content types accepted by ZstdCompressionDict.
DICT_TYPE_AUTO: int
DICT_TYPE_RAWCONTENT: int
DICT_TYPE_FULLDICT: int

# Frame formats (standard zstd vs. magicless).
FORMAT_ZSTD1: int
FORMAT_ZSTD1_MAGICLESS: int

ZSTD_VERSION: Tuple[int, int, int]
FRAME_HEADER: bytes
MAGIC_NUMBER: int

# Name of the loaded backend ("cext", "cffi", or "rust") and its features.
backend: str
backend_features: Set[str]
__version__: str
+
+class ZstdError(Exception): ...
+
class BufferSegment(object):
    """A single segment (offset + data) within a BufferWithSegments."""

    offset: int
    def __len__(self) -> int: ...
    def tobytes(self) -> bytes: ...
+
class BufferSegments(object):
    """Indexable collection of BufferSegment objects."""

    def __len__(self) -> int: ...
    def __getitem__(self, i: int) -> BufferSegment: ...
+
class BufferWithSegments(object):
    """A memory buffer plus metadata describing segments within it."""

    size: int
    def __init__(self, data: ByteString, segments: ByteString): ...
    def __len__(self) -> int: ...
    def __getitem__(self, i: int) -> BufferSegment: ...
    def segments(self): ...
    def tobytes(self) -> bytes: ...
+
class BufferWithSegmentsCollection(object):
    """Indexable aggregation of multiple BufferWithSegments objects."""

    def __init__(self, *args): ...
    def __len__(self) -> int: ...
    def __getitem__(self, i: int) -> BufferSegment: ...
    def size(self) -> int: ...
+
class ZstdCompressionParameters(object):
    """Bundle of low-level compression settings.

    NOTE(review): the fields appear to mirror libzstd's advanced
    compression parameters (window/chain/hash logs, LDM, threading) —
    confirm against the backend implementation.
    """

    @staticmethod
    def from_level(
        level: int, source_size: int = ..., dict_size: int = ..., **kwargs
    ) -> "ZstdCompressionParameters": ...
    def __init__(
        self,
        format: int = ...,
        compression_level: int = ...,
        window_log: int = ...,
        hash_log: int = ...,
        chain_log: int = ...,
        search_log: int = ...,
        min_match: int = ...,
        target_length: int = ...,
        strategy: int = ...,
        write_content_size: int = ...,
        write_checksum: int = ...,
        write_dict_id: int = ...,
        job_size: int = ...,
        overlap_log: int = ...,
        force_max_window: int = ...,
        enable_ldm: int = ...,
        ldm_hash_log: int = ...,
        ldm_min_match: int = ...,
        ldm_bucket_size_log: int = ...,
        ldm_hash_rate_log: int = ...,
        threads: int = ...,
    ): ...
    # Read-only accessors matching the constructor keyword arguments.
    @property
    def format(self) -> int: ...
    @property
    def compression_level(self) -> int: ...
    @property
    def window_log(self) -> int: ...
    @property
    def hash_log(self) -> int: ...
    @property
    def chain_log(self) -> int: ...
    @property
    def search_log(self) -> int: ...
    @property
    def min_match(self) -> int: ...
    @property
    def target_length(self) -> int: ...
    @property
    def strategy(self) -> int: ...
    @property
    def write_content_size(self) -> int: ...
    @property
    def write_checksum(self) -> int: ...
    @property
    def write_dict_id(self) -> int: ...
    @property
    def job_size(self) -> int: ...
    @property
    def overlap_log(self) -> int: ...
    @property
    def force_max_window(self) -> int: ...
    @property
    def enable_ldm(self) -> int: ...
    @property
    def ldm_hash_log(self) -> int: ...
    @property
    def ldm_min_match(self) -> int: ...
    @property
    def ldm_bucket_size_log(self) -> int: ...
    @property
    def ldm_hash_rate_log(self) -> int: ...
    @property
    def threads(self) -> int: ...
    def estimated_compression_context_size(self) -> int: ...
+
+class CompressionParameters(ZstdCompressionParameters): ...
+
class ZstdCompressionDict(object):
    """A compression dictionary built from raw *data*.

    ``k`` and ``d`` are dictionary-training parameters accepted by the
    constructor and exposed as attributes.
    """

    k: int
    d: int
    def __init__(
        self,
        data: ByteString,
        dict_type: int = ...,
        k: int = ...,
        d: int = ...,
    ): ...
    def __len__(self) -> int: ...
    def dict_id(self) -> int: ...
    def as_bytes(self) -> bytes: ...
    def precompute_compress(
        self,
        level: int = ...,
        compression_params: ZstdCompressionParameters = ...,
    ): ...
+
class ZstdCompressionObj(object):
    """Incremental compressor exposing compress()/flush() methods."""

    def compress(self, data: ByteString) -> bytes: ...
    def flush(self, flush_mode: int = ...) -> bytes: ...
+
class ZstdCompressionChunker(object):
    """Incremental compression API with compress()/flush()/finish()."""

    def compress(self, data: ByteString): ...
    def flush(self): ...
    def finish(self): ...
+
class ZstdCompressionReader(BinaryIO):
    """Binary file-like object whose read() produces compressed data.

    Returned by ZstdCompressor.stream_reader().
    """

    def __enter__(self) -> "ZstdCompressionReader": ...
    def __exit__(self, exc_type, exc_value, exc_tb): ...
    def readable(self) -> bool: ...
    def writable(self) -> bool: ...
    def seekable(self) -> bool: ...
    def readline(self, limit: int = ...) -> bytes: ...
    def readlines(self, hint: int = ...) -> List[bytes]: ...
    def write(self, data: ByteString): ...
    def writelines(self, data: Iterable[bytes]): ...
    def isatty(self) -> bool: ...
    def flush(self): ...
    def close(self): ...
    @property
    def closed(self) -> bool: ...
    def tell(self) -> int: ...
    def readall(self) -> bytes: ...
    def __iter__(self): ...
    def __next__(self): ...
    def next(self): ...
    def read(self, size: int = ...) -> bytes: ...
    def read1(self, size: int = ...) -> bytes: ...
    def readinto(self, b) -> int: ...
    def readinto1(self, b) -> int: ...
+
class ZstdCompressionWriter(BinaryIO):
    """Binary file-like object that compresses data written to it.

    Returned by ZstdCompressor.stream_writer().
    """

    def __enter__(self) -> "ZstdCompressionWriter": ...
    def __exit__(self, exc_type, exc_value, exc_tb): ...
    def memory_size(self) -> int: ...
    def fileno(self) -> int: ...
    def close(self): ...
    @property
    def closed(self) -> bool: ...
    def isatty(self) -> bool: ...
    def readable(self) -> bool: ...
    def readline(self, size: int = ...) -> bytes: ...
    def readlines(self, hint: int = ...) -> List[bytes]: ...
    def seek(self, offset: int, whence: int = ...): ...
    def seekable(self) -> bool: ...
    def truncate(self, size: int = ...): ...
    def writable(self) -> bool: ...
    def writelines(self, lines: Iterable[bytes]): ...
    def read(self, size: int = ...) -> bytes: ...
    def readall(self) -> bytes: ...
    def readinto(self, b): ...
    def write(self, data: ByteString) -> int: ...
    def flush(self, flush_mode: int = ...) -> int: ...
    def tell(self) -> int: ...
+
class ZstdCompressor(object):
    """Compression context: entry point for one-shot, streaming, chunked,
    and multi-buffer compression APIs."""

    def __init__(
        self,
        level: int = ...,
        dict_data: Optional[ZstdCompressionDict] = ...,
        compression_params: Optional[ZstdCompressionParameters] = ...,
        write_checksum: Optional[bool] = ...,
        write_content_size: Optional[bool] = ...,
        write_dict_id: Optional[bool] = ...,
        threads: int = ...,
    ): ...
    def memory_size(self) -> int: ...
    def compress(self, data: ByteString) -> bytes: ...
    def compressobj(self, size: int = ...) -> ZstdCompressionObj: ...
    def chunker(
        self, size: int = ..., chunk_size: int = ...
    ) -> ZstdCompressionChunker: ...
    def copy_stream(
        self,
        ifh: IO[bytes],
        ofh: IO[bytes],
        size: int = ...,
        read_size: int = ...,
        write_size: int = ...,
    ) -> Tuple[int, int]: ...
    def stream_reader(
        self,
        source: Union[IO[bytes], ByteString],
        size: int = ...,
        read_size: int = ...,
        *,
        closefd: bool = ...,
    ) -> ZstdCompressionReader: ...
    def stream_writer(
        self,
        writer: IO[bytes],
        size: int = ...,
        write_size: int = ...,
        write_return_read: bool = ...,
        *,
        closefd: bool = ...,
    ) -> ZstdCompressionWriter: ...
    def read_to_iter(
        self,
        reader: Union[IO[bytes], ByteString],
        size: int = ...,
        read_size: int = ...,
        write_size: int = ...,
    ) -> Generator[bytes, None, None]: ...
    def frame_progression(self) -> Tuple[int, int, int]: ...
    def multi_compress_to_buffer(
        self,
        data: Union[
            BufferWithSegments,
            BufferWithSegmentsCollection,
            List[ByteString],
        ],
        threads: int = ...,
    ) -> BufferWithSegmentsCollection: ...
+
+class ZstdDecompressionObj(object):
+    # Type stub mirroring the zlib/bz2 decompressobj interface.
+    def decompress(self, data: ByteString) -> bytes: ...
+    def flush(self, length: int = ...) -> bytes: ...
+    @property
+    def unused_data(self) -> bytes: ...
+    @property
+    def unconsumed_tail(self) -> bytes: ...
+    @property
+    def eof(self) -> bool: ...
+
+class ZstdDecompressionReader(BinaryIO):
+    # Type stub for the readable decompressing stream returned by
+    # ZstdDecompressor.stream_reader(). Write-oriented methods are declared
+    # only to satisfy the BinaryIO interface.
+    def __enter__(self) -> "ZstdDecompressionReader": ...
+    def __exit__(self, exc_type, exc_value, exc_tb): ...
+    def readable(self) -> bool: ...
+    def writable(self) -> bool: ...
+    def seekable(self) -> bool: ...
+    def readline(self, size: int = ...): ...
+    def readlines(self, hint: int = ...): ...
+    def write(self, data: ByteString): ...
+    def writelines(self, lines: Iterable[bytes]): ...
+    def isatty(self) -> bool: ...
+    def flush(self): ...
+    def close(self): ...
+    @property
+    def closed(self) -> bool: ...
+    def tell(self) -> int: ...
+    def readall(self) -> bytes: ...
+    def __iter__(self): ...
+    def __next__(self): ...
+    def next(self): ...
+    def read(self, size: int = ...) -> bytes: ...
+    def readinto(self, b) -> int: ...
+    def read1(self, size: int = ...) -> bytes: ...
+    def readinto1(self, b) -> int: ...
+    def seek(self, pos: int, whence: int = ...) -> int: ...
+
+class ZstdDecompressionWriter(BinaryIO):
+    # Type stub for the writable decompressing stream returned by
+    # ZstdDecompressor.stream_writer(). Read-oriented methods are declared
+    # only to satisfy the BinaryIO interface.
+    def __enter__(self) -> "ZstdDecompressionWriter": ...
+    def __exit__(self, exc_type, exc_value, exc_tb): ...
+    def memory_size(self) -> int: ...
+    def close(self): ...
+    @property
+    def closed(self) -> bool: ...
+    def fileno(self) -> int: ...
+    def flush(self): ...
+    def isatty(self) -> bool: ...
+    def readable(self) -> bool: ...
+    def readline(self, size: int = ...): ...
+    def readlines(self, hint: int = ...): ...
+    def seek(self, offset: int, whence: int = ...): ...
+    def seekable(self) -> bool: ...
+    def tell(self): ...
+    def truncate(self, size: int = ...): ...
+    def writable(self) -> bool: ...
+    def writelines(self, lines: Iterable[bytes]): ...
+    def read(self, size: int = ...): ...
+    def readall(self): ...
+    def readinto(self, b): ...
+    def write(self, data: ByteString) -> int: ...
+
+class ZstdDecompressor(object):
+    # Type stub for the main decompression entry point.
+    # NOTE(review): a few annotations below are stylistically inconsistent
+    # with the rest of this stub — `decompress_content_dict_chain` uses
+    # lowercase `list[...]` instead of `List[...]`, and `stream_reader` /
+    # `decompressobj` spell literal defaults (`closefd=False`,
+    # `read_across_frames: bool = False`) instead of `...` — worth
+    # normalizing upstream.
+    def __init__(
+        self,
+        dict_data: Optional[ZstdCompressionDict] = ...,
+        max_window_size: int = ...,
+        format: int = ...,
+    ): ...
+    def memory_size(self) -> int: ...
+    def decompress(
+        self,
+        data: ByteString,
+        max_output_size: int = ...,
+        read_across_frames: bool = ...,
+        allow_extra_data: bool = ...,
+    ) -> bytes: ...
+    def stream_reader(
+        self,
+        source: Union[IO[bytes], ByteString],
+        read_size: int = ...,
+        read_across_frames: bool = ...,
+        *,
+        closefd=False,
+    ) -> ZstdDecompressionReader: ...
+    def decompressobj(
+        self, write_size: int = ..., read_across_frames: bool = False
+    ) -> ZstdDecompressionObj: ...
+    def read_to_iter(
+        self,
+        reader: Union[IO[bytes], ByteString],
+        read_size: int = ...,
+        write_size: int = ...,
+        skip_bytes: int = ...,
+    ) -> Generator[bytes, None, None]: ...
+    def stream_writer(
+        self,
+        writer: IO[bytes],
+        write_size: int = ...,
+        write_return_read: bool = ...,
+        *,
+        closefd: bool = ...,
+    ) -> ZstdDecompressionWriter: ...
+    # Returns (bytes_read, bytes_written).
+    def copy_stream(
+        self,
+        ifh: IO[bytes],
+        ofh: IO[bytes],
+        read_size: int = ...,
+        write_size: int = ...,
+    ) -> Tuple[int, int]: ...
+    def decompress_content_dict_chain(
+        self, frames: list[ByteString]
+    ) -> bytes: ...
+    def multi_decompress_to_buffer(
+        self,
+        frames: Union[
+            BufferWithSegments,
+            BufferWithSegmentsCollection,
+            List[ByteString],
+        ],
+        decompressed_sizes: ByteString = ...,
+        threads: int = ...,
+    ) -> BufferWithSegmentsCollection: ...
+
+class FrameParameters(object):
+    # Type stub: parsed zstd frame header fields, as returned by
+    # get_frame_parameters().
+    content_size: int
+    window_size: int
+    dict_id: int
+    has_checksum: bool
+
+# Frame inspection helpers.
+def estimate_decompression_context_size() -> int: ...
+def frame_content_size(data: ByteString) -> int: ...
+def frame_header_size(data: ByteString) -> int: ...
+def get_frame_parameters(data: ByteString) -> FrameParameters: ...
+# Trains a compression dictionary from a collection of samples.
+def train_dictionary(
+    dict_size: int,
+    samples: list[ByteString],
+    k: int = ...,
+    d: int = ...,
+    f: int = ...,
+    split_point: float = ...,
+    accel: int = ...,
+    notifications: int = ...,
+    dict_id: int = ...,
+    level: int = ...,
+    steps: int = ...,
+    threads: int = ...,
+) -> ZstdCompressionDict: ...
+# gzip.open()-style convenience opener for zstd files/streams.
+def open(
+    filename: Union[bytes, str, os.PathLike, BinaryIO],
+    mode: str = ...,
+    cctx: Optional[ZstdCompressor] = ...,
+    dctx: Optional[ZstdDecompressor] = ...,
+    encoding: Optional[str] = ...,
+    errors: Optional[str] = ...,
+    newline: Optional[str] = ...,
+    closefd: bool = ...,
+): ...
+# One-shot module-level conveniences.
+def compress(data: ByteString, level: int = ...) -> bytes: ...
+def decompress(data: ByteString, max_output_size: int = ...) -> bytes: ...
diff --git a/llmeval-env/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c1ca95ea8696aa8910f30b9144eaaccb155d066
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a07b061dcf3eb695f6ab6b03cd5d36e91388648d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/zstandard/backend_cffi.py b/llmeval-env/lib/python3.10/site-packages/zstandard/backend_cffi.py
new file mode 100644
index 0000000000000000000000000000000000000000..7137542f189cf842006b5da461583f0486a94493
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/zstandard/backend_cffi.py
@@ -0,0 +1,4477 @@
+# Copyright (c) 2016-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+"""Python interface to the Zstandard (zstd) compression library."""
+
+from __future__ import absolute_import, unicode_literals
+
+# This should match what the C extension exports.
+# Public API of the CFFI backend: types and functions first, then the
+# module-level integer constants re-exported from the zstd library.
+__all__ = [
+    "BufferSegment",
+    "BufferSegments",
+    "BufferWithSegments",
+    "BufferWithSegmentsCollection",
+    "ZstdCompressionChunker",
+    "ZstdCompressionDict",
+    "ZstdCompressionObj",
+    "ZstdCompressionParameters",
+    "ZstdCompressionReader",
+    "ZstdCompressionWriter",
+    "ZstdCompressor",
+    "ZstdDecompressionObj",
+    "ZstdDecompressionReader",
+    "ZstdDecompressionWriter",
+    "ZstdDecompressor",
+    "ZstdError",
+    "FrameParameters",
+    "backend_features",
+    "estimate_decompression_context_size",
+    "frame_content_size",
+    "frame_header_size",
+    "get_frame_parameters",
+    "train_dictionary",
+    # Constants.
+    "FLUSH_BLOCK",
+    "FLUSH_FRAME",
+    "COMPRESSOBJ_FLUSH_FINISH",
+    "COMPRESSOBJ_FLUSH_BLOCK",
+    "ZSTD_VERSION",
+    "FRAME_HEADER",
+    "CONTENTSIZE_UNKNOWN",
+    "CONTENTSIZE_ERROR",
+    "MAX_COMPRESSION_LEVEL",
+    "COMPRESSION_RECOMMENDED_INPUT_SIZE",
+    "COMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+    "DECOMPRESSION_RECOMMENDED_INPUT_SIZE",
+    "DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+    "MAGIC_NUMBER",
+    "BLOCKSIZELOG_MAX",
+    "BLOCKSIZE_MAX",
+    "WINDOWLOG_MIN",
+    "WINDOWLOG_MAX",
+    "CHAINLOG_MIN",
+    "CHAINLOG_MAX",
+    "HASHLOG_MIN",
+    "HASHLOG_MAX",
+    "MINMATCH_MIN",
+    "MINMATCH_MAX",
+    "SEARCHLOG_MIN",
+    "SEARCHLOG_MAX",
+    "SEARCHLENGTH_MIN",
+    "SEARCHLENGTH_MAX",
+    "TARGETLENGTH_MIN",
+    "TARGETLENGTH_MAX",
+    "LDM_MINMATCH_MIN",
+    "LDM_MINMATCH_MAX",
+    "LDM_BUCKETSIZELOG_MAX",
+    "STRATEGY_FAST",
+    "STRATEGY_DFAST",
+    "STRATEGY_GREEDY",
+    "STRATEGY_LAZY",
+    "STRATEGY_LAZY2",
+    "STRATEGY_BTLAZY2",
+    "STRATEGY_BTOPT",
+    "STRATEGY_BTULTRA",
+    "STRATEGY_BTULTRA2",
+    "DICT_TYPE_AUTO",
+    "DICT_TYPE_RAWCONTENT",
+    "DICT_TYPE_FULLDICT",
+    "FORMAT_ZSTD1",
+    "FORMAT_ZSTD1_MAGICLESS",
+]
+
+import io
+import os
+
+from ._cffi import (  # type: ignore
+    ffi,
+    lib,
+)
+
+
+backend_features = set()  # type: ignore
+
+# Recommended streaming buffer sizes, queried from the zstd library.
+COMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_CStreamInSize()
+COMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_CStreamOutSize()
+DECOMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_DStreamInSize()
+DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_DStreamOutSize()
+
+# CFFI allocator that does not zero-fill new allocations.
+new_nonzero = ffi.new_allocator(should_clear_after_alloc=False)
+
+
+MAX_COMPRESSION_LEVEL = lib.ZSTD_maxCLevel()
+MAGIC_NUMBER = lib.ZSTD_MAGICNUMBER
+FRAME_HEADER = b"\x28\xb5\x2f\xfd"
+CONTENTSIZE_UNKNOWN = lib.ZSTD_CONTENTSIZE_UNKNOWN
+CONTENTSIZE_ERROR = lib.ZSTD_CONTENTSIZE_ERROR
+ZSTD_VERSION = (
+    lib.ZSTD_VERSION_MAJOR,
+    lib.ZSTD_VERSION_MINOR,
+    lib.ZSTD_VERSION_RELEASE,
+)
+
+# Valid ranges for the low-level compression parameters.
+BLOCKSIZELOG_MAX = lib.ZSTD_BLOCKSIZELOG_MAX
+BLOCKSIZE_MAX = lib.ZSTD_BLOCKSIZE_MAX
+WINDOWLOG_MIN = lib.ZSTD_WINDOWLOG_MIN
+WINDOWLOG_MAX = lib.ZSTD_WINDOWLOG_MAX
+CHAINLOG_MIN = lib.ZSTD_CHAINLOG_MIN
+CHAINLOG_MAX = lib.ZSTD_CHAINLOG_MAX
+HASHLOG_MIN = lib.ZSTD_HASHLOG_MIN
+HASHLOG_MAX = lib.ZSTD_HASHLOG_MAX
+MINMATCH_MIN = lib.ZSTD_MINMATCH_MIN
+MINMATCH_MAX = lib.ZSTD_MINMATCH_MAX
+SEARCHLOG_MIN = lib.ZSTD_SEARCHLOG_MIN
+SEARCHLOG_MAX = lib.ZSTD_SEARCHLOG_MAX
+# SEARCHLENGTH_* are legacy aliases for the MINMATCH bounds.
+SEARCHLENGTH_MIN = lib.ZSTD_MINMATCH_MIN
+SEARCHLENGTH_MAX = lib.ZSTD_MINMATCH_MAX
+TARGETLENGTH_MIN = lib.ZSTD_TARGETLENGTH_MIN
+TARGETLENGTH_MAX = lib.ZSTD_TARGETLENGTH_MAX
+LDM_MINMATCH_MIN = lib.ZSTD_LDM_MINMATCH_MIN
+LDM_MINMATCH_MAX = lib.ZSTD_LDM_MINMATCH_MAX
+LDM_BUCKETSIZELOG_MAX = lib.ZSTD_LDM_BUCKETSIZELOG_MAX
+
+# Compression strategies, in increasing order of compression effort.
+STRATEGY_FAST = lib.ZSTD_fast
+STRATEGY_DFAST = lib.ZSTD_dfast
+STRATEGY_GREEDY = lib.ZSTD_greedy
+STRATEGY_LAZY = lib.ZSTD_lazy
+STRATEGY_LAZY2 = lib.ZSTD_lazy2
+STRATEGY_BTLAZY2 = lib.ZSTD_btlazy2
+STRATEGY_BTOPT = lib.ZSTD_btopt
+STRATEGY_BTULTRA = lib.ZSTD_btultra
+STRATEGY_BTULTRA2 = lib.ZSTD_btultra2
+
+DICT_TYPE_AUTO = lib.ZSTD_dct_auto
+DICT_TYPE_RAWCONTENT = lib.ZSTD_dct_rawContent
+DICT_TYPE_FULLDICT = lib.ZSTD_dct_fullDict
+
+FORMAT_ZSTD1 = lib.ZSTD_f_zstd1
+FORMAT_ZSTD1_MAGICLESS = lib.ZSTD_f_zstd1_magicless
+
+# Flush modes accepted by ZstdCompressionWriter.flush().
+FLUSH_BLOCK = 0
+FLUSH_FRAME = 1
+
+# Flush modes accepted by ZstdCompressionObj.flush().
+COMPRESSOBJ_FLUSH_FINISH = 0
+COMPRESSOBJ_FLUSH_BLOCK = 1
+
+
+def _cpu_count():
+    """Best-effort logical CPU count; returns 0 when it cannot be determined."""
+    # os.cpu_count() was introduced in Python 3.4.
+    try:
+        return os.cpu_count() or 0
+    except AttributeError:
+        pass
+
+    # Linux.
+    try:
+        return os.sysconf("SC_NPROCESSORS_ONLN")
+    except (AttributeError, ValueError):
+        pass
+
+    # TODO implement on other platforms.
+    return 0
+
+
+class BufferSegment:
+    """Represents a segment within a ``BufferWithSegments``.
+
+    This type is essentially a reference to N bytes within a
+    ``BufferWithSegments``.
+
+    The object conforms to the buffer protocol.
+    """
+
+    # Abstract interface; concrete implementations are provided elsewhere.
+    @property
+    def offset(self):
+        """The byte offset of this segment within its parent buffer."""
+        raise NotImplementedError()
+
+    def __len__(self):
+        """Obtain the length of the segment, in bytes."""
+        raise NotImplementedError()
+
+    def tobytes(self):
+        """Obtain bytes copy of this segment."""
+        raise NotImplementedError()
+
+
+class BufferSegments:
+    """Represents an array of ``(offset, length)`` integers.
+
+    This type is effectively an index used by :py:class:`BufferWithSegments`.
+
+    The array members are 64-bit unsigned integers using host/native bit order.
+
+    Instances conform to the buffer protocol.
+    """
+
+    # Marker/interface class: no methods are defined here.
+
+
+class BufferWithSegments:
+    """A memory buffer containing N discrete items of known lengths.
+
+    This type is essentially a fixed size memory address and an array
+    of 2-tuples of ``(offset, length)`` 64-bit unsigned native-endian
+    integers defining the byte offset and length of each segment within
+    the buffer.
+
+    Instances behave like containers.
+
+    Instances also conform to the buffer protocol. So a reference to the
+    backing bytes can be obtained via ``memoryview(o)``. A *copy* of the
+    backing bytes can be obtained via ``.tobytes()``.
+
+    This type exists to facilitate operations against N>1 items without
+    the overhead of Python object creation and management. Used with
+    APIs like :py:meth:`ZstdDecompressor.multi_decompress_to_buffer`, it
+    is possible to decompress many objects in parallel without the GIL
+    held, leading to even better performance.
+    """
+
+    # Abstract interface; concrete implementations are provided elsewhere.
+    @property
+    def size(self):
+        """Total size in bytes of the backing buffer."""
+        raise NotImplementedError()
+
+    def __len__(self):
+        raise NotImplementedError()
+
+    def __getitem__(self, i):
+        """Obtains a segment within the buffer.
+
+        The returned object references memory within this buffer.
+
+        :param i:
+           Integer index of segment to retrieve.
+        :return:
+           :py:class:`BufferSegment`
+        """
+        raise NotImplementedError()
+
+    def segments(self):
+        """Obtain the array of ``(offset, length)`` segments in the buffer.
+
+        :return:
+           :py:class:`BufferSegments`
+        """
+        raise NotImplementedError()
+
+    def tobytes(self):
+        """Obtain bytes copy of this instance."""
+        raise NotImplementedError()
+
+
+class BufferWithSegmentsCollection:
+    """A virtual spanning view over multiple BufferWithSegments.
+
+    Instances are constructed from 1 or more :py:class:`BufferWithSegments`
+    instances. The resulting object behaves like an ordered sequence whose
+    members are the segments within each ``BufferWithSegments``.
+
+    If the object is composed of 2 ``BufferWithSegments`` instances with the
+    first having 2 segments and the second having 3 segments, then ``b[0]``
+    and ``b[1]`` access segments in the first object and ``b[2]``, ``b[3]``,
+    and ``b[4]`` access segments from the second.
+    """
+
+    # Abstract interface; concrete implementations are provided elsewhere.
+    def __len__(self):
+        """The number of segments within all ``BufferWithSegments``."""
+        raise NotImplementedError()
+
+    def __getitem__(self, i):
+        """Obtain the ``BufferSegment`` at an offset."""
+        raise NotImplementedError()
+
+
+class ZstdError(Exception):
+    """Exception raised when the zstd library reports an error."""
+
+    pass
+
+
+def _zstd_error(zresult):
+    """Return the zstd error name for *zresult* as a unicode string."""
+    # Resolves to bytes on Python 2 and 3. We use the string for formatting
+    # into error messages, which will be literal unicode. So convert it to
+    # unicode.
+    return ffi.string(lib.ZSTD_getErrorName(zresult)).decode("utf-8")
+
+
+def _make_cctx_params(params):
+    """Build a GC-managed ``ZSTD_CCtx_params*`` from a
+    :py:class:`ZstdCompressionParameters` instance.
+
+    Copies every attribute of *params* into a freshly allocated native
+    parameters object. Raises :py:class:`MemoryError` if allocation fails
+    and :py:class:`ZstdError` (via ``_set_compression_parameter``) if the
+    library rejects a value.
+    """
+    res = lib.ZSTD_createCCtxParams()
+    if res == ffi.NULL:
+        raise MemoryError()
+
+    # Attach the destructor so the native object is freed with the handle.
+    res = ffi.gc(res, lib.ZSTD_freeCCtxParams)
+
+    attrs = [
+        (lib.ZSTD_c_format, params.format),
+        (lib.ZSTD_c_compressionLevel, params.compression_level),
+        (lib.ZSTD_c_windowLog, params.window_log),
+        (lib.ZSTD_c_hashLog, params.hash_log),
+        (lib.ZSTD_c_chainLog, params.chain_log),
+        (lib.ZSTD_c_searchLog, params.search_log),
+        (lib.ZSTD_c_minMatch, params.min_match),
+        (lib.ZSTD_c_targetLength, params.target_length),
+        (lib.ZSTD_c_strategy, params.strategy),
+        (lib.ZSTD_c_contentSizeFlag, params.write_content_size),
+        (lib.ZSTD_c_checksumFlag, params.write_checksum),
+        (lib.ZSTD_c_dictIDFlag, params.write_dict_id),
+        (lib.ZSTD_c_nbWorkers, params.threads),
+        (lib.ZSTD_c_jobSize, params.job_size),
+        (lib.ZSTD_c_overlapLog, params.overlap_log),
+        (lib.ZSTD_c_forceMaxWindow, params.force_max_window),
+        (lib.ZSTD_c_enableLongDistanceMatching, params.enable_ldm),
+        (lib.ZSTD_c_ldmHashLog, params.ldm_hash_log),
+        (lib.ZSTD_c_ldmMinMatch, params.ldm_min_match),
+        (lib.ZSTD_c_ldmBucketSizeLog, params.ldm_bucket_size_log),
+        (lib.ZSTD_c_ldmHashRateLog, params.ldm_hash_rate_log),
+    ]
+
+    for param, value in attrs:
+        _set_compression_parameter(res, param, value)
+
+    return res
+
+
+class ZstdCompressionParameters(object):
+    """Low-level zstd compression parameters.
+
+    This type represents a collection of parameters to control how zstd
+    compression is performed.
+
+    Instances can be constructed from raw parameters or derived from a
+    base set of defaults specified from a compression level (recommended)
+    via :py:meth:`ZstdCompressionParameters.from_level`.
+
+    >>> # Derive compression settings for compression level 7.
+    >>> params = zstandard.ZstdCompressionParameters.from_level(7)
+
+    >>> # With an input size of 1MB
+    >>> params = zstandard.ZstdCompressionParameters.from_level(7, source_size=1048576)
+
+    Using ``from_level()``, it is also possible to override individual compression
+    parameters or to define additional settings that aren't automatically derived.
+    e.g.:
+
+    >>> params = zstandard.ZstdCompressionParameters.from_level(4, window_log=10)
+    >>> params = zstandard.ZstdCompressionParameters.from_level(5, threads=4)
+
+    Or you can define low-level compression settings directly:
+
+    >>> params = zstandard.ZstdCompressionParameters(window_log=12, enable_ldm=True)
+
+    Once a ``ZstdCompressionParameters`` instance is obtained, it can be used to
+    configure a compressor:
+
+    >>> cctx = zstandard.ZstdCompressor(compression_params=params)
+
+    Some of these are very low-level settings. It may help to consult the official
+    zstandard documentation for their behavior. Look for the ``ZSTD_p_*`` constants
+    in ``zstd.h`` (https://github.com/facebook/zstd/blob/dev/lib/zstd.h).
+    """
+
+    @staticmethod
+    def from_level(level, source_size=0, dict_size=0, **kwargs):
+        """Create compression parameters from a compression level.
+
+        :param level:
+           Integer compression level.
+        :param source_size:
+           Integer size in bytes of source to be compressed.
+        :param dict_size:
+           Integer size in bytes of compression dictionary to use.
+        :return:
+           :py:class:`ZstdCompressionParameters`
+        """
+        params = lib.ZSTD_getCParams(level, source_size, dict_size)
+
+        # Map constructor keyword names to fields of the native
+        # ZSTD_compressionParameters struct returned above.
+        args = {
+            "window_log": "windowLog",
+            "chain_log": "chainLog",
+            "hash_log": "hashLog",
+            "search_log": "searchLog",
+            "min_match": "minMatch",
+            "target_length": "targetLength",
+            "strategy": "strategy",
+        }
+
+        # Explicit keyword arguments take precedence over derived values.
+        for arg, attr in args.items():
+            if arg not in kwargs:
+                kwargs[arg] = getattr(params, attr)
+
+        return ZstdCompressionParameters(**kwargs)
+
+    def __init__(
+        self,
+        format=0,
+        compression_level=0,
+        window_log=0,
+        hash_log=0,
+        chain_log=0,
+        search_log=0,
+        min_match=0,
+        target_length=0,
+        strategy=-1,
+        write_content_size=1,
+        write_checksum=0,
+        write_dict_id=0,
+        job_size=0,
+        overlap_log=-1,
+        force_max_window=0,
+        enable_ldm=0,
+        ldm_hash_log=0,
+        ldm_min_match=0,
+        ldm_bucket_size_log=0,
+        ldm_hash_rate_log=-1,
+        threads=0,
+    ):
+        params = lib.ZSTD_createCCtxParams()
+        if params == ffi.NULL:
+            raise MemoryError()
+
+        # Free the native object together with the Python handle.
+        params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
+
+        self._params = params
+
+        # Negative thread count means "use all logical CPUs".
+        if threads < 0:
+            threads = _cpu_count()
+
+        # We need to set ZSTD_c_nbWorkers before ZSTD_c_jobSize and ZSTD_c_overlapLog
+        # because setting ZSTD_c_nbWorkers resets the other parameters.
+        _set_compression_parameter(params, lib.ZSTD_c_nbWorkers, threads)
+
+        _set_compression_parameter(params, lib.ZSTD_c_format, format)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_compressionLevel, compression_level
+        )
+        _set_compression_parameter(params, lib.ZSTD_c_windowLog, window_log)
+        _set_compression_parameter(params, lib.ZSTD_c_hashLog, hash_log)
+        _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log)
+        _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log)
+        _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_targetLength, target_length
+        )
+
+        # -1 is a sentinel meaning "not specified"; 0 selects the library default.
+        if strategy == -1:
+            strategy = 0
+
+        _set_compression_parameter(params, lib.ZSTD_c_strategy, strategy)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_contentSizeFlag, write_content_size
+        )
+        _set_compression_parameter(
+            params, lib.ZSTD_c_checksumFlag, write_checksum
+        )
+        _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id)
+        _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size)
+
+        # Same -1 sentinel convention as strategy above.
+        if overlap_log == -1:
+            overlap_log = 0
+
+        _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_forceMaxWindow, force_max_window
+        )
+        _set_compression_parameter(
+            params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm
+        )
+        _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmMinMatch, ldm_min_match
+        )
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log
+        )
+
+        # Same -1 sentinel convention as strategy above.
+        if ldm_hash_rate_log == -1:
+            ldm_hash_rate_log = 0
+
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log
+        )
+
+    # Each property below reads the current value back from the native
+    # parameters object rather than caching it on the instance.
+    @property
+    def format(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_format)
+
+    @property
+    def compression_level(self):
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_compressionLevel
+        )
+
+    @property
+    def window_log(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_windowLog)
+
+    @property
+    def hash_log(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_hashLog)
+
+    @property
+    def chain_log(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_chainLog)
+
+    @property
+    def search_log(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_searchLog)
+
+    @property
+    def min_match(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_minMatch)
+
+    @property
+    def target_length(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_targetLength)
+
+    @property
+    def strategy(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_strategy)
+
+    @property
+    def write_content_size(self):
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_contentSizeFlag
+        )
+
+    @property
+    def write_checksum(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_checksumFlag)
+
+    @property
+    def write_dict_id(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_dictIDFlag)
+
+    @property
+    def job_size(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_jobSize)
+
+    @property
+    def overlap_log(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_overlapLog)
+
+    @property
+    def force_max_window(self):
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_forceMaxWindow
+        )
+
+    @property
+    def enable_ldm(self):
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_enableLongDistanceMatching
+        )
+
+    @property
+    def ldm_hash_log(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashLog)
+
+    @property
+    def ldm_min_match(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmMinMatch)
+
+    @property
+    def ldm_bucket_size_log(self):
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmBucketSizeLog
+        )
+
+    @property
+    def ldm_hash_rate_log(self):
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmHashRateLog
+        )
+
+    @property
+    def threads(self):
+        return _get_compression_parameter(self._params, lib.ZSTD_c_nbWorkers)
+
+    def estimated_compression_context_size(self):
+        """Estimated size in bytes needed to compress with these parameters."""
+        return lib.ZSTD_estimateCCtxSize_usingCCtxParams(self._params)
+
+
+def estimate_decompression_context_size():
+    """Estimate the memory size requirements for a decompressor instance.
+
+    :return:
+       Integer number of bytes.
+    """
+    # Thin wrapper over the corresponding zstd library function.
+    return lib.ZSTD_estimateDCtxSize()
+
+
+def _set_compression_parameter(params, param, value):
+    """Set one ``ZSTD_c_*`` parameter on a native params object.
+
+    Raises :py:class:`ZstdError` if the library rejects the value.
+    """
+    zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
+    if lib.ZSTD_isError(zresult):
+        raise ZstdError(
+            "unable to set compression context parameter: %s"
+            % _zstd_error(zresult)
+        )
+
+
+def _get_compression_parameter(params, param):
+    """Read one ``ZSTD_c_*`` parameter from a native params object.
+
+    Raises :py:class:`ZstdError` if the library reports an error.
+    """
+    # Out-parameter the C API writes the value into.
+    result = ffi.new("int *")
+
+    zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
+    if lib.ZSTD_isError(zresult):
+        raise ZstdError(
+            "unable to get compression context parameter: %s"
+            % _zstd_error(zresult)
+        )
+
+    return result[0]
+
+
+class ZstdCompressionWriter(object):
+    """Writable compressing stream wrapper.
+
+    ``ZstdCompressionWriter`` is a write-only stream interface for writing
+    compressed data to another stream.
+
+    This type conforms to the ``io.RawIOBase`` interface and should be usable
+    by any type that operates against a *file-object* (``typing.BinaryIO``
+    in Python type hinting speak). Only methods that involve writing will do
+    useful things.
+
+    As data is written to this stream (e.g. via ``write()``), that data
+    is sent to the compressor. As compressed data becomes available from
+    the compressor, it is sent to the underlying stream by calling its
+    ``write()`` method.
+
+    Both ``write()`` and ``flush()`` return the number of bytes written to the
+    object's ``write()``. In many cases, small inputs do not accumulate enough
+    data to cause a write and ``write()`` will return ``0``.
+
+    Calling ``close()`` will mark the stream as closed and subsequent I/O
+    operations will raise ``ValueError`` (per the documented behavior of
+    ``io.RawIOBase``). ``close()`` will also call ``close()`` on the underlying
+    stream if such a method exists and the instance was constructed with
+    ``closefd=True``
+
+    Instances are obtained by calling :py:meth:`ZstdCompressor.stream_writer`.
+
+    Typical usage is as follows:
+
+    >>> cctx = zstandard.ZstdCompressor(level=10)
+    >>> compressor = cctx.stream_writer(fh)
+    >>> compressor.write(b"chunk 0\\n")
+    >>> compressor.write(b"chunk 1\\n")
+    >>> compressor.flush()
+    >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\n`` at this point.
+    >>> # Receiver is also expecting more data in the zstd *frame*.
+    >>>
+    >>> compressor.write(b"chunk 2\\n")
+    >>> compressor.flush(zstandard.FLUSH_FRAME)
+    >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\nchunk 2``.
+    >>> # Receiver is expecting no more data, as the zstd frame is closed.
+    >>> # Any future calls to ``write()`` at this point will construct a new
+    >>> # zstd frame.
+
+    Instances can be used as context managers. Exiting the context manager is
+    the equivalent of calling ``close()``, which is equivalent to calling
+    ``flush(zstandard.FLUSH_FRAME)``:
+
+    >>> cctx = zstandard.ZstdCompressor(level=10)
+    >>> with cctx.stream_writer(fh) as compressor:
+    ...     compressor.write(b'chunk 0')
+    ...     compressor.write(b'chunk 1')
+    ...     ...
+
+    .. important::
+
+       If ``flush(FLUSH_FRAME)`` is not called, emitted data doesn't
+       constitute a full zstd *frame* and consumers of this data may complain
+       about malformed input. It is recommended to use instances as a context
+       manager to ensure *frames* are properly finished.
+
+    If the size of the data being fed to this streaming compressor is known,
+    you can declare it before compression begins:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh, size=data_len) as compressor:
+    ...     compressor.write(chunk0)
+    ...     compressor.write(chunk1)
+    ...     ...
+
+    Declaring the size of the source data allows compression parameters to
+    be tuned. And if ``write_content_size`` is used, it also results in the
+    content size being written into the frame header of the output data.
+
+    The size of chunks being ``write()`` to the destination can be specified:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh, write_size=32768) as compressor:
+    ...     ...
+
+    To see how much memory is being used by the streaming compressor:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh) as compressor:
+    ...     ...
+    ...     byte_size = compressor.memory_size()
+
+    The total number of bytes written so far is exposed via ``tell()``:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh) as compressor:
+    ...     ...
+    ...     total_written = compressor.tell()
+
+    ``stream_writer()`` accepts a ``write_return_read`` boolean argument to
+    control the return value of ``write()``. When ``False`` (the default),
+    ``write()`` returns the number of bytes that were ``write()``'en to the
+    underlying object. When ``True``, ``write()`` returns the number of bytes
+    read from the input that were subsequently written to the compressor.
+    ``True`` is the *proper* behavior for ``write()`` as specified by the
+    ``io.RawIOBase`` interface and will become the default value in a future
+    release.
+    """
+
+    def __init__(
+        self,
+        compressor,
+        writer,
+        source_size,
+        write_size,
+        write_return_read,
+        closefd=True,
+    ):
+        """Wrap *writer* with a compressing stream backed by *compressor*.
+
+        *source_size* is pledged to the compression context; *write_size*
+        sizes the native output buffer; *write_return_read* selects the
+        return-value semantics of ``write()``; *closefd* controls whether
+        ``close()`` also closes *writer*.
+        """
+        self._compressor = compressor
+        self._writer = writer
+        self._write_size = write_size
+        self._write_return_read = bool(write_return_read)
+        self._closefd = bool(closefd)
+        self._entered = False
+        self._closing = False
+        self._closed = False
+        self._bytes_compressed = 0
+
+        # Reusable native output buffer shared by subsequent operations.
+        self._dst_buffer = ffi.new("char[]", write_size)
+        self._out_buffer = ffi.new("ZSTD_outBuffer *")
+        self._out_buffer.dst = self._dst_buffer
+        self._out_buffer.size = len(self._dst_buffer)
+        self._out_buffer.pos = 0
+
+        # Declare the expected input size up front so the frame header can
+        # record the content size and parameters can be tuned accordingly.
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
+
+    def __enter__(self):
+        if self._closed:
+            raise ValueError("stream is closed")
+
+        if self._entered:
+            raise ZstdError("cannot __enter__ multiple times")
+
+        self._entered = True
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        self._entered = False
+        self.close()
+        self._compressor = None
+
+        return False
+
+    def __iter__(self):
+        raise io.UnsupportedOperation()
+
+    def __next__(self):
+        raise io.UnsupportedOperation()
+
+    def memory_size(self):
+        return lib.ZSTD_sizeof_CCtx(self._compressor._cctx)
+
+    def fileno(self):
+        f = getattr(self._writer, "fileno", None)
+        if f:
+            return f()
+        else:
+            raise OSError("fileno not available on underlying writer")
+
+    def close(self):
+        if self._closed:
+            return
+
+        try:
+            self._closing = True
+            self.flush(FLUSH_FRAME)
+        finally:
+            self._closing = False
+            self._closed = True
+
+        # Call close() on underlying stream as well.
+        f = getattr(self._writer, "close", None)
+        if self._closefd and f:
+            f()
+
+    @property
+    def closed(self):
+        return self._closed
+
+    def isatty(self):
+        return False
+
+    def readable(self):
+        return False
+
+    def readline(self, size=-1):
+        raise io.UnsupportedOperation()
+
+    def readlines(self, hint=-1):
+        raise io.UnsupportedOperation()
+
+    def seek(self, offset, whence=None):
+        raise io.UnsupportedOperation()
+
+    def seekable(self):
+        return False
+
+    def truncate(self, size=None):
+        raise io.UnsupportedOperation()
+
+    def writable(self):
+        return True
+
+    def writelines(self, lines):
+        raise NotImplementedError("writelines() is not yet implemented")
+
+    def read(self, size=-1):
+        raise io.UnsupportedOperation()
+
+    def readall(self):
+        raise io.UnsupportedOperation()
+
+    def readinto(self, b):
+        raise io.UnsupportedOperation()
+
    def write(self, data):
        """Send data to the compressor and possibly to the inner stream."""
        if self._closed:
            raise ValueError("stream is closed")

        total_write = 0

        # Zero-copy view of *data*; data_buffer must stay referenced (it does,
        # as a local) while in_buffer.src points at its memory.
        data_buffer = ffi.from_buffer(data)

        in_buffer = ffi.new("ZSTD_inBuffer *")
        in_buffer.src = data_buffer
        in_buffer.size = len(data_buffer)
        in_buffer.pos = 0

        out_buffer = self._out_buffer
        out_buffer.pos = 0

        # Feed until zstd has consumed the full input. Each iteration can
        # emit anywhere from 0 bytes up to the configured write_size.
        while in_buffer.pos < in_buffer.size:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx,
                out_buffer,
                in_buffer,
                lib.ZSTD_e_continue,
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if out_buffer.pos:
                # [:] copies out of the reusable C buffer before resetting it.
                self._writer.write(
                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                )
                total_write += out_buffer.pos
                self._bytes_compressed += out_buffer.pos
                out_buffer.pos = 0

        # write_return_read selects io.RawIOBase semantics (bytes consumed
        # from input) vs. legacy semantics (bytes written downstream).
        if self._write_return_read:
            return in_buffer.pos
        else:
            return total_write
+
    def flush(self, flush_mode=FLUSH_BLOCK):
        """Evict data from compressor's internal state and write it to inner stream.

        Calling this method may result in 0 or more ``write()`` calls to the
        inner stream.

        This method will also call ``flush()`` on the inner stream, if such a
        method exists.

        :param flush_mode:
           How to flush the zstd compressor.

           ``zstandard.FLUSH_BLOCK`` will flush data already sent to the
           compressor but not emitted to the inner stream. The stream is still
           writable after calling this. This is the default behavior.

           See documentation for other ``zstandard.FLUSH_*`` constants for more
           flushing options.
        :return:
           Integer number of bytes written to the inner stream.
        """

        # Map the public flush constant to zstd's end directive.
        if flush_mode == FLUSH_BLOCK:
            flush = lib.ZSTD_e_flush
        elif flush_mode == FLUSH_FRAME:
            flush = lib.ZSTD_e_end
        else:
            raise ValueError("unknown flush_mode: %r" % flush_mode)

        if self._closed:
            raise ValueError("stream is closed")

        total_write = 0

        out_buffer = self._out_buffer
        out_buffer.pos = 0

        # Flushing feeds no new input: an empty ZSTD_inBuffer.
        in_buffer = ffi.new("ZSTD_inBuffer *")
        in_buffer.src = ffi.NULL
        in_buffer.size = 0
        in_buffer.pos = 0

        # Loop until zstd reports the flush is complete (zresult == 0 means
        # no bytes remain within internal buffers).
        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, out_buffer, in_buffer, flush
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if out_buffer.pos:
                self._writer.write(
                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                )
                total_write += out_buffer.pos
                self._bytes_compressed += out_buffer.pos
                out_buffer.pos = 0

            if not zresult:
                break

        # Propagate flush() to the inner stream, except while close() is in
        # progress — presumably to avoid flushing a stream that is about to
        # be closed anyway (NOTE(review): confirm intent).
        f = getattr(self._writer, "flush", None)
        if f and not self._closing:
            f()

        return total_write
+
+    def tell(self):
+        return self._bytes_compressed
+
+
class ZstdCompressionObj(object):
    """A compressor conforming to the API in Python's standard library.

    This type implements an API similar to compression types in Python's
    standard library such as ``zlib.compressobj`` and ``bz2.BZ2Compressor``.
    This enables existing code targeting the standard library API to swap
    in this type to achieve zstd compression.

    .. important::

       The design of this API is not ideal for optimal performance.

       The reason performance is not optimal is because the API is limited to
       returning a single buffer holding compressed data. When compressing
       data, we don't know how much data will be emitted. So in order to
       capture all this data in a single buffer, we need to perform buffer
       reallocations and/or extra memory copies. This can add significant
       overhead depending on the size or nature of the compressed data how
       much your application calls this type.

       If performance is critical, consider an API like
       :py:meth:`ZstdCompressor.stream_reader`,
       :py:meth:`ZstdCompressor.stream_writer`,
       :py:meth:`ZstdCompressor.chunker`, or
       :py:meth:`ZstdCompressor.read_to_iter`, which result in less overhead
       managing buffers.

    Instances are obtained by calling :py:meth:`ZstdCompressor.compressobj`.

    Here is how this API should be used:

    >>> cctx = zstandard.ZstdCompressor()
    >>> cobj = cctx.compressobj()
    >>> data = cobj.compress(b"raw input 0")
    >>> data = cobj.compress(b"raw input 1")
    >>> data = cobj.flush()

    Or to flush blocks:

    >>> cctx = zstandard.ZstdCompressor()
    >>> cobj = cctx.compressobj()
    >>> data = cobj.compress(b"chunk in first block")
    >>> data = cobj.flush(zstandard.COMPRESSOBJ_FLUSH_BLOCK)
    >>> data = cobj.compress(b"chunk in second block")
    >>> data = cobj.flush()

    For best performance results, keep input chunks under 256KB. This avoids
    extra allocations for a large output object.

    It is possible to declare the input size of the data that will be fed
    into the compressor:

    >>> cctx = zstandard.ZstdCompressor()
    >>> cobj = cctx.compressobj(size=6)
    >>> data = cobj.compress(b"foobar")
    >>> data = cobj.flush()
    """

    def compress(self, data):
        """Send data to the compressor.

        This method receives bytes to feed to the compressor and returns
        bytes constituting zstd compressed data.

        The zstd compressor accumulates bytes and the returned bytes may be
        substantially smaller or larger than the size of the input data on
        any given call. The returned value may be the empty byte string
        (``b""``).

        :param data:
           Data to write to the compressor.
        :return:
           Compressed data.
        """
        if self._finished:
            raise ZstdError("cannot call compress() after compressor finished")

        # Zero-copy view; kept alive as a local while zstd reads from it.
        data_buffer = ffi.from_buffer(data)
        source = ffi.new("ZSTD_inBuffer *")
        source.src = data_buffer
        source.size = len(data_buffer)
        source.pos = 0

        chunks = []

        # Compare the byte position against the byte count (source.size),
        # not len(data): for buffer objects whose itemsize > 1 (e.g.
        # array.array), len(data) counts elements and would end this loop
        # before all input bytes were consumed.
        while source.pos < source.size:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if self._out.pos:
                # Copy out of the reusable output buffer, then reset it.
                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
                self._out.pos = 0

        return b"".join(chunks)

    def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
        """Emit data accumulated in the compressor that hasn't been outputted yet.

        The ``flush_mode`` argument controls how to end the stream.

        ``zstandard.COMPRESSOBJ_FLUSH_FINISH`` (the default) ends the
        compression stream and finishes a zstd frame. Once this type of flush
        is performed, ``compress()`` and ``flush()`` can no longer be called.
        This type of flush **must** be called to end the compression context. If
        not called, the emitted data may be incomplete and may not be readable
        by a decompressor.

        ``zstandard.COMPRESSOBJ_FLUSH_BLOCK`` will flush a zstd block. This
        ensures that all data fed to this instance will have been emitted and
        can be decoded by a decompressor. Flushes of this type can be performed
        multiple times. The next call to ``compress()`` will begin a new zstd
        block.

        :param flush_mode:
           How to flush the zstd compressor.
        :return:
           Compressed data.
        """
        if flush_mode not in (
            COMPRESSOBJ_FLUSH_FINISH,
            COMPRESSOBJ_FLUSH_BLOCK,
        ):
            raise ValueError("flush mode not recognized")

        if self._finished:
            raise ZstdError("compressor object already finished")

        if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
            z_flush_mode = lib.ZSTD_e_flush
        elif flush_mode == COMPRESSOBJ_FLUSH_FINISH:
            z_flush_mode = lib.ZSTD_e_end
            self._finished = True
        else:
            # Defensive: unreachable given the membership check above.
            raise ZstdError("unhandled flush mode")

        assert self._out.pos == 0

        # Flushing feeds no new input: an empty ZSTD_inBuffer.
        in_buffer = ffi.new("ZSTD_inBuffer *")
        in_buffer.src = ffi.NULL
        in_buffer.size = 0
        in_buffer.pos = 0

        chunks = []

        # Loop until zstd reports the flush/end is complete (zresult == 0).
        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, in_buffer, z_flush_mode
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "error ending compression stream: %s" % _zstd_error(zresult)
                )

            if self._out.pos:
                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
                self._out.pos = 0

            if not zresult:
                break

        return b"".join(chunks)
+
+
class ZstdCompressionChunker(object):
    """Compress data to uniformly sized chunks.

    This type allows you to iteratively feed chunks of data into a compressor
    and produce output chunks of uniform size.

    ``compress()``, ``flush()``, and ``finish()`` all return an iterator of
    ``bytes`` instances holding compressed data. The iterator may be empty.
    Callers MUST iterate through all elements of the returned iterator before
    performing another operation on the object or else the compressor's
    internal state may become confused. This can result in an exception being
    raised or malformed data being emitted.

    All chunks emitted by ``compress()`` will have a length of the configured
    chunk size.

    ``flush()`` and ``finish()`` may return a final chunk smaller than
    the configured chunk size.

    Instances are obtained by calling :py:meth:`ZstdCompressor.chunker`.

    Here is how the API should be used:

    >>> cctx = zstandard.ZstdCompressor()
    >>> chunker = cctx.chunker(chunk_size=32768)
    >>>
    >>> with open(path, 'rb') as fh:
    ...     while True:
    ...         in_chunk = fh.read(32768)
    ...         if not in_chunk:
    ...             break
    ...
    ...         for out_chunk in chunker.compress(in_chunk):
    ...             # Do something with output chunk of size 32768.
    ...
    ...     for out_chunk in chunker.finish():
    ...         # Do something with output chunks that finalize the zstd frame.

    This compressor type is often a better alternative to
    :py:class:`ZstdCompressor.compressobj` because it has better performance
    properties.

    ``compressobj()`` will emit output data as it is available. This results
    in a *stream* of output chunks of varying sizes. The consistency of the
    output chunk size with ``chunker()`` is more appropriate for many usages,
    such as sending compressed data to a socket.

    ``compressobj()`` may also perform extra memory reallocations in order
    to dynamically adjust the sizes of the output chunks. Since ``chunker()``
    output chunks are all the same size (except for flushed or final chunks),
    there is less memory allocation/copying overhead.
    """

    def __init__(self, compressor, chunk_size):
        # The output buffer is reused across calls; self._dst_buffer keeps
        # the backing memory alive for the lifetime of the instance.
        self._compressor = compressor
        self._out = ffi.new("ZSTD_outBuffer *")
        self._dst_buffer = ffi.new("char[]", chunk_size)
        self._out.dst = self._dst_buffer
        self._out.size = chunk_size
        self._out.pos = 0

        # A non-NULL self._in.src marks an operation in progress (generator
        # not yet fully drained); methods below reject reentry in that state.
        self._in = ffi.new("ZSTD_inBuffer *")
        self._in.src = ffi.NULL
        self._in.size = 0
        self._in.pos = 0
        self._finished = False

    def compress(self, data):
        """Feed new input data into the compressor.

        :param data:
           Data to feed to compressor.
        :return:
           Iterator of ``bytes`` representing chunks of compressed data.
        """
        if self._finished:
            raise ZstdError("cannot call compress() after compression finished")

        if self._in.src != ffi.NULL:
            raise ZstdError(
                "cannot perform operation before consuming output "
                "from previous operation"
            )

        # data_buffer stays referenced by this generator frame, keeping the
        # memory self._in.src points at alive while compression proceeds.
        data_buffer = ffi.from_buffer(data)

        if not len(data_buffer):
            return

        self._in.src = data_buffer
        self._in.size = len(data_buffer)
        self._in.pos = 0

        while self._in.pos < self._in.size:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_continue
            )

            # Reset the input buffer as soon as it is fully consumed — before
            # the error check — so the "operation in progress" guard above
            # does not spuriously reject the next call.
            if self._in.pos == self._in.size:
                self._in.src = ffi.NULL
                self._in.size = 0
                self._in.pos = 0

            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            # Only emit when the output buffer is exactly full, which is what
            # guarantees uniform chunk sizes from compress().
            if self._out.pos == self._out.size:
                yield ffi.buffer(self._out.dst, self._out.pos)[:]
                self._out.pos = 0

    def flush(self):
        """Flushes all data currently in the compressor.

        :return:
           Iterator of ``bytes`` of compressed data.
        """
        if self._finished:
            raise ZstdError("cannot call flush() after compression finished")

        if self._in.src != ffi.NULL:
            raise ZstdError(
                "cannot call flush() before consuming output from "
                "previous operation"
            )

        # self._in is empty (src == NULL) here, so this only drains zstd's
        # internal buffers. zresult == 0 signals the flush is complete.
        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            # Flushed chunks may be smaller than the configured chunk size.
            if self._out.pos:
                yield ffi.buffer(self._out.dst, self._out.pos)[:]
                self._out.pos = 0

            if not zresult:
                return

    def finish(self):
        """Signals the end of input data.

        No new data can be compressed after this method is called.

        This method will flush buffered data and finish the zstd frame.

        :return:
           Iterator of ``bytes`` of compressed data.
        """
        if self._finished:
            raise ZstdError("cannot call finish() after compression finished")

        if self._in.src != ffi.NULL:
            raise ZstdError(
                "cannot call finish() before consuming output from "
                "previous operation"
            )

        # ZSTD_e_end drains remaining data and writes the frame epilogue;
        # zresult == 0 means the frame is fully written.
        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if self._out.pos:
                yield ffi.buffer(self._out.dst, self._out.pos)[:]
                self._out.pos = 0

            if not zresult:
                self._finished = True
                return
+
+
class ZstdCompressionReader(object):
    """Readable compressing stream wrapper.

    ``ZstdCompressionReader`` is a read-only stream interface for obtaining
    compressed data from a source.

    This type conforms to the ``io.RawIOBase`` interface and should be usable
    by any type that operates against a *file-object* (``typing.BinaryIO``
    in Python type hinting speak).

    Instances are neither writable nor seekable (even if the underlying
    source is seekable). ``readline()`` and ``readlines()`` are not implemented
    because they don't make sense for compressed data. ``tell()`` returns the
    number of compressed bytes emitted so far.

    Instances are obtained by calling :py:meth:`ZstdCompressor.stream_reader`.

    In this example, we open a file for reading and then wrap that file
    handle with a stream from which compressed data can be ``read()``.

    >>> with open(path, 'rb') as fh:
    ...     cctx = zstandard.ZstdCompressor()
    ...     reader = cctx.stream_reader(fh)
    ...     while True:
    ...         chunk = reader.read(16384)
    ...         if not chunk:
    ...             break
    ...
    ...         # Do something with compressed chunk.

    Instances can also be used as context managers:

    >>> with open(path, 'rb') as fh:
    ...     cctx = zstandard.ZstdCompressor()
    ...     with cctx.stream_reader(fh) as reader:
    ...         while True:
    ...             chunk = reader.read(16384)
    ...             if not chunk:
    ...                 break
    ...
    ...             # Do something with compressed chunk.

    When the context manager exits or ``close()`` is called, the stream is
    closed, underlying resources are released, and future operations against
    the compression stream will fail.

    ``stream_reader()`` accepts a ``size`` argument specifying how large the
    input stream is. This is used to adjust compression parameters so they are
    tailored to the source size. e.g.

    >>> with open(path, 'rb') as fh:
    ...     cctx = zstandard.ZstdCompressor()
    ...     with cctx.stream_reader(fh, size=os.stat(path).st_size) as reader:
    ...         ...

    If the ``source`` is a stream, you can specify how large ``read()``
    requests to that stream should be via the ``read_size`` argument.
    It defaults to ``zstandard.COMPRESSION_RECOMMENDED_INPUT_SIZE``. e.g.

    >>> with open(path, 'rb') as fh:
    ...     cctx = zstandard.ZstdCompressor()
    ...     # Will perform fh.read(8192) when obtaining data to feed into the
    ...     # compressor.
    ...     with cctx.stream_reader(fh, read_size=8192) as reader:
    ...         ...
    """

    def __init__(self, compressor, source, read_size, closefd=True):
        """Create a reader that pulls from *source* and emits compressed data.

        :param compressor: Owning ``ZstdCompressor`` (provides the CCtx).
        :param source: Object with a ``read()`` method, or a buffer object.
        :param read_size: Size of ``read()`` requests issued to *source*.
        :param closefd: Whether ``close()`` also closes *source*.
        """
        self._compressor = compressor
        self._source = source
        self._read_size = read_size
        self._closefd = closefd
        self._entered = False
        self._closed = False
        self._bytes_compressed = 0
        self._finished_input = False
        self._finished_output = False

        self._in_buffer = ffi.new("ZSTD_inBuffer *")
        # Holds a ref so backing bytes in self._in_buffer stay alive.
        self._source_buffer = None

    def __enter__(self):
        if self._entered:
            raise ValueError("cannot __enter__ multiple times")

        if self._closed:
            raise ValueError("stream is closed")

        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._entered = False
        self._compressor = None
        self.close()
        self._source = None

        return False

    def readable(self):
        return True

    def writable(self):
        return False

    def seekable(self):
        return False

    def readline(self):
        raise io.UnsupportedOperation()

    def readlines(self):
        raise io.UnsupportedOperation()

    def write(self, data):
        raise OSError("stream is not writable")

    def writelines(self, ignored):
        raise OSError("stream is not writable")

    def isatty(self):
        return False

    def flush(self):
        return None

    def close(self):
        if self._closed:
            return

        self._closed = True

        f = getattr(self._source, "close", None)
        if self._closefd and f:
            f()

    @property
    def closed(self):
        return self._closed

    def tell(self):
        return self._bytes_compressed

    def readall(self):
        chunks = []

        while True:
            chunk = self.read(1048576)
            if not chunk:
                break

            chunks.append(chunk)

        return b"".join(chunks)

    def __iter__(self):
        raise io.UnsupportedOperation()

    def __next__(self):
        raise io.UnsupportedOperation()

    next = __next__

    def _read_input(self):
        """Acquire more input bytes from the source into self._in_buffer."""
        if self._finished_input:
            return

        if hasattr(self._source, "read"):
            data = self._source.read(self._read_size)

            if not data:
                self._finished_input = True
                return

            self._source_buffer = ffi.from_buffer(data)
            self._in_buffer.src = self._source_buffer
            self._in_buffer.size = len(self._source_buffer)
            self._in_buffer.pos = 0
        else:
            # Source is a buffer object: expose it wholesale in one shot.
            self._source_buffer = ffi.from_buffer(self._source)
            self._in_buffer.src = self._source_buffer
            self._in_buffer.size = len(self._source_buffer)
            self._in_buffer.pos = 0

    def _compress_into_buffer(self, out_buffer):
        """Compress pending input into *out_buffer*.

        Returns truthy when the output buffer was filled to capacity.
        """
        if self._in_buffer.pos >= self._in_buffer.size:
            return

        old_pos = out_buffer.pos

        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx,
            out_buffer,
            self._in_buffer,
            lib.ZSTD_e_continue,
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if self._in_buffer.pos == self._in_buffer.size:
            self._in_buffer.src = ffi.NULL
            self._in_buffer.pos = 0
            self._in_buffer.size = 0
            self._source_buffer = None

            # A buffer source is supplied in one shot, so consuming it fully
            # means there is no more input.
            if not hasattr(self._source, "read"):
                self._finished_input = True

        if lib.ZSTD_isError(zresult):
            # Fixed: use %-formatting; previously the detail string was
            # passed as a second exception argument and never interpolated.
            raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))

        return out_buffer.pos and out_buffer.pos == out_buffer.size

    def read(self, size=-1):
        if self._closed:
            raise ValueError("stream is closed")

        if size < -1:
            raise ValueError("cannot read negative amounts less than -1")

        if size == -1:
            return self.readall()

        if self._finished_output or size == 0:
            return b""

        # Need a dedicated ref to dest buffer otherwise it gets collected.
        dst_buffer = ffi.new("char[]", size)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dst_buffer
        out_buffer.size = size
        out_buffer.pos = 0

        if self._compress_into_buffer(out_buffer):
            return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

        while not self._finished_input:
            self._read_input()

            if self._compress_into_buffer(out_buffer):
                return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

        # EOF
        old_pos = out_buffer.pos

        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if lib.ZSTD_isError(zresult):
            # Fixed: use %-formatting (was a stray comma argument).
            raise ZstdError(
                "error ending compression stream: %s" % _zstd_error(zresult)
            )

        if zresult == 0:
            self._finished_output = True

        return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

    def read1(self, size=-1):
        if self._closed:
            raise ValueError("stream is closed")

        if size < -1:
            raise ValueError("cannot read negative amounts less than -1")

        if self._finished_output or size == 0:
            return b""

        # -1 returns arbitrary number of bytes.
        if size == -1:
            size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE

        dst_buffer = ffi.new("char[]", size)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dst_buffer
        out_buffer.size = size
        out_buffer.pos = 0

        # read1() dictates that we can perform at most 1 call to the
        # underlying stream to get input. However, we can't satisfy this
        # restriction with compression because not all input generates output.
        # It is possible to perform a block flush in order to ensure output.
        # But this may not be desirable behavior. So we allow multiple read()
        # to the underlying stream. But unlike read(), we stop once we have
        # any output.

        self._compress_into_buffer(out_buffer)
        if out_buffer.pos:
            return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

        while not self._finished_input:
            self._read_input()

            # If we've filled the output buffer, return immediately.
            if self._compress_into_buffer(out_buffer):
                return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

            # If we've populated the output buffer and we're not at EOF,
            # also return, as we've satisfied the read1() limits.
            if out_buffer.pos and not self._finished_input:
                return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

            # Else if we're at EOF and we have room left in the buffer,
            # fall through to below and try to add more data to the output.

        # EOF.
        old_pos = out_buffer.pos

        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if lib.ZSTD_isError(zresult):
            raise ZstdError(
                "error ending compression stream: %s" % _zstd_error(zresult)
            )

        if zresult == 0:
            self._finished_output = True

        return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

    def readinto(self, b):
        if self._closed:
            raise ValueError("stream is closed")

        if self._finished_output:
            return 0

        # TODO use writable=True once we require CFFI >= 1.12.
        dest_buffer = ffi.from_buffer(b)
        ffi.memmove(b, b"", 0)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dest_buffer
        out_buffer.size = len(dest_buffer)
        out_buffer.pos = 0

        if self._compress_into_buffer(out_buffer):
            return out_buffer.pos

        while not self._finished_input:
            self._read_input()
            if self._compress_into_buffer(out_buffer):
                return out_buffer.pos

        # EOF.
        old_pos = out_buffer.pos
        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if lib.ZSTD_isError(zresult):
            # Fixed: use %-formatting (was a stray comma argument).
            raise ZstdError(
                "error ending compression stream: %s" % _zstd_error(zresult)
            )

        if zresult == 0:
            self._finished_output = True

        return out_buffer.pos

    def readinto1(self, b):
        if self._closed:
            raise ValueError("stream is closed")

        if self._finished_output:
            return 0

        # TODO use writable=True once we require CFFI >= 1.12.
        dest_buffer = ffi.from_buffer(b)
        ffi.memmove(b, b"", 0)

        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dest_buffer
        out_buffer.size = len(dest_buffer)
        out_buffer.pos = 0

        self._compress_into_buffer(out_buffer)
        if out_buffer.pos:
            return out_buffer.pos

        while not self._finished_input:
            self._read_input()

            if self._compress_into_buffer(out_buffer):
                return out_buffer.pos

            if out_buffer.pos and not self._finished_input:
                return out_buffer.pos

        # EOF.
        old_pos = out_buffer.pos

        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if lib.ZSTD_isError(zresult):
            raise ZstdError(
                "error ending compression stream: %s" % _zstd_error(zresult)
            )

        if zresult == 0:
            self._finished_output = True

        return out_buffer.pos
+
+
+class ZstdCompressor(object):
+    """
+    Create an object used to perform Zstandard compression.
+
+    Each instance is essentially a wrapper around a ``ZSTD_CCtx`` from
+    zstd's C API.
+
+    An instance can compress data various ways. Instances can be used
+    multiple times. Each compression operation will use the compression
+    parameters defined at construction time.
+
+    .. note:
+
+       When using a compression dictionary and multiple compression
+       operations are performed, the ``ZstdCompressionParameters`` derived
+       from an integer compression ``level`` and the first compressed data's
+       size will be reused for all subsequent operations. This may not be
+       desirable if source data sizes vary significantly.
+
+    ``compression_params`` is mutually exclusive with ``level``,
+    ``write_checksum``, ``write_content_size``, ``write_dict_id``, and
+    ``threads``.
+
+    Assume that each ``ZstdCompressor`` instance can only handle a single
+    logical compression operation at the same time. i.e. if you call a method
+    like ``stream_reader()`` to obtain multiple objects derived from the same
+    ``ZstdCompressor`` instance and attempt to use them simultaneously, errors
+    will likely occur.
+
+    If you need to perform multiple logical compression operations and you
+    can't guarantee those operations are temporally non-overlapping, you need
+    to obtain multiple ``ZstdCompressor`` instances.
+
+    Unless specified otherwise, assume that no two methods of
+    ``ZstdCompressor`` instances can be called from multiple Python
+    threads simultaneously. In other words, assume instances are not thread safe
+    unless stated otherwise.
+
+    :param level:
+       Integer compression level. Valid values are all negative integers
+       through 22. Lower values generally yield faster operations with lower
+       compression ratios. Higher values are generally slower but compress
+       better. The default is 3, which is what the ``zstd`` CLI uses. Negative
+       levels effectively engage ``--fast`` mode from the ``zstd`` CLI.
+    :param dict_data:
+       A ``ZstdCompressionDict`` to be used to compress with dictionary
+       data.
+    :param compression_params:
+       A ``ZstdCompressionParameters`` instance defining low-level compression
+       parameters. If defined, this will overwrite the ``level`` argument.
+    :param write_checksum:
+       If True, a 4 byte content checksum will be written with the compressed
+       data, allowing the decompressor to perform content verification.
+    :param write_content_size:
+       If True (the default), the decompressed content size will be included
+       in the header of the compressed data. This data will only be written if
+       the compressor knows the size of the input data.
+    :param write_dict_id:
+       Determines whether the dictionary ID will be written into the compressed
+       data. Defaults to True. Only adds content to the compressed data if
+       a dictionary is being used.
+    :param threads:
+       Number of threads to use to compress data concurrently. When set,
+       compression operations are performed on multiple threads. The default
+       value (0) disables multi-threaded compression. A value of ``-1`` means
+       to set the number of threads to the number of detected logical CPUs.
+    """
+
+    def __init__(
+        self,
+        level=3,
+        dict_data=None,
+        compression_params=None,
+        write_checksum=None,
+        write_content_size=None,
+        write_dict_id=None,
+        threads=0,
+    ):
+        # Parameter semantics are documented in the class docstring.
+        if level > lib.ZSTD_maxCLevel():
+            raise ValueError(
+                "level must be less than %d" % lib.ZSTD_maxCLevel()
+            )
+
+        # Negative thread counts mean "use all detected logical CPUs."
+        if threads < 0:
+            threads = _cpu_count()
+
+        # compression_params is mutually exclusive with the individual
+        # tuning arguments; reject conflicting combinations up front.
+        if compression_params and write_checksum is not None:
+            raise ValueError(
+                "cannot define compression_params and " "write_checksum"
+            )
+
+        if compression_params and write_content_size is not None:
+            raise ValueError(
+                "cannot define compression_params and " "write_content_size"
+            )
+
+        if compression_params and write_dict_id is not None:
+            raise ValueError(
+                "cannot define compression_params and " "write_dict_id"
+            )
+
+        if compression_params and threads:
+            raise ValueError("cannot define compression_params and threads")
+
+        if compression_params:
+            self._params = _make_cctx_params(compression_params)
+        else:
+            # Defaults applied only when deriving params from ``level``.
+            if write_dict_id is None:
+                write_dict_id = True
+
+            params = lib.ZSTD_createCCtxParams()
+            if params == ffi.NULL:
+                raise MemoryError()
+
+            # Tie the C params object's lifetime to this Python object.
+            self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
+
+            _set_compression_parameter(
+                self._params, lib.ZSTD_c_compressionLevel, level
+            )
+
+            # Content size is written by default (matches the zstd CLI).
+            _set_compression_parameter(
+                self._params,
+                lib.ZSTD_c_contentSizeFlag,
+                write_content_size if write_content_size is not None else 1,
+            )
+
+            _set_compression_parameter(
+                self._params,
+                lib.ZSTD_c_checksumFlag,
+                1 if write_checksum else 0,
+            )
+
+            _set_compression_parameter(
+                self._params, lib.ZSTD_c_dictIDFlag, 1 if write_dict_id else 0
+            )
+
+            if threads:
+                _set_compression_parameter(
+                    self._params, lib.ZSTD_c_nbWorkers, threads
+                )
+
+        cctx = lib.ZSTD_createCCtx()
+        if cctx == ffi.NULL:
+            raise MemoryError()
+
+        self._cctx = cctx
+        self._dict_data = dict_data
+
+        # We defer setting up garbage collection until after calling
+        # _setup_cctx() to ensure the memory size estimate is more accurate.
+        try:
+            self._setup_cctx()
+        finally:
+            self._cctx = ffi.gc(
+                cctx, lib.ZSTD_freeCCtx, size=lib.ZSTD_sizeof_CCtx(cctx)
+            )
+
+    def _setup_cctx(self):
+        """Apply stored parameters and dictionary to the ``ZSTD_CCtx``.
+
+        Raises :py:class:`ZstdError` if the C API rejects the parameters
+        or the dictionary.
+        """
+        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(
+            self._cctx, self._params
+        )
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "could not set compression parameters: %s"
+                % _zstd_error(zresult)
+            )
+
+        dict_data = self._dict_data
+
+        if dict_data:
+            # Prefer a precomputed digested dictionary (_cdict) when one
+            # exists; otherwise load the raw dictionary content by
+            # reference (ZSTD_dlm_byRef — no copy is made, so dict_data
+            # must outlive this compressor).
+            if dict_data._cdict:
+                zresult = lib.ZSTD_CCtx_refCDict(self._cctx, dict_data._cdict)
+            else:
+                zresult = lib.ZSTD_CCtx_loadDictionary_advanced(
+                    self._cctx,
+                    dict_data.as_bytes(),
+                    len(dict_data),
+                    lib.ZSTD_dlm_byRef,
+                    dict_data._dict_type,
+                )
+
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "could not load compression dictionary: %s"
+                    % _zstd_error(zresult)
+                )
+
+    def memory_size(self):
+        """Obtain the memory usage of this compressor, in bytes.
+
+        The value is reported by zstd's own accounting for the underlying
+        ``ZSTD_CCtx``.
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> memory = cctx.memory_size()
+        """
+        return lib.ZSTD_sizeof_CCtx(self._cctx)
+
+    def compress(self, data):
+        """
+        Compress data in a single operation.
+
+        This is the simplest mechanism to perform compression: simply pass in a
+        value and get a compressed value back. It is almost the most prone to
+        abuse.
+
+        The input and output values must fit in memory, so passing in very large
+        values can result in excessive memory usage. For this reason, one of the
+        streaming based APIs is preferred for larger values.
+
+        :param data:
+           Source data to compress
+        :return:
+           Compressed data
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> compressed = cctx.compress(b"data to compress")
+        """
+        # Start a fresh frame; this context may have been used before.
+        lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+        data_buffer = ffi.from_buffer(data)
+
+        # ZSTD_compressBound() gives the worst-case compressed size, so a
+        # single output buffer is guaranteed to hold the whole frame.
+        dest_size = lib.ZSTD_compressBound(len(data_buffer))
+        out = new_nonzero("char[]", dest_size)
+
+        # Pledging the exact input size lets zstd record the content size
+        # in the frame header (if enabled).
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer))
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
+
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+
+        out_buffer.dst = out
+        out_buffer.size = dest_size
+        out_buffer.pos = 0
+
+        in_buffer.src = data_buffer
+        in_buffer.size = len(data_buffer)
+        in_buffer.pos = 0
+
+        zresult = lib.ZSTD_compressStream2(
+            self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+        )
+
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError("cannot compress: %s" % _zstd_error(zresult))
+        elif zresult:
+            # Non-zero means the frame was not fully flushed. With a
+            # compressBound()-sized buffer this should be unreachable.
+            raise ZstdError("unexpected partial frame flush")
+
+        return ffi.buffer(out, out_buffer.pos)[:]
+
+    def compressobj(self, size=-1):
+        """
+        Obtain a compressor exposing the Python standard library compression API.
+
+        See :py:class:`ZstdCompressionObj` for the full documentation.
+
+        :param size:
+           Size in bytes of data that will be compressed.
+        :return:
+           :py:class:`ZstdCompressionObj`
+        """
+        # Start a fresh frame on the shared context; any ZstdCompressionObj
+        # from a previous call must no longer be used.
+        lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+        if size < 0:
+            size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
+
+        # Build the compression object with a reusable output buffer.
+        cobj = ZstdCompressionObj()
+        cobj._out = ffi.new("ZSTD_outBuffer *")
+        cobj._dst_buffer = ffi.new(
+            "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        )
+        cobj._out.dst = cobj._dst_buffer
+        cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        cobj._out.pos = 0
+        cobj._compressor = self
+        cobj._finished = False
+
+        return cobj
+
+    def chunker(self, size=-1, chunk_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        """
+        Create an object for iterative compressing to same-sized chunks.
+
+        This API is similar to :py:meth:`ZstdCompressor.compressobj` but has
+        better performance properties.
+
+        :param size:
+           Size in bytes of data that will be compressed.
+        :param chunk_size:
+           Size of compressed chunks.
+        :return:
+           :py:class:`ZstdCompressionChunker`
+        """
+        # Start a fresh frame on the shared context.
+        lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+        # Negative size means the source size is unknown.
+        if size < 0:
+            size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
+
+        return ZstdCompressionChunker(self, chunk_size=chunk_size)
+
+    def copy_stream(
+        self,
+        ifh,
+        ofh,
+        size=-1,
+        read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+        write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+    ):
+        """
+        Copy data between 2 streams while compressing it.
+
+        Data will be read from ``ifh``, compressed, and written to ``ofh``.
+        ``ifh`` must have a ``read(size)`` method. ``ofh`` must have a
+        ``write(data)`` method.
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
+        ...     cctx.copy_stream(ifh, ofh)
+
+        It is also possible to declare the size of the source stream:
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> cctx.copy_stream(ifh, ofh, size=len_of_input)
+
+        You can also specify how large the chunks that are ``read()``
+        and ``write()`` from and to the streams:
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> cctx.copy_stream(ifh, ofh, read_size=32768, write_size=16384)
+
+        The stream copier returns a 2-tuple of bytes read and written:
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> read_count, write_count = cctx.copy_stream(ifh, ofh)
+
+        :param ifh:
+           Source stream to read from
+        :param ofh:
+           Destination stream to write to
+        :param size:
+           Size in bytes of the source stream. If defined, compression
+           parameters will be tuned for this size.
+        :param read_size:
+           Chunk sizes that source stream should be ``read()`` from.
+        :param write_size:
+           Chunk sizes that destination stream should be ``write()`` to.
+        :return:
+           2-tuple of ints of bytes read and written, respectively.
+        """
+
+        if not hasattr(ifh, "read"):
+            raise ValueError("first argument must have a read() method")
+        if not hasattr(ofh, "write"):
+            raise ValueError("second argument must have a write() method")
+
+        # Start a fresh frame on the shared context.
+        lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+        if size < 0:
+            size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+
+        # One reusable output buffer for the whole copy.
+        dst_buffer = ffi.new("char[]", write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
+        out_buffer.pos = 0
+
+        total_read, total_write = 0, 0
+
+        while True:
+            data = ifh.read(read_size)
+            # Empty read signals end of the source stream.
+            if not data:
+                break
+
+            data_buffer = ffi.from_buffer(data)
+            total_read += len(data_buffer)
+            in_buffer.src = data_buffer
+            in_buffer.size = len(data_buffer)
+            in_buffer.pos = 0
+
+            # Feed this chunk until the compressor has consumed it fully,
+            # flushing output to ofh whenever any is produced.
+            while in_buffer.pos < in_buffer.size:
+                zresult = lib.ZSTD_compressStream2(
+                    self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+                )
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError(
+                        "zstd compress error: %s" % _zstd_error(zresult)
+                    )
+
+                if out_buffer.pos:
+                    ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                    total_write += out_buffer.pos
+                    out_buffer.pos = 0
+
+        # We've finished reading. Flush the compressor.
+        while True:
+            zresult = lib.ZSTD_compressStream2(
+                self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+            )
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "error ending compression stream: %s" % _zstd_error(zresult)
+                )
+
+            if out_buffer.pos:
+                ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                total_write += out_buffer.pos
+                out_buffer.pos = 0
+
+            # 0 from ZSTD_e_end means the frame is fully flushed.
+            if zresult == 0:
+                break
+
+        return total_read, total_write
+
+    def stream_reader(
+        self,
+        source,
+        size=-1,
+        read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+        closefd=True,
+    ):
+        """
+        Wrap a readable source with a stream that can read compressed data.
+
+        This will produce an object conforming to the ``io.RawIOBase``
+        interface which can be ``read()`` from to retrieve compressed data
+        from a source.
+
+        The source object can be any object with a ``read(size)`` method
+        or an object that conforms to the buffer protocol.
+
+        See :py:class:`ZstdCompressionReader` for type documentation and usage
+        examples.
+
+        :param source:
+           Object to read source data from
+        :param size:
+           Size in bytes of source object.
+        :param read_size:
+           How many bytes to request when ``read()``'ing from the source.
+        :param closefd:
+           Whether to close the source stream when the returned stream is
+           closed.
+        :return:
+           :py:class:`ZstdCompressionReader`
+        """
+        # Start a fresh frame on the shared context.
+        lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+        # If the source's length can be determined directly, it overrides
+        # the ``size`` argument.
+        try:
+            size = len(source)
+        except Exception:
+            pass
+
+        if size < 0:
+            size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
+
+        return ZstdCompressionReader(self, source, read_size, closefd=closefd)
+
+    def stream_writer(
+        self,
+        writer,
+        size=-1,
+        write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+        write_return_read=True,
+        closefd=True,
+    ):
+        """
+        Create a stream that will write compressed data into another stream.
+
+        The argument to ``stream_writer()`` must have a ``write(data)`` method.
+        As compressed data is available, ``write()`` will be called with the
+        compressed data as its argument. Many common Python types implement
+        ``write()``, including open file handles and ``io.BytesIO``.
+
+        See :py:class:`ZstdCompressionWriter` for more documentation, including
+        usage examples.
+
+        :param writer:
+           Stream to write compressed data to.
+        :param size:
+           Size in bytes of data to be compressed. If set, it will be used
+           to influence compression parameter tuning and could result in the
+           size being written into the header of the compressed data.
+        :param write_size:
+           How much data to ``write()`` to ``writer`` at a time.
+        :param write_return_read:
+           Whether ``write()`` should return the number of bytes that were
+           consumed from the input.
+        :param closefd:
+           Whether to ``close`` the ``writer`` when this stream is closed.
+        :return:
+           :py:class:`ZstdCompressionWriter`
+        """
+        if not hasattr(writer, "write"):
+            raise ValueError("must pass an object with a write() method")
+
+        # Start a fresh frame on the shared context.
+        lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+        if size < 0:
+            size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+        # NOTE(review): unlike the other stream methods, the pledged source
+        # size is not set here — ``size`` is forwarded for the writer to
+        # apply; presumably done when writing begins. Confirm against
+        # ZstdCompressionWriter.
+        return ZstdCompressionWriter(
+            self, writer, size, write_size, write_return_read, closefd=closefd
+        )
+
+    def read_to_iter(
+        self,
+        reader,
+        size=-1,
+        read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+        write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+    ):
+        """
+        Read uncompressed data from a reader and return an iterator
+
+        Returns an iterator of compressed data produced from reading from
+        ``reader``.
+
+        This method provides a mechanism to stream compressed data out of a
+        source as an iterator of data chunks.
+
+        Uncompressed data will be obtained from ``reader`` by calling the
+        ``read(size)`` method of it or by reading a slice (if ``reader``
+        conforms to the *buffer protocol*). The source data will be streamed
+        into a compressor. As compressed data is available, it will be exposed
+        to the iterator.
+
+        Data is read from the source in chunks of ``read_size``. Compressed
+        chunks are at most ``write_size`` bytes. Both values default to the
+        zstd input and output defaults, respectively.
+
+        If reading from the source via ``read()``, ``read()`` will be called
+        until it raises or returns an empty bytes (``b""``). It is perfectly
+        valid for the source to deliver fewer bytes than were requested
+        by ``read(size)``.
+
+        The caller is partially in control of how fast data is fed into the
+        compressor by how it consumes the returned iterator. The compressor
+        will not consume from the reader unless the caller consumes from the
+        iterator.
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> for chunk in cctx.read_to_iter(fh):
+        ...     # Do something with emitted data.
+
+        ``read_to_iter()`` accepts a ``size`` argument declaring the size of
+        the input stream:
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> for chunk in cctx.read_to_iter(fh, size=some_int):
+        >>>     pass
+
+        You can also control the size that data is ``read()`` from the source
+        and the ideal size of output chunks:
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> for chunk in cctx.read_to_iter(fh, read_size=16384, write_size=8192):
+        >>>     pass
+
+        ``read_to_iter()`` does not give direct control over the sizes of chunks
+        fed into the compressor. Instead, chunk sizes will be whatever the object
+        being read from delivers. These will often be of a uniform size.
+
+        :param reader:
+           Stream providing data to be compressed.
+        :param size:
+           Size in bytes of input data.
+        :param read_size:
+           Controls how many bytes are ``read()`` from the source.
+        :param write_size:
+           Controls the output size of emitted chunks.
+        :return:
+           Iterator of ``bytes``.
+        """
+
+        # Accept either a file-like object (read()) or a buffer-protocol
+        # object (sliced via __getitem__).
+        if hasattr(reader, "read"):
+            have_read = True
+        elif hasattr(reader, "__getitem__"):
+            have_read = False
+            buffer_offset = 0
+            size = len(reader)
+        else:
+            raise ValueError(
+                "must pass an object with a read() method or "
+                "conforms to buffer protocol"
+            )
+
+        # Start a fresh frame on the shared context.
+        lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
+
+        if size < 0:
+            size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+
+        in_buffer.src = ffi.NULL
+        in_buffer.size = 0
+        in_buffer.pos = 0
+
+        # One reusable output buffer; emitted chunks are copies of it.
+        dst_buffer = ffi.new("char[]", write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
+        out_buffer.pos = 0
+
+        while True:
+            # We should never have output data sitting around after a previous
+            # iteration.
+            assert out_buffer.pos == 0
+
+            # Collect input data.
+            if have_read:
+                read_result = reader.read(read_size)
+            else:
+                remaining = len(reader) - buffer_offset
+                slice_size = min(remaining, read_size)
+                read_result = reader[buffer_offset : buffer_offset + slice_size]
+                buffer_offset += slice_size
+
+            # No new input data. Break out of the read loop.
+            if not read_result:
+                break
+
+            # Feed all read data into the compressor and emit output until
+            # exhausted.
+            read_buffer = ffi.from_buffer(read_result)
+            in_buffer.src = read_buffer
+            in_buffer.size = len(read_buffer)
+            in_buffer.pos = 0
+
+            while in_buffer.pos < in_buffer.size:
+                zresult = lib.ZSTD_compressStream2(
+                    self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+                )
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError(
+                        "zstd compress error: %s" % _zstd_error(zresult)
+                    )
+
+                if out_buffer.pos:
+                    # Copy out of the shared buffer before yielding, since
+                    # the buffer is reused on the next iteration.
+                    data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                    out_buffer.pos = 0
+                    yield data
+
+            assert out_buffer.pos == 0
+
+            # And repeat the loop to collect more data.
+            continue
+
+        # If we get here, input is exhausted. End the stream and emit what
+        # remains.
+        while True:
+            assert out_buffer.pos == 0
+            zresult = lib.ZSTD_compressStream2(
+                self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+            )
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "error ending compression stream: %s" % _zstd_error(zresult)
+                )
+
+            if out_buffer.pos:
+                data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                out_buffer.pos = 0
+                yield data
+
+            # 0 from ZSTD_e_end means the frame is fully flushed.
+            if zresult == 0:
+                break
+
+    def multi_compress_to_buffer(self, data, threads=-1):
+        """
+        Compress multiple pieces of data as a single function call.
+
+        (Experimental. Not yet supported by CFFI backend.)
+
+        This function is optimized to perform multiple compression operations
+        as efficiently as possible with as little overhead as possible.
+
+        Data to be compressed can be passed as a ``BufferWithSegmentsCollection``,
+        a ``BufferWithSegments``, or a list containing byte like objects. Each
+        element of the container will be compressed individually using the
+        configured parameters on the ``ZstdCompressor`` instance.
+
+        The ``threads`` argument controls how many threads to use for
+        compression. ``0`` means to use a single thread. The default (``-1``)
+        and other negative values use the number of logical CPUs in the
+        machine.
+
+        The function returns a ``BufferWithSegmentsCollection``. This type
+        represents N discrete memory allocations, each holding 1 or more
+        compressed frames.
+
+        Output data is written to shared memory buffers. This means that unlike
+        regular Python objects, a reference to *any* object within the collection
+        keeps the shared buffer and therefore memory backing it alive. This can
+        have undesirable effects on process memory usage.
+
+        The API and behavior of this function is experimental and will likely
+        change. Known deficiencies include:
+
+        * If asked to use multiple threads, it will always spawn that many
+          threads, even if the input is too small to use them. It should
+          automatically lower the thread count when the extra threads would
+          just add overhead.
+        * The buffer allocation strategy is fixed. There is room to make it
+          dynamic, perhaps even to allow one output buffer per input,
+          facilitating a variation of the API to return a list without the
+          adverse effects of shared memory buffers.
+
+        :param data:
+           Source to read discrete pieces of data to compress.
+
+           Can be a ``BufferWithSegmentsCollection``, a ``BufferWithSegments``,
+           or a ``list[bytes]``.
+        :return:
+           BufferWithSegmentsCollection holding compressed data.
+        """
+        # Implemented only in the C backend; the CFFI backend raises.
+        raise NotImplementedError()
+
+    def frame_progression(self):
+        """
+        Return information on how much work the compressor has done.
+
+        Returns a 3-tuple of (ingested, consumed, produced).
+
+        >>> cctx = zstandard.ZstdCompressor()
+        >>> (ingested, consumed, produced) = cctx.frame_progression()
+        """
+        # Counts are byte totals reported by zstd for the current frame.
+        progression = lib.ZSTD_getFrameProgression(self._cctx)
+
+        return progression.ingested, progression.consumed, progression.produced
+
+
+class FrameParameters(object):
+    """Information about a zstd frame.
+
+    Instances have the following attributes:
+
+    ``content_size``
+       Integer size of original, uncompressed content. This will be ``0`` if the
+       original content size isn't written to the frame (controlled with the
+       ``write_content_size`` argument to ``ZstdCompressor``) or if the input
+       content size was ``0``.
+
+    ``window_size``
+       Integer size of maximum back-reference distance in compressed data.
+
+    ``dict_id``
+       Integer of dictionary ID used for compression. ``0`` if no dictionary
+       ID was used or if the dictionary ID was ``0``.
+
+    ``has_checksum``
+       Bool indicating whether a 4 byte content checksum is stored at the end
+       of the frame.
+    """
+
+    def __init__(self, fparams):
+        # ``fparams`` is a parsed ``ZSTD_frameHeader`` struct from the C API
+        # (see get_frame_parameters()).
+        self.content_size = fparams.frameContentSize
+        self.window_size = fparams.windowSize
+        self.dict_id = fparams.dictID
+        self.has_checksum = bool(fparams.checksumFlag)
+
+
+def frame_content_size(data):
+    """Obtain the decompressed size of a frame.
+
+    The returned value is usually accurate. But strictly speaking it should
+    not be trusted.
+
+    :param data:
+       Compressed frame data (at least the frame header).
+    :return:
+       ``-1`` if size unknown and a non-negative integer otherwise.
+    """
+    data_buffer = ffi.from_buffer(data)
+
+    size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
+
+    # Map zstd's sentinel return values to an exception / -1.
+    if size == lib.ZSTD_CONTENTSIZE_ERROR:
+        raise ZstdError("error when determining content size")
+    elif size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
+        return -1
+    else:
+        return size
+
+
+def frame_header_size(data):
+    """Obtain the size of a frame header.
+
+    :param data:
+       Compressed frame data (at least the beginning of the header).
+    :return:
+       Integer size in bytes.
+    :raises ZstdError:
+       If the header size cannot be determined from ``data``.
+    """
+    data_buffer = ffi.from_buffer(data)
+
+    zresult = lib.ZSTD_frameHeaderSize(data_buffer, len(data_buffer))
+    if lib.ZSTD_isError(zresult):
+        raise ZstdError(
+            "could not determine frame header size: %s" % _zstd_error(zresult)
+        )
+
+    return zresult
+
+
+def get_frame_parameters(data):
+    """
+    Parse a zstd frame header into frame parameters.
+
+    Depending on which fields are present in the frame and their values, the
+    length of the frame parameters varies. If insufficient bytes are passed
+    in to fully parse the frame parameters, ``ZstdError`` is raised. To ensure
+    frame parameters can be parsed, pass in at least 18 bytes.
+
+    :param data:
+       Data from which to read frame parameters.
+    :return:
+       :py:class:`FrameParameters`
+    """
+    params = ffi.new("ZSTD_frameHeader *")
+
+    data_buffer = ffi.from_buffer(data)
+    zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer))
+    if lib.ZSTD_isError(zresult):
+        raise ZstdError(
+            "cannot get frame parameters: %s" % _zstd_error(zresult)
+        )
+
+    # A positive non-error return is the number of bytes zstd needs to
+    # finish parsing the header.
+    if zresult:
+        raise ZstdError(
+            "not enough data for frame parameters; need %d bytes" % zresult
+        )
+
+    return FrameParameters(params[0])
+
+
class ZstdCompressionDict(object):
    """Represents a computed compression dictionary.

    Instances are obtained by calling :py:func:`train_dictionary` or by
    passing bytes obtained from another source into the constructor.

    Instances can be constructed from bytes:

    >>> dict_data = zstandard.ZstdCompressionDict(data)

    It is possible to construct a dictionary from *any* data. If the data
    doesn't begin with a magic header, it will be treated as a *prefix*
    dictionary. *Prefix* dictionaries allow compression operations to
    reference raw data within the dictionary.

    It is possible to force the use of *prefix* dictionaries or to require
    a dictionary header:

    >>> dict_data = zstandard.ZstdCompressionDict(data, dict_type=zstandard.DICT_TYPE_RAWCONTENT)
    >>> dict_data = zstandard.ZstdCompressionDict(data, dict_type=zstandard.DICT_TYPE_FULLDICT)

    You can see how many bytes are in the dictionary by calling ``len()``:

    >>> dict_data = zstandard.train_dictionary(size, samples)
    >>> dict_size = len(dict_data)  # will not be larger than ``size``

    Once you have a dictionary, you can pass it to the objects performing
    compression and decompression:

    >>> dict_data = zstandard.train_dictionary(131072, samples)
    >>> cctx = zstandard.ZstdCompressor(dict_data=dict_data)
    >>> for source_data in input_data:
    ...     compressed = cctx.compress(source_data)
    ...     # Do something with compressed data.
    ...
    >>> dctx = zstandard.ZstdDecompressor(dict_data=dict_data)
    >>> for compressed_data in input_data:
    ...     buffer = io.BytesIO()
    ...     with dctx.stream_writer(buffer) as decompressor:
    ...         decompressor.write(compressed_data)
    ...         # Do something with raw data in ``buffer``.

    Dictionaries have unique integer IDs. You can retrieve this ID via:

    >>> dict_id = zstandard.dictionary_id(dict_data)

    You can obtain the raw data in the dict (useful for persisting and constructing
    a ``ZstdCompressionDict`` later) via ``as_bytes()``:

    >>> dict_data = zstandard.train_dictionary(size, samples)
    >>> raw_data = dict_data.as_bytes()

    By default, when a ``ZstdCompressionDict`` is *attached* to a
    ``ZstdCompressor``, each ``ZstdCompressor`` performs work to prepare the
    dictionary for use. This is fine if only 1 compression operation is being
    performed or if the ``ZstdCompressor`` is being reused for multiple operations.
    But if multiple ``ZstdCompressor`` instances are being used with the dictionary,
    this can add overhead.

    It is possible to *precompute* the dictionary so it can readily be consumed
    by multiple ``ZstdCompressor`` instances:

    >>> d = zstandard.ZstdCompressionDict(data)
    >>> # Precompute for compression level 3.
    >>> d.precompute_compress(level=3)
    >>> # Precompute with specific compression parameters.
    >>> params = zstandard.ZstdCompressionParameters(...)
    >>> d.precompute_compress(compression_params=params)

    .. note::

       When a dictionary is precomputed, the compression parameters used to
       precompute the dictionary overwrite some of the compression parameters
       specified to ``ZstdCompressor``.

    :param data:
       Dictionary data.
    :param dict_type:
       Type of dictionary. One of the ``DICT_TYPE_*`` constants.
    """

    def __init__(self, data, dict_type=DICT_TYPE_AUTO, k=0, d=0):
        assert isinstance(data, bytes)
        self._data = data
        # Cover algorithm parameters this dictionary was trained with.
        # Both are 0 when constructed from raw bytes.
        self.k = k
        self.d = d

        if dict_type not in (
            DICT_TYPE_AUTO,
            DICT_TYPE_RAWCONTENT,
            DICT_TYPE_FULLDICT,
        ):
            # Bug fix: the original never interpolated ``%d``, so the
            # exception message contained a literal "%d".
            raise ValueError(
                "invalid dictionary load mode: %d; must use "
                "DICT_TYPE_* constants" % dict_type
            )

        self._dict_type = dict_type
        self._cdict = None

    def __len__(self):
        return len(self._data)

    def dict_id(self):
        """Obtain the integer ID of the dictionary."""
        return int(lib.ZDICT_getDictID(self._data, len(self._data)))

    def as_bytes(self):
        """Obtain the ``bytes`` representation of the dictionary."""
        return self._data

    def precompute_compress(self, level=0, compression_params=None):
        """Precompute a dictionary so it can be used by multiple compressors.

        Calling this method on an instance that will be used by multiple
        :py:class:`ZstdCompressor` instances will improve performance.

        :param level:
           Integer compression level from which to derive compression
           parameters. Mutually exclusive with ``compression_params``.
        :param compression_params:
           Explicit ``ZstdCompressionParameters`` to use instead of a level.
        :raises ValueError:
           If both or neither of ``level`` and ``compression_params`` are
           given.
        :raises ZstdError:
           If the compression dictionary could not be created.
        """
        if level and compression_params:
            raise ValueError(
                "must only specify one of level or " "compression_params"
            )

        if not level and not compression_params:
            raise ValueError("must specify one of level or compression_params")

        if level:
            cparams = lib.ZSTD_getCParams(level, 0, len(self._data))
        else:
            # Bug fix: ``ffi.new()`` requires a pointer or array ctype;
            # allocate a pointer and dereference to obtain the struct.
            # (The original passed a bare struct type, which CFFI rejects.)
            cparams = ffi.new("ZSTD_compressionParameters *")[0]
            cparams.chainLog = compression_params.chain_log
            cparams.hashLog = compression_params.hash_log
            cparams.minMatch = compression_params.min_match
            cparams.searchLog = compression_params.search_log
            cparams.strategy = compression_params.strategy
            cparams.targetLength = compression_params.target_length
            cparams.windowLog = compression_params.window_log

        # dlm_byRef: the CDict references self._data rather than copying it,
        # so this object must outlive the CDict (it does: we hold both).
        cdict = lib.ZSTD_createCDict_advanced(
            self._data,
            len(self._data),
            lib.ZSTD_dlm_byRef,
            self._dict_type,
            cparams,
            lib.ZSTD_defaultCMem,
        )
        if cdict == ffi.NULL:
            raise ZstdError("unable to precompute dictionary")

        self._cdict = ffi.gc(
            cdict, lib.ZSTD_freeCDict, size=lib.ZSTD_sizeof_CDict(cdict)
        )

    @property
    def _ddict(self):
        # ``property`` is a data descriptor, so an entry in the instance
        # ``__dict__`` does not shadow it. Read the cache explicitly;
        # otherwise a fresh DDict is created on every access, defeating the
        # caching write below (bug fix).
        ddict = self.__dict__.get("_ddict")
        if ddict is not None:
            return ddict

        ddict = lib.ZSTD_createDDict_advanced(
            self._data,
            len(self._data),
            lib.ZSTD_dlm_byRef,
            self._dict_type,
            lib.ZSTD_defaultCMem,
        )

        if ddict == ffi.NULL:
            raise ZstdError("could not create decompression dict")

        ddict = ffi.gc(
            ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict)
        )
        self.__dict__["_ddict"] = ddict

        return ddict
+
+
def train_dictionary(
    dict_size,
    samples,
    k=0,
    d=0,
    f=0,
    split_point=0.0,
    accel=0,
    notifications=0,
    dict_id=0,
    level=0,
    steps=0,
    threads=0,
):
    """Train a dictionary from sample data using the COVER algorithm.

    A compression dictionary of size ``dict_size`` will be created from the
    iterable of ``samples``. The raw dictionary bytes will be returned.

    The dictionary training mechanism is known as *cover*. More details about it
    are available in the paper *Effective Construction of Relative Lempel-Ziv
    Dictionaries* (authors: Liao, Petri, Moffat, Wirth).

    The cover algorithm takes parameters ``k`` and ``d``. These are the
    *segment size* and *dmer size*, respectively. The returned dictionary
    instance created by this function has ``k`` and ``d`` attributes
    containing the values for these parameters. If a ``ZstdCompressionDict``
    is constructed from raw bytes data (a content-only dictionary), the
    ``k`` and ``d`` attributes will be ``0``.

    The segment and dmer size parameters to the cover algorithm can either be
    specified manually or ``train_dictionary()`` can try multiple values
    and pick the best one, where *best* means the smallest compressed data size.
    This later mode is called *optimization* mode.

    Under the hood, this function always calls
    ``ZDICT_optimizeTrainFromBuffer_fastCover()``. See the corresponding C library
    documentation for more.

    If neither ``steps`` nor ``threads`` is defined, defaults for ``d``, ``steps``,
    and ``level`` will be used that are equivalent with what
    ``ZDICT_trainFromBuffer()`` would use.


    :param dict_size:
       Target size in bytes of the dictionary to generate.
    :param samples:
       A list of bytes holding samples the dictionary will be trained from.
    :param k:
       Segment size : constraint: 0 < k : Reasonable range [16, 2048+]
    :param d:
       dmer size : constraint: 0 < d <= k : Reasonable range [6, 16]
    :param f:
       log of size of frequency array : constraint: 0 < f <= 31 : 1 means
       default(20)
    :param split_point:
       Percentage of samples used for training: Only used for optimization.
       The first # samples * ``split_point`` samples will be used to training.
       The last # samples * (1 - split_point) samples will be used for testing.
       0 means default (0.75), 1.0 when all samples are used for both training
       and testing.
    :param accel:
       Acceleration level: constraint: 0 < accel <= 10. Higher means faster
       and less accurate, 0 means default(1).
    :param dict_id:
       Integer dictionary ID for the produced dictionary. Default is 0, which uses
       a random value.
    :param steps:
       Number of steps through ``k`` values to perform when trying parameter
       variations.
    :param threads:
       Number of threads to use when trying parameter variations. Default is 0,
       which means to use a single thread. A negative value can be specified to
       use as many threads as there are detected logical CPUs.
    :param level:
       Integer target compression level when trying parameter variations.
    :param notifications:
       Controls writing of informational messages to ``stderr``. ``0`` (the
       default) means to write nothing. ``1`` writes errors. ``2`` writes
       progression info. ``3`` writes more details. And ``4`` writes all info.
    :return:
       :py:class:`ZstdCompressionDict` holding the trained dictionary.
    :raises ZstdError:
       If dictionary training fails.
    """
    if not isinstance(samples, list):
        raise TypeError("samples must be a list")

    # Negative thread count means "one thread per logical CPU".
    if threads < 0:
        threads = _cpu_count()

    # With no explicit parameter search requested, mirror the defaults that
    # ZDICT_trainFromBuffer() would use.
    if not steps and not threads:
        d = d or 8
        steps = steps or 4
        level = level or 3

    total_size = sum(map(len, samples))

    # Flatten all samples into a single contiguous C buffer plus a parallel
    # array of per-sample lengths, as the ZDICT API requires.
    flat_samples = new_nonzero("char[]", total_size)
    lengths = new_nonzero("size_t[]", len(samples))

    offset = 0
    for index, sample in enumerate(samples):
        if not isinstance(sample, bytes):
            raise ValueError("samples must be bytes")

        n = len(sample)
        ffi.memmove(flat_samples + offset, sample, n)
        offset += n
        lengths[index] = n

    # Output buffer the trained dictionary is written into.
    dict_buffer = new_nonzero("char[]", dict_size)

    params = ffi.new("ZDICT_fastCover_params_t *")[0]
    params.k = k
    params.d = d
    params.f = f
    params.steps = steps
    params.nbThreads = threads
    params.splitPoint = split_point
    params.accel = accel
    params.zParams.notificationLevel = notifications
    params.zParams.dictID = dict_id
    params.zParams.compressionLevel = level

    zresult = lib.ZDICT_optimizeTrainFromBuffer_fastCover(
        ffi.addressof(dict_buffer),
        dict_size,
        ffi.addressof(flat_samples),
        ffi.addressof(lengths, 0),
        len(samples),
        ffi.addressof(params),
    )

    if lib.ZDICT_isError(zresult):
        msg = ffi.string(lib.ZDICT_getErrorName(zresult)).decode("utf-8")
        raise ZstdError("cannot train dict: %s" % msg)

    # On success, zresult is the number of dictionary bytes written.
    # The optimizer also fills in the k/d values it settled on.
    return ZstdCompressionDict(
        ffi.buffer(dict_buffer, zresult)[:],
        dict_type=DICT_TYPE_FULLDICT,
        k=params.k,
        d=params.d,
    )
+
+
class ZstdDecompressionObj(object):
    """A standard library API compatible decompressor.

    This type implements a decompressor that conforms to the API by other
    decompressors in Python's standard library. e.g. ``zlib.decompressobj``
    or ``bz2.BZ2Decompressor``. This allows callers to use zstd compression
    while conforming to a similar API.

    Compressed data chunks are fed into ``decompress(data)`` and
    uncompressed output (or an empty bytes) is returned. Output from
    subsequent calls needs to be concatenated to reassemble the full
    decompressed byte sequence.

    If ``read_across_frames=False``, each instance is single use: once an
    input frame is decoded, ``decompress()`` will raise an exception. If
    ``read_across_frames=True``, instances can decode multiple frames.

    >>> dctx = zstandard.ZstdDecompressor()
    >>> dobj = dctx.decompressobj()
    >>> data = dobj.decompress(compressed_chunk_0)
    >>> data = dobj.decompress(compressed_chunk_1)

    By default, calls to ``decompress()`` write output data in chunks of size
    ``DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE``. These chunks are concatenated
    before being returned to the caller. It is possible to define the size of
    these temporary chunks by passing ``write_size`` to ``decompressobj()``:

    >>> dctx = zstandard.ZstdDecompressor()
    >>> dobj = dctx.decompressobj(write_size=1048576)

    .. note::

       Because calls to ``decompress()`` may need to perform multiple
       memory (re)allocations, this streaming decompression API isn't as
       efficient as other APIs.
    """

    def __init__(self, decompressor, write_size, read_across_frames):
        # ZstdDecompressor owning the ZSTD_DCtx used for decoding.
        self._decompressor = decompressor
        # Size of each temporary output chunk allocated per decompress() call.
        self._write_size = write_size
        # Set True once a frame is fully decoded in single-frame mode; any
        # further decompress() call then raises.
        self._finished = False
        self._read_across_frames = read_across_frames
        # Input bytes past the end of the decoded frame (single-frame mode).
        self._unused_input = b""

    def decompress(self, data):
        """Send compressed data to the decompressor and obtain decompressed data.

        :param data:
           Data to feed into the decompressor.
        :return:
           Decompressed bytes.
        :raises ZstdError:
           If called again after a frame was fully decoded in single-frame
           mode, or if the zstd library reports a decoding error.
        """
        if self._finished:
            raise ZstdError("cannot use a decompressobj multiple times")

        in_buffer = ffi.new("ZSTD_inBuffer *")
        out_buffer = ffi.new("ZSTD_outBuffer *")

        data_buffer = ffi.from_buffer(data)

        # Nothing to feed; avoid calling into zstd with an empty buffer.
        if len(data_buffer) == 0:
            return b""

        in_buffer.src = data_buffer
        in_buffer.size = len(data_buffer)
        in_buffer.pos = 0

        # Temporary output chunk; flushed into `chunks` each iteration.
        dst_buffer = ffi.new("char[]", self._write_size)
        out_buffer.dst = dst_buffer
        out_buffer.size = len(dst_buffer)
        out_buffer.pos = 0

        chunks = []

        while True:
            zresult = lib.ZSTD_decompressStream(
                self._decompressor._dctx, out_buffer, in_buffer
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd decompressor error: %s" % _zstd_error(zresult)
                )

            # Always record any output from decompressor.
            if out_buffer.pos:
                chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])

            # 0 is only seen when a frame is fully decoded *and* fully flushed.
            # Behavior depends on whether we're in single or multiple frame
            # mode.
            if zresult == 0 and not self._read_across_frames:
                # Mark the instance as done and make any unconsumed input available
                # for retrieval.
                self._finished = True
                self._decompressor = None
                self._unused_input = data[in_buffer.pos : in_buffer.size]
                break
            elif zresult == 0 and self._read_across_frames:
                # We're at the end of a fully flushed frame and we can read more.
                # Try to read more if there's any more input.
                if in_buffer.pos == in_buffer.size:
                    break
                else:
                    # Reset the output chunk and decode the next frame.
                    out_buffer.pos = 0

            # We're not at the end of the frame *or* we're not fully flushed.

            # The decompressor will write out all the bytes it can to the output
            # buffer. So if the output buffer is partially filled and the input
            # is exhausted, there's nothing more to write. So we've done all we
            # can.
            elif (
                in_buffer.pos == in_buffer.size
                and out_buffer.pos < out_buffer.size
            ):
                break
            else:
                # Output buffer filled up; reuse it for another pass.
                out_buffer.pos = 0

        return b"".join(chunks)

    def flush(self, length=0):
        """Effectively a no-op.

        Implemented for compatibility with the standard library APIs.

        Safe to call at any time.

        :return:
           Empty bytes.
        """
        return b""

    @property
    def unused_data(self):
        """Bytes past the end of compressed data.

        If ``decompress()`` is fed additional data beyond the end of a zstd
        frame, this value will be non-empty once ``decompress()`` fully decodes
        the input frame.
        """
        return self._unused_input

    @property
    def unconsumed_tail(self):
        """Data that has not yet been fed into the decompressor."""
        # Always empty: decompress() consumes all input it is given.
        return b""

    @property
    def eof(self):
        """Whether the end of the compressed data stream has been reached."""
        return self._finished
+
+
class ZstdDecompressionReader(object):
    """Read only decompressor that pulls uncompressed data from another stream.

    This type provides a read-only stream interface for performing transparent
    decompression from another stream or data source. It conforms to the
    ``io.RawIOBase`` interface. Only methods relevant to reading are
    implemented.

    >>> with open(path, 'rb') as fh:
    >>> dctx = zstandard.ZstdDecompressor()
    >>> reader = dctx.stream_reader(fh)
    >>> while True:
    ...     chunk = reader.read(16384)
    ...     if not chunk:
    ...         break
    ...     # Do something with decompressed chunk.

    The stream can also be used as a context manager:

    >>> with open(path, 'rb') as fh:
    ...     dctx = zstandard.ZstdDecompressor()
    ...     with dctx.stream_reader(fh) as reader:
    ...         ...

    When used as a context manager, the stream is closed and the underlying
    resources are released when the context manager exits. Future operations
    against the stream will fail.

    The ``source`` argument to ``stream_reader()`` can be any object with a
    ``read(size)`` method or any object implementing the *buffer protocol*.

    If the ``source`` is a stream, you can specify how large ``read()`` requests
    to that stream should be via the ``read_size`` argument. It defaults to
    ``zstandard.DECOMPRESSION_RECOMMENDED_INPUT_SIZE``.:

    >>> with open(path, 'rb') as fh:
    ...     dctx = zstandard.ZstdDecompressor()
    ...     # Will perform fh.read(8192) when obtaining data for the decompressor.
    ...     with dctx.stream_reader(fh, read_size=8192) as reader:
    ...         ...

    Instances are *partially* seekable. Absolute and relative positions
    (``SEEK_SET`` and ``SEEK_CUR``) forward of the current position are
    allowed. Offsets behind the current read position and offsets relative
    to the end of stream are not allowed and will raise ``ValueError``
    if attempted.

    ``tell()`` returns the number of decompressed bytes read so far.

    Not all I/O methods are implemented. Notably missing is support for
    ``readline()``, ``readlines()``, and linewise iteration support. This is
    because streams operate on binary data - not text data. If you want to
    convert decompressed output to text, you can chain an ``io.TextIOWrapper``
    to the stream:

    >>> with open(path, 'rb') as fh:
    ...     dctx = zstandard.ZstdDecompressor()
    ...     stream_reader = dctx.stream_reader(fh)
    ...     text_stream = io.TextIOWrapper(stream_reader, encoding='utf-8')
    ...     for line in text_stream:
    ...         ...
    """

    def __init__(
        self,
        decompressor,
        source,
        read_size,
        read_across_frames,
        closefd=True,
    ):
        # ZstdDecompressor owning the ZSTD_DCtx used for decoding.
        self._decompressor = decompressor
        # Either an object with a read() method or a buffer-protocol object.
        self._source = source
        # Preferred size of read() requests issued against a stream source.
        self._read_size = read_size
        self._read_across_frames = bool(read_across_frames)
        self._closefd = bool(closefd)
        self._entered = False
        self._closed = False
        # Running count of decompressed bytes handed out; reported by tell().
        self._bytes_decompressed = 0
        self._finished_input = False
        # NOTE(review): _finished_output is read by read*/readinto* but never
        # set True anywhere in this class — confirm whether that is intended.
        self._finished_output = False
        self._in_buffer = ffi.new("ZSTD_inBuffer *")
        # Holds a ref to self._in_buffer.src.
        self._source_buffer = None

    def __enter__(self):
        if self._entered:
            raise ValueError("cannot __enter__ multiple times")

        if self._closed:
            raise ValueError("stream is closed")

        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._entered = False
        # Drop the decompressor reference before closing so its resources
        # can be reclaimed; exceptions are never suppressed (return False).
        self._decompressor = None
        self.close()
        self._source = None

        return False

    def readable(self):
        return True

    def writable(self):
        return False

    def seekable(self):
        # Note: seek() still works for forward seeks despite this returning
        # False (the stream is only *partially* seekable).
        return False

    def readline(self, size=-1):
        raise io.UnsupportedOperation()

    def readlines(self, hint=-1):
        raise io.UnsupportedOperation()

    def write(self, data):
        raise io.UnsupportedOperation()

    def writelines(self, lines):
        raise io.UnsupportedOperation()

    def isatty(self):
        return False

    def flush(self):
        return None

    def close(self):
        # Idempotent per io.RawIOBase semantics.
        if self._closed:
            return None

        self._closed = True

        # Only close the source if we own it and it is closeable.
        f = getattr(self._source, "close", None)
        if self._closefd and f:
            f()

    @property
    def closed(self):
        return self._closed

    def tell(self):
        # Position is measured in decompressed bytes.
        return self._bytes_decompressed

    def readall(self):
        # Accumulate fixed-size reads until EOF.
        chunks = []

        while True:
            chunk = self.read(1048576)
            if not chunk:
                break

            chunks.append(chunk)

        return b"".join(chunks)

    def __iter__(self):
        raise io.UnsupportedOperation()

    def __next__(self):
        raise io.UnsupportedOperation()

    next = __next__

    def _read_input(self):
        """Populate self._in_buffer with compressed data from the source."""
        # We have data left over in the input buffer. Use it.
        if self._in_buffer.pos < self._in_buffer.size:
            return

        # All input data exhausted. Nothing to do.
        if self._finished_input:
            return

        # Else populate the input buffer from our source.
        if hasattr(self._source, "read"):
            data = self._source.read(self._read_size)

            if not data:
                self._finished_input = True
                return

            # Keep a reference to the buffer so the memory backing
            # self._in_buffer.src stays alive.
            self._source_buffer = ffi.from_buffer(data)
            self._in_buffer.src = self._source_buffer
            self._in_buffer.size = len(self._source_buffer)
            self._in_buffer.pos = 0
        else:
            # Buffer-protocol source: expose the whole thing at once.
            self._source_buffer = ffi.from_buffer(self._source)
            self._in_buffer.src = self._source_buffer
            self._in_buffer.size = len(self._source_buffer)
            self._in_buffer.pos = 0

    def _decompress_into_buffer(self, out_buffer):
        """Decompress available input into an output buffer.

        Returns True if data in output buffer should be emitted.
        """
        zresult = lib.ZSTD_decompressStream(
            self._decompressor._dctx, out_buffer, self._in_buffer
        )

        # Input fully consumed: release the source buffer reference.
        if self._in_buffer.pos == self._in_buffer.size:
            self._in_buffer.src = ffi.NULL
            self._in_buffer.pos = 0
            self._in_buffer.size = 0
            self._source_buffer = None

            # A buffer-protocol source is handed over in one shot, so
            # consuming it fully means there is no more input.
            if not hasattr(self._source, "read"):
                self._finished_input = True

        if lib.ZSTD_isError(zresult):
            raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))

        # Emit data if there is data AND either:
        # a) output buffer is full (read amount is satisfied)
        # b) we're at end of a frame and not in frame spanning mode
        return out_buffer.pos and (
            out_buffer.pos == out_buffer.size
            or zresult == 0
            and not self._read_across_frames
        )

    def read(self, size=-1):
        if self._closed:
            raise ValueError("stream is closed")

        if size < -1:
            raise ValueError("cannot read negative amounts less than -1")

        if size == -1:
            # This is recursive. But it gets the job done.
            return self.readall()

        if self._finished_output or size == 0:
            return b""

        # We /could/ call into readinto() here. But that introduces more
        # overhead.
        dst_buffer = ffi.new("char[]", size)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dst_buffer
        out_buffer.size = size
        out_buffer.pos = 0

        # First try satisfying the read from input already buffered.
        self._read_input()
        if self._decompress_into_buffer(out_buffer):
            self._bytes_decompressed += out_buffer.pos
            return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

        # Keep pulling input until we can emit or the source is exhausted.
        while not self._finished_input:
            self._read_input()
            if self._decompress_into_buffer(out_buffer):
                self._bytes_decompressed += out_buffer.pos
                return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

        # Source exhausted: return whatever was decompressed (possibly b"").
        self._bytes_decompressed += out_buffer.pos
        return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

    def readinto(self, b):
        if self._closed:
            raise ValueError("stream is closed")

        if self._finished_output:
            return 0

        # TODO use writable=True once we require CFFI >= 1.12.
        dest_buffer = ffi.from_buffer(b)
        # Zero-length memmove forces an error now if `b` is read-only.
        ffi.memmove(b, b"", 0)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dest_buffer
        out_buffer.size = len(dest_buffer)
        out_buffer.pos = 0

        # Same strategy as read(): buffered input first, then pull more.
        self._read_input()
        if self._decompress_into_buffer(out_buffer):
            self._bytes_decompressed += out_buffer.pos
            return out_buffer.pos

        while not self._finished_input:
            self._read_input()
            if self._decompress_into_buffer(out_buffer):
                self._bytes_decompressed += out_buffer.pos
                return out_buffer.pos

        self._bytes_decompressed += out_buffer.pos
        return out_buffer.pos

    def read1(self, size=-1):
        if self._closed:
            raise ValueError("stream is closed")

        if size < -1:
            raise ValueError("cannot read negative amounts less than -1")

        if self._finished_output or size == 0:
            return b""

        # -1 returns arbitrary number of bytes.
        if size == -1:
            size = DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE

        dst_buffer = ffi.new("char[]", size)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dst_buffer
        out_buffer.size = size
        out_buffer.pos = 0

        # read1() dictates that we can perform at most 1 call to underlying
        # stream to get input. However, we can't satisfy this restriction with
        # decompression because not all input generates output. So we allow
        # multiple read(). But unlike read(), we stop once we have any output.
        while not self._finished_input:
            self._read_input()
            self._decompress_into_buffer(out_buffer)

            if out_buffer.pos:
                break

        self._bytes_decompressed += out_buffer.pos
        return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

    def readinto1(self, b):
        if self._closed:
            raise ValueError("stream is closed")

        if self._finished_output:
            return 0

        # TODO use writable=True once we require CFFI >= 1.12.
        dest_buffer = ffi.from_buffer(b)
        # Zero-length memmove forces an error now if `b` is read-only.
        ffi.memmove(b, b"", 0)

        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dest_buffer
        out_buffer.size = len(dest_buffer)
        out_buffer.pos = 0

        # Like read1(): stop as soon as any output is produced.
        while not self._finished_input and not self._finished_output:
            self._read_input()
            self._decompress_into_buffer(out_buffer)

            if out_buffer.pos:
                break

        self._bytes_decompressed += out_buffer.pos
        return out_buffer.pos

    def seek(self, pos, whence=os.SEEK_SET):
        if self._closed:
            raise ValueError("stream is closed")

        read_amount = 0

        if whence == os.SEEK_SET:
            if pos < 0:
                raise OSError("cannot seek to negative position with SEEK_SET")

            if pos < self._bytes_decompressed:
                raise OSError(
                    "cannot seek zstd decompression stream " "backwards"
                )

            read_amount = pos - self._bytes_decompressed

        elif whence == os.SEEK_CUR:
            if pos < 0:
                raise OSError(
                    "cannot seek zstd decompression stream " "backwards"
                )

            read_amount = pos
        elif whence == os.SEEK_END:
            raise OSError(
                "zstd decompression streams cannot be seeked " "with SEEK_END"
            )

        # Forward seek is implemented by reading and discarding decompressed
        # bytes until the target position (or EOF) is reached.
        while read_amount:
            result = self.read(
                min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
            )

            if not result:
                break

            read_amount -= len(result)

        return self._bytes_decompressed
+
+
+class ZstdDecompressionWriter(object):
+    """
+    Write-only stream wrapper that performs decompression.
+
+    This type provides a writable stream that performs decompression and writes
+    decompressed data to another stream.
+
+    This type implements the ``io.RawIOBase`` interface. Only methods that
+    involve writing will do useful things.
+
+    Behavior is similar to :py:meth:`ZstdCompressor.stream_writer`: compressed
+    data is sent to the decompressor by calling ``write(data)`` and decompressed
+    output is written to the inner stream by calling its ``write(data)``
+    method:
+
+    >>> dctx = zstandard.ZstdDecompressor()
+    >>> decompressor = dctx.stream_writer(fh)
+    >>> # Will call fh.write() with uncompressed data.
+    >>> decompressor.write(compressed_data)
+
+    Instances can be used as context managers. However, context managers add no
+    extra special behavior other than automatically calling ``close()`` when
+    they exit.
+
+    Calling ``close()`` will mark the stream as closed and subsequent I/O
+    operations will raise ``ValueError`` (per the documented behavior of
+    ``io.RawIOBase``). ``close()`` will also call ``close()`` on the
+    underlying stream if such a method exists and the instance was created with
+    ``closefd=True``.
+
+    The size of chunks to ``write()`` to the destination can be specified:
+
+    >>> dctx = zstandard.ZstdDecompressor()
+    >>> with dctx.stream_writer(fh, write_size=16384) as decompressor:
+    >>>    pass
+
+    You can see how much memory is being used by the decompressor:
+
+    >>> dctx = zstandard.ZstdDecompressor()
+    >>> with dctx.stream_writer(fh) as decompressor:
+    >>>    byte_size = decompressor.memory_size()
+
+    ``stream_writer()`` accepts a ``write_return_read`` boolean argument to control
+    the return value of ``write()``. When ``True`` (the default), ``write()``
+    returns the number of bytes that were read from the input. When ``False``,
+    ``write()`` returns the number of bytes that were ``write()`` to the inner
+    stream.
+    """
+
    def __init__(
        self,
        decompressor,
        writer,
        write_size,
        write_return_read,
        closefd=True,
    ):
        # Ensure the decompressor has a live ZSTD_DCtx before any write().
        decompressor._ensure_dctx()

        self._decompressor = decompressor
        # Inner stream that receives decompressed output.
        self._writer = writer
        self._write_size = write_size
        # Controls whether write() reports bytes read (True) or written.
        self._write_return_read = bool(write_return_read)
        self._closefd = bool(closefd)
        self._entered = False
        # True only while close() is flushing, so flush() can skip the inner
        # stream during teardown.
        self._closing = False
        self._closed = False
+
    def __enter__(self):
        """Enter the context manager; entry is not reentrant.

        :raises ValueError: if the stream has been closed.
        :raises ZstdError: on nested ``__enter__`` calls.
        """
        if self._closed:
            raise ValueError("stream is closed")

        if self._entered:
            raise ZstdError("cannot __enter__ multiple times")

        self._entered = True

        return self
+
    def __exit__(self, exc_type, exc_value, exc_tb):
        """Close the stream on context exit; exceptions are never suppressed."""
        self._entered = False
        self.close()

        return False
+
    def __iter__(self):
        # Iteration is a read-side operation; this stream is write-only.
        raise io.UnsupportedOperation()
+
    def __next__(self):
        # Iteration is a read-side operation; this stream is write-only.
        raise io.UnsupportedOperation()
+
    def memory_size(self):
        """Return the size in bytes of the underlying ``ZSTD_DCtx``."""
        return lib.ZSTD_sizeof_DCtx(self._decompressor._dctx)
+
+    def close(self):
+        if self._closed:
+            return
+
+        try:
+            self._closing = True
+            self.flush()
+        finally:
+            self._closing = False
+            self._closed = True
+
+        f = getattr(self._writer, "close", None)
+        if self._closefd and f:
+            f()
+
    @property
    def closed(self):
        """Whether the stream is closed (per the ``io`` module contract)."""
        return self._closed
+
+    def fileno(self):
+        f = getattr(self._writer, "fileno", None)
+        if f:
+            return f()
+        else:
+            raise OSError("fileno not available on underlying writer")
+
+    def flush(self):
+        if self._closed:
+            raise ValueError("stream is closed")
+
+        f = getattr(self._writer, "flush", None)
+        if f and not self._closing:
+            return f()
+
+    def isatty(self):
+        return False
+
+    def readable(self):
+        return False
+
+    def readline(self, size=-1):
+        raise io.UnsupportedOperation()
+
+    def readlines(self, hint=-1):
+        raise io.UnsupportedOperation()
+
+    def seek(self, offset, whence=None):
+        raise io.UnsupportedOperation()
+
+    def seekable(self):
+        return False
+
+    def tell(self):
+        raise io.UnsupportedOperation()
+
+    def truncate(self, size=None):
+        raise io.UnsupportedOperation()
+
+    def writable(self):
+        return True
+
+    def writelines(self, lines):
+        raise io.UnsupportedOperation()
+
+    def read(self, size=-1):
+        raise io.UnsupportedOperation()
+
+    def readall(self):
+        raise io.UnsupportedOperation()
+
+    def readinto(self, b):
+        raise io.UnsupportedOperation()
+
    def write(self, data):
        """Feed compressed ``data`` to the decompressor and write decompressed
        output to the inner stream.

        :param data: bytes-like object containing compressed input.
        :return: number of input bytes consumed when ``write_return_read`` is
            true; otherwise the number of decompressed bytes written to the
            inner stream.
        :raises ValueError: if the stream is closed.
        :raises ZstdError: if zstd reports a decompression error.
        """
        if self._closed:
            raise ValueError("stream is closed")

        total_write = 0

        in_buffer = ffi.new("ZSTD_inBuffer *")
        out_buffer = ffi.new("ZSTD_outBuffer *")

        # Hold a reference to the input buffer so it stays alive while the C
        # code reads from it.
        data_buffer = ffi.from_buffer(data)
        in_buffer.src = data_buffer
        in_buffer.size = len(data_buffer)
        in_buffer.pos = 0

        dst_buffer = ffi.new("char[]", self._write_size)
        out_buffer.dst = dst_buffer
        out_buffer.size = len(dst_buffer)
        out_buffer.pos = 0

        dctx = self._decompressor._dctx

        # Consume all input, draining decompressed output to the inner
        # stream whenever the output buffer has data.
        while in_buffer.pos < in_buffer.size:
            zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer)
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd decompress error: %s" % _zstd_error(zresult)
                )

            if out_buffer.pos:
                self._writer.write(
                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                )
                total_write += out_buffer.pos
                out_buffer.pos = 0

        if self._write_return_read:
            return in_buffer.pos
        else:
            return total_write
+
+
+class ZstdDecompressor(object):
+    """
+    Context for performing zstandard decompression.
+
+    Each instance is essentially a wrapper around a ``ZSTD_DCtx`` from zstd's
+    C API.
+
+    An instance can compress data various ways. Instances can be used multiple
+    times.
+
+    The interface of this class is very similar to
+    :py:class:`zstandard.ZstdCompressor` (by design).
+
+    Assume that each ``ZstdDecompressor`` instance can only handle a single
+    logical compression operation at the same time. i.e. if you call a method
+    like ``decompressobj()`` to obtain multiple objects derived from the same
+    ``ZstdDecompressor`` instance and attempt to use them simultaneously, errors
+    will likely occur.
+
+    If you need to perform multiple logical decompression operations and you
+    can't guarantee those operations are temporally non-overlapping, you need
+    to obtain multiple ``ZstdDecompressor`` instances.
+
+    Unless specified otherwise, assume that no two methods of
+    ``ZstdDecompressor`` instances can be called from multiple Python
+    threads simultaneously. In other words, assume instances are not thread safe
+    unless stated otherwise.
+
+    :param dict_data:
+       Compression dictionary to use.
+    :param max_window_size:
+       Sets an upper limit on the window size for decompression operations in
+       kibibytes. This setting can be used to prevent large memory allocations
+       for inputs using large compression windows.
+    :param format:
+       Set the format of data for the decoder.
+
+       By default this is ``zstandard.FORMAT_ZSTD1``. It can be set to
+       ``zstandard.FORMAT_ZSTD1_MAGICLESS`` to allow decoding frames without
+       the 4 byte magic header. Not all decompression APIs support this mode.
+    """
+
    def __init__(self, dict_data=None, max_window_size=0, format=FORMAT_ZSTD1):
        """Create a decompression context.

        :param dict_data: optional compression dictionary.
        :param max_window_size: upper bound on the decompression window size
            (0 means no explicit limit).
        :param format: frame format constant, e.g. ``FORMAT_ZSTD1``.
        :raises MemoryError: if the C decompression context cannot be
            allocated.
        """
        # Settings are stored here and applied to the DCtx by _ensure_dctx().
        self._dict_data = dict_data
        self._max_window_size = max_window_size
        self._format = format

        dctx = lib.ZSTD_createDCtx()
        if dctx == ffi.NULL:
            raise MemoryError()

        self._dctx = dctx

        # Defer setting up garbage collection until full state is loaded so
        # the memory size is more accurate.
        try:
            self._ensure_dctx()
        finally:
            self._dctx = ffi.gc(
                dctx, lib.ZSTD_freeDCtx, size=lib.ZSTD_sizeof_DCtx(dctx)
            )
+
    def memory_size(self):
        """Size of decompression context, in bytes.

        >>> dctx = zstandard.ZstdDecompressor()
        >>> size = dctx.memory_size()

        :return: integer number of bytes used by the ``ZSTD_DCtx``.
        """
        return lib.ZSTD_sizeof_DCtx(self._dctx)
+
+    def decompress(
+        self,
+        data,
+        max_output_size=0,
+        read_across_frames=False,
+        allow_extra_data=True,
+    ):
+        """
+        Decompress data in a single operation.
+
+        This method will decompress the input data in a single operation and
+        return the decompressed data.
+
+        The input bytes are expected to contain at least 1 full Zstandard frame
+        (something compressed with :py:meth:`ZstdCompressor.compress` or
+        similar). If the input does not contain a full frame, an exception will
+        be raised.
+
+        ``read_across_frames`` controls whether to read multiple zstandard
+        frames in the input. When False, decompression stops after reading the
+        first frame. This feature is not yet implemented but the argument is
+        provided for forward API compatibility when the default is changed to
+        True in a future release. For now, if you need to decompress multiple
+        frames, use an API like :py:meth:`ZstdCompressor.stream_reader` with
+        ``read_across_frames=True``.
+
+        ``allow_extra_data`` controls how to handle extra input data after a
+        fully decoded frame. If False, any extra data (which could be a valid
+        zstd frame) will result in ``ZstdError`` being raised. If True, extra
+        data is silently ignored. The default will likely change to False in a
+        future release when ``read_across_frames`` defaults to True.
+
+        If the input contains extra data after a full frame, that extra input
+        data is silently ignored. This behavior is undesirable in many scenarios
+        and will likely be changed or controllable in a future release (see
+        #181).
+
+        If the frame header of the compressed data does not contain the content
+        size, ``max_output_size`` must be specified or ``ZstdError`` will be
+        raised. An allocation of size ``max_output_size`` will be performed and an
+        attempt will be made to perform decompression into that buffer. If the
+        buffer is too small or cannot be allocated, ``ZstdError`` will be
+        raised. The buffer will be resized if it is too large.
+
+        Uncompressed data could be much larger than compressed data. As a result,
+        calling this function could result in a very large memory allocation
+        being performed to hold the uncompressed data. This could potentially
+        result in ``MemoryError`` or system memory swapping. If you don't need
+        the full output data in a single contiguous array in memory, consider
+        using streaming decompression for more resilient memory behavior.
+
+        Usage:
+
+        >>> dctx = zstandard.ZstdDecompressor()
+        >>> decompressed = dctx.decompress(data)
+
+        If the compressed data doesn't have its content size embedded within it,
+        decompression can be attempted by specifying the ``max_output_size``
+        argument:
+
+        >>> dctx = zstandard.ZstdDecompressor()
+        >>> uncompressed = dctx.decompress(data, max_output_size=1048576)
+
+        Ideally, ``max_output_size`` will be identical to the decompressed
+        output size.
+
+        .. important::
+
+           If the exact size of decompressed data is unknown (not passed in
+           explicitly and not stored in the zstd frame), for performance
+           reasons it is encouraged to use a streaming API.
+
+        :param data:
+           Compressed data to decompress.
+        :param max_output_size:
+           Integer max size of response.
+
+           If ``0``, there is no limit and we can attempt to allocate an output
+           buffer of infinite size.
+        :return:
+           ``bytes`` representing decompressed output.
+        """
+
+        if read_across_frames:
+            raise ZstdError(
+                "ZstdDecompressor.read_across_frames=True is not yet implemented"
+            )
+
+        self._ensure_dctx()
+
+        data_buffer = ffi.from_buffer(data)
+
+        output_size = lib.ZSTD_getFrameContentSize(
+            data_buffer, len(data_buffer)
+        )
+
+        if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
+            raise ZstdError("error determining content size from frame header")
+        elif output_size == 0:
+            return b""
+        elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
+            if not max_output_size:
+                raise ZstdError(
+                    "could not determine content size in frame header"
+                )
+
+            result_buffer = ffi.new("char[]", max_output_size)
+            result_size = max_output_size
+            output_size = 0
+        else:
+            result_buffer = ffi.new("char[]", output_size)
+            result_size = output_size
+
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+        out_buffer.dst = result_buffer
+        out_buffer.size = result_size
+        out_buffer.pos = 0
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        in_buffer.src = data_buffer
+        in_buffer.size = len(data_buffer)
+        in_buffer.pos = 0
+
+        zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError("decompression error: %s" % _zstd_error(zresult))
+        elif zresult:
+            raise ZstdError(
+                "decompression error: did not decompress full frame"
+            )
+        elif output_size and out_buffer.pos != output_size:
+            raise ZstdError(
+                "decompression error: decompressed %d bytes; expected %d"
+                % (zresult, output_size)
+            )
+        elif not allow_extra_data and in_buffer.pos < in_buffer.size:
+            count = in_buffer.size - in_buffer.pos
+
+            raise ZstdError(
+                "compressed input contains %d bytes of unused data, which is disallowed"
+                % count
+            )
+
+        return ffi.buffer(result_buffer, out_buffer.pos)[:]
+
+    def stream_reader(
+        self,
+        source,
+        read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+        read_across_frames=False,
+        closefd=True,
+    ):
+        """
+        Read-only stream wrapper that performs decompression.
+
+        This method obtains an object that conforms to the ``io.RawIOBase``
+        interface and performs transparent decompression via ``read()``
+        operations. Source data is obtained by calling ``read()`` on a
+        source stream or object implementing the buffer protocol.
+
+        See :py:class:`zstandard.ZstdDecompressionReader` for more documentation
+        and usage examples.
+
+        :param source:
+           Source of compressed data to decompress. Can be any object
+           with a ``read(size)`` method or that conforms to the buffer protocol.
+        :param read_size:
+           Integer number of bytes to read from the source and feed into the
+           compressor at a time.
+        :param read_across_frames:
+           Whether to read data across multiple zstd frames. If False,
+           decompression is stopped at frame boundaries.
+        :param closefd:
+           Whether to close the source stream when this instance is closed.
+        :return:
+           :py:class:`zstandard.ZstdDecompressionReader`.
+        """
+        self._ensure_dctx()
+        return ZstdDecompressionReader(
+            self, source, read_size, read_across_frames, closefd=closefd
+        )
+
+    def decompressobj(
+        self,
+        write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+        read_across_frames=False,
+    ):
+        """Obtain a standard library compatible incremental decompressor.
+
+        See :py:class:`ZstdDecompressionObj` for more documentation
+        and usage examples.
+
+        :param write_size: size of internal output buffer to collect decompressed
+          chunks in.
+        :param read_across_frames: whether to read across multiple zstd frames.
+          If False, reading stops after 1 frame and subsequent decompress
+          attempts will raise an exception.
+        :return:
+           :py:class:`zstandard.ZstdDecompressionObj`
+        """
+        if write_size < 1:
+            raise ValueError("write_size must be positive")
+
+        self._ensure_dctx()
+        return ZstdDecompressionObj(
+            self, write_size=write_size, read_across_frames=read_across_frames
+        )
+
    def read_to_iter(
        self,
        reader,
        read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
        write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
        skip_bytes=0,
    ):
        """Read compressed data to an iterator of uncompressed chunks.

        This method will read data from ``reader``, feed it to a decompressor,
        and emit ``bytes`` chunks representing the decompressed result.

        >>> dctx = zstandard.ZstdDecompressor()
        >>> for chunk in dctx.read_to_iter(fh):
        ...     # Do something with original data.

        ``read_to_iter()`` accepts an object with a ``read(size)`` method that
        will return compressed bytes or an object conforming to the buffer
        protocol.

        ``read_to_iter()`` returns an iterator whose elements are chunks of the
        decompressed data.

        The size of requested ``read()`` from the source can be specified:

        >>> dctx = zstandard.ZstdDecompressor()
        >>> for chunk in dctx.read_to_iter(fh, read_size=16384):
        ...    pass

        It is also possible to skip leading bytes in the input data:

        >>> dctx = zstandard.ZstdDecompressor()
        >>> for chunk in dctx.read_to_iter(fh, skip_bytes=1):
        ...    pass

        .. tip::

           Skipping leading bytes is useful if the source data contains extra
           *header* data. Traditionally, you would need to create a slice or
           ``memoryview`` of the data you want to decompress. This would create
           overhead. It is more efficient to pass the offset into this API.

        Similarly to :py:meth:`ZstdCompressor.read_to_iter`, the consumer of the
        iterator controls when data is decompressed. If the iterator isn't consumed,
        decompression is put on hold.

        When ``read_to_iter()`` is passed an object conforming to the buffer protocol,
        the behavior may seem similar to what occurs when the simple decompression
        API is used. However, this API works when the decompressed size is unknown.
        Furthermore, if feeding large inputs, the decompressor will work in chunks
        instead of performing a single operation.

        :param reader:
           Source of compressed data. Can be any object with a
           ``read(size)`` method or any object conforming to the buffer
           protocol.
        :param read_size:
           Integer size of data chunks to read from ``reader`` and feed into
           the decompressor.
        :param write_size:
           Integer size of data chunks to emit from iterator.
        :param skip_bytes:
           Integer number of bytes to skip over before sending data into
           the decompressor.
        :return:
           Iterator of ``bytes`` representing uncompressed data.
        """

        if skip_bytes >= read_size:
            raise ValueError("skip_bytes must be smaller than read_size")

        # Sources are either stream-like (read()) or buffer-like
        # (__getitem__ + len()); anything else is rejected.
        if hasattr(reader, "read"):
            have_read = True
        elif hasattr(reader, "__getitem__"):
            have_read = False
            buffer_offset = 0
            size = len(reader)
        else:
            raise ValueError(
                "must pass an object with a read() method or "
                "conforms to buffer protocol"
            )

        if skip_bytes:
            if have_read:
                # Consume and discard the leading bytes from the stream.
                reader.read(skip_bytes)
            else:
                if skip_bytes > size:
                    raise ValueError("skip_bytes larger than first input chunk")

                buffer_offset = skip_bytes

        self._ensure_dctx()

        in_buffer = ffi.new("ZSTD_inBuffer *")
        out_buffer = ffi.new("ZSTD_outBuffer *")

        dst_buffer = ffi.new("char[]", write_size)
        out_buffer.dst = dst_buffer
        out_buffer.size = len(dst_buffer)
        out_buffer.pos = 0

        while True:
            assert out_buffer.pos == 0

            # Obtain the next slab of compressed input.
            if have_read:
                read_result = reader.read(read_size)
            else:
                remaining = size - buffer_offset
                slice_size = min(remaining, read_size)
                read_result = reader[buffer_offset : buffer_offset + slice_size]
                buffer_offset += slice_size

            # No new input. Break out of read loop.
            if not read_result:
                break

            # Feed all read data into decompressor and emit output until
            # exhausted.
            read_buffer = ffi.from_buffer(read_result)
            in_buffer.src = read_buffer
            in_buffer.size = len(read_buffer)
            in_buffer.pos = 0

            while in_buffer.pos < in_buffer.size:
                assert out_buffer.pos == 0

                zresult = lib.ZSTD_decompressStream(
                    self._dctx, out_buffer, in_buffer
                )
                if lib.ZSTD_isError(zresult):
                    raise ZstdError(
                        "zstd decompress error: %s" % _zstd_error(zresult)
                    )

                if out_buffer.pos:
                    data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                    out_buffer.pos = 0
                    yield data

                # zresult == 0 means a full frame was decoded; stop here
                # (this API reads a single frame).
                if zresult == 0:
                    return

            # Repeat loop to collect more input data.
            continue

        # If we get here, input is exhausted.
+
+    def stream_writer(
+        self,
+        writer,
+        write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+        write_return_read=True,
+        closefd=True,
+    ):
+        """
+        Push-based stream wrapper that performs decompression.
+
+        This method constructs a stream wrapper that conforms to the
+        ``io.RawIOBase`` interface and performs transparent decompression
+        when writing to a wrapper stream.
+
+        See :py:class:`zstandard.ZstdDecompressionWriter` for more documentation
+        and usage examples.
+
+        :param writer:
+           Destination for decompressed output. Can be any object with a
+           ``write(data)``.
+        :param write_size:
+           Integer size of chunks to ``write()`` to ``writer``.
+        :param write_return_read:
+           Whether ``write()`` should return the number of bytes of input
+           consumed. If False, ``write()`` returns the number of bytes sent
+           to the inner stream.
+        :param closefd:
+           Whether to ``close()`` the inner stream when this stream is closed.
+        :return:
+           :py:class:`zstandard.ZstdDecompressionWriter`
+        """
+        if not hasattr(writer, "write"):
+            raise ValueError("must pass an object with a write() method")
+
+        return ZstdDecompressionWriter(
+            self,
+            writer,
+            write_size,
+            write_return_read,
+            closefd=closefd,
+        )
+
    def copy_stream(
        self,
        ifh,
        ofh,
        read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
        write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
    ):
        """
        Copy data between streams, decompressing in the process.

        Compressed data will be read from ``ifh``, decompressed, and written
        to ``ofh``.

        >>> dctx = zstandard.ZstdDecompressor()
        >>> dctx.copy_stream(ifh, ofh)

        e.g. to decompress a file to another file:

        >>> dctx = zstandard.ZstdDecompressor()
        >>> with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
        ...     dctx.copy_stream(ifh, ofh)

        The size of chunks being ``read()`` and ``write()`` from and to the
        streams can be specified:

        >>> dctx = zstandard.ZstdDecompressor()
        >>> dctx.copy_stream(ifh, ofh, read_size=8192, write_size=16384)

        :param ifh:
           Source stream to read compressed data from.

           Must have a ``read()`` method.
        :param ofh:
           Destination stream to write uncompressed data to.

           Must have a ``write()`` method.
        :param read_size:
           The number of bytes to ``read()`` from the source in a single
           operation.
        :param write_size:
           The number of bytes to ``write()`` to the destination in a single
           operation.
        :return:
           2-tuple of integers representing the number of bytes read and
           written, respectively.
        """

        if not hasattr(ifh, "read"):
            raise ValueError("first argument must have a read() method")
        if not hasattr(ofh, "write"):
            raise ValueError("second argument must have a write() method")

        self._ensure_dctx()

        in_buffer = ffi.new("ZSTD_inBuffer *")
        out_buffer = ffi.new("ZSTD_outBuffer *")

        # One reusable output buffer for the whole copy.
        dst_buffer = ffi.new("char[]", write_size)
        out_buffer.dst = dst_buffer
        out_buffer.size = write_size
        out_buffer.pos = 0

        total_read, total_write = 0, 0

        # Read all available input.
        while True:
            data = ifh.read(read_size)
            if not data:
                break

            # Keep a reference so the buffer stays alive while C reads it.
            data_buffer = ffi.from_buffer(data)
            total_read += len(data_buffer)
            in_buffer.src = data_buffer
            in_buffer.size = len(data_buffer)
            in_buffer.pos = 0

            # Flush all read data to output.
            while in_buffer.pos < in_buffer.size:
                zresult = lib.ZSTD_decompressStream(
                    self._dctx, out_buffer, in_buffer
                )
                if lib.ZSTD_isError(zresult):
                    raise ZstdError(
                        "zstd decompressor error: %s" % _zstd_error(zresult)
                    )

                if out_buffer.pos:
                    ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
                    total_write += out_buffer.pos
                    out_buffer.pos = 0

            # Continue loop to keep reading.

        return total_read, total_write
+
+    def decompress_content_dict_chain(self, frames):
+        """
+        Decompress a series of frames using the content dictionary chaining technique.
+
+        Such a list of frames is produced by compressing discrete inputs where
+        each non-initial input is compressed with a *prefix* dictionary consisting
+        of the content of the previous input.
+
+        For example, say you have the following inputs:
+
+        >>> inputs = [b"input 1", b"input 2", b"input 3"]
+
+        The zstd frame chain consists of:
+
+        1. ``b"input 1"`` compressed in standalone/discrete mode
+        2. ``b"input 2"`` compressed using ``b"input 1"`` as a *prefix* dictionary
+        3. ``b"input 3"`` compressed using ``b"input 2"`` as a *prefix* dictionary
+
+        Each zstd frame **must** have the content size written.
+
+        The following Python code can be used to produce a *prefix dictionary chain*:
+
+        >>> def make_chain(inputs):
+        ...    frames = []
+        ...
+        ...    # First frame is compressed in standalone/discrete mode.
+        ...    zctx = zstandard.ZstdCompressor()
+        ...    frames.append(zctx.compress(inputs[0]))
+        ...
+        ...    # Subsequent frames use the previous fulltext as a prefix dictionary
+        ...    for i, raw in enumerate(inputs[1:]):
+        ...        dict_data = zstandard.ZstdCompressionDict(
+        ...            inputs[i], dict_type=zstandard.DICT_TYPE_RAWCONTENT)
+        ...        zctx = zstandard.ZstdCompressor(dict_data=dict_data)
+        ...        frames.append(zctx.compress(raw))
+        ...
+        ...    return frames
+
+        ``decompress_content_dict_chain()`` returns the uncompressed data of the last
+        element in the input chain.
+
+        .. note::
+
+           It is possible to implement *prefix dictionary chain* decompression
+           on top of other APIs. However, this function will likely be faster -
+           especially for long input chains - as it avoids the overhead of
+           instantiating and passing around intermediate objects between
+           multiple functions.
+
+        :param frames:
+           List of ``bytes`` holding compressed zstd frames.
+        :return:
+        """
+        if not isinstance(frames, list):
+            raise TypeError("argument must be a list")
+
+        if not frames:
+            raise ValueError("empty input chain")
+
+        # First chunk should not be using a dictionary. We handle it specially.
+        chunk = frames[0]
+        if not isinstance(chunk, bytes):
+            raise ValueError("chunk 0 must be bytes")
+
+        # All chunks should be zstd frames and should have content size set.
+        chunk_buffer = ffi.from_buffer(chunk)
+        params = ffi.new("ZSTD_frameHeader *")
+        zresult = lib.ZSTD_getFrameHeader(
+            params, chunk_buffer, len(chunk_buffer)
+        )
+        if lib.ZSTD_isError(zresult):
+            raise ValueError("chunk 0 is not a valid zstd frame")
+        elif zresult:
+            raise ValueError("chunk 0 is too small to contain a zstd frame")
+
+        if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
+            raise ValueError("chunk 0 missing content size in frame")
+
+        self._ensure_dctx(load_dict=False)
+
+        last_buffer = ffi.new("char[]", params.frameContentSize)
+
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+        out_buffer.dst = last_buffer
+        out_buffer.size = len(last_buffer)
+        out_buffer.pos = 0
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        in_buffer.src = chunk_buffer
+        in_buffer.size = len(chunk_buffer)
+        in_buffer.pos = 0
+
+        zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "could not decompress chunk 0: %s" % _zstd_error(zresult)
+            )
+        elif zresult:
+            raise ZstdError("chunk 0 did not decompress full frame")
+
+        # Special case of chain length of 1
+        if len(frames) == 1:
+            return ffi.buffer(last_buffer, len(last_buffer))[:]
+
+        i = 1
+        while i < len(frames):
+            chunk = frames[i]
+            if not isinstance(chunk, bytes):
+                raise ValueError("chunk %d must be bytes" % i)
+
+            chunk_buffer = ffi.from_buffer(chunk)
+            zresult = lib.ZSTD_getFrameHeader(
+                params, chunk_buffer, len(chunk_buffer)
+            )
+            if lib.ZSTD_isError(zresult):
+                raise ValueError("chunk %d is not a valid zstd frame" % i)
+            elif zresult:
+                raise ValueError(
+                    "chunk %d is too small to contain a zstd frame" % i
+                )
+
+            if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
+                raise ValueError("chunk %d missing content size in frame" % i)
+
+            dest_buffer = ffi.new("char[]", params.frameContentSize)
+
+            out_buffer.dst = dest_buffer
+            out_buffer.size = len(dest_buffer)
+            out_buffer.pos = 0
+
+            in_buffer.src = chunk_buffer
+            in_buffer.size = len(chunk_buffer)
+            in_buffer.pos = 0
+
+            zresult = lib.ZSTD_decompressStream(
+                self._dctx, out_buffer, in_buffer
+            )
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "could not decompress chunk %d: %s" % _zstd_error(zresult)
+                )
+            elif zresult:
+                raise ZstdError("chunk %d did not decompress full frame" % i)
+
+            last_buffer = dest_buffer
+            i += 1
+
+        return ffi.buffer(last_buffer, len(last_buffer))[:]
+
+    def multi_decompress_to_buffer(
+        self, frames, decompressed_sizes=None, threads=0
+    ):
+        """
+        Decompress multiple zstd frames to output buffers as a single operation.
+
+        (Experimental. Not available in CFFI backend.)
+
+        Compressed frames can be passed to the function as a
+        ``BufferWithSegments``, a ``BufferWithSegmentsCollection``, or as a
+        list containing objects that conform to the buffer protocol. For best
+        performance, pass a ``BufferWithSegmentsCollection`` or a
+        ``BufferWithSegments``, as minimal input validation will be done for
+        that type. If calling from Python (as opposed to C), constructing one
+        of these instances may add overhead cancelling out the performance
+        overhead of validation for list inputs.
+
+        Returns a ``BufferWithSegmentsCollection`` containing the decompressed
+        data. All decompressed data is allocated in a single memory buffer. The
+        ``BufferWithSegments`` instance tracks which objects are at which offsets
+        and their respective lengths.
+
+        >>> dctx = zstandard.ZstdDecompressor()
+        >>> results = dctx.multi_decompress_to_buffer([b'...', b'...'])
+
+        The decompressed size of each frame MUST be discoverable. It can either be
+        embedded within the zstd frame or passed in via the ``decompressed_sizes``
+        argument.
+
+        The ``decompressed_sizes`` argument is an object conforming to the buffer
+        protocol which holds an array of 64-bit unsigned integers in the machine's
+        native format defining the decompressed sizes of each frame. If this argument
+        is passed, it avoids having to scan each frame for its decompressed size.
+        This frame scanning can add noticeable overhead in some scenarios.
+
+        >>> frames = [...]
+        >>> sizes = struct.pack('=QQQQ', len0, len1, len2, len3)
+        >>>
+        >>> dctx = zstandard.ZstdDecompressor()
+        >>> results = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+
+        .. note::
+
+           It is possible to pass a ``mmap.mmap()`` instance into this function by
+           wrapping it with a ``BufferWithSegments`` instance (which will define the
+           offsets of frames within the memory mapped region).
+
+        This function is logically equivalent to performing
+        :py:meth:`ZstdDecompressor.decompress` on each input frame and returning the
+        result.
+
+        This function exists to perform decompression on multiple frames as fast
+        as possible by having as little overhead as possible. Since decompression is
+        performed as a single operation and since the decompressed output is stored in
+        a single buffer, extra memory allocations, Python objects, and Python function
+        calls are avoided. This is ideal for scenarios where callers know up front that
+        they need to access data for multiple frames, such as when *delta chains* are
+        being used.
+
+        Currently, the implementation always spawns multiple threads when requested,
+        even if the amount of work to do is small. In the future, it will be smarter
+        about avoiding threads and their associated overhead when the amount of
+        work to do is small.
+
+        :param frames:
+           Source defining zstd frames to decompress.
+        :param decompressed_sizes:
+           Array of integers representing sizes of decompressed zstd frames.
+        :param threads:
+           How many threads to use for decompression operations.
+
+           Negative values will use the same number of threads as logical CPUs
+           on the machine. Values ``0`` or ``1`` use a single thread.
+        :return:
+           ``BufferWithSegmentsCollection``
+        """
+        # Deliberately unimplemented in this (CFFI) backend; only the C
+        # extension provides this experimental API.
+        raise NotImplementedError()
+
+    def _ensure_dctx(self, load_dict=True):
+        """Reset the decompression context and (re)apply configured settings.
+
+        Resets only the session state of the underlying ``ZSTD_DCtx`` (the
+        configured parameters survive the reset), then re-applies the max
+        window size, frame format, and — optionally — the prepared
+        dictionary that were captured on this decompressor instance.
+
+        :param load_dict:
+           When ``True`` (the default) and a dictionary was supplied to the
+           decompressor, re-reference it on the context. Pass ``False`` to
+           skip dictionary loading (e.g. for operations that must not use
+           the dictionary).
+        :raises ZstdError: if any of the underlying libzstd calls fail.
+        """
+        lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only)
+
+        # A falsy max window size (0/None) means no explicit limit was
+        # configured, so the library default is left in place.
+        if self._max_window_size:
+            zresult = lib.ZSTD_DCtx_setMaxWindowSize(
+                self._dctx, self._max_window_size
+            )
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "unable to set max window size: %s" % _zstd_error(zresult)
+                )
+
+        # The frame format (e.g. magicless frames) is always re-applied
+        # since the session reset does not change parameters but callers
+        # rely on self._format being in effect.
+        zresult = lib.ZSTD_DCtx_setParameter(
+            self._dctx, lib.ZSTD_d_format, self._format
+        )
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError(
+                "unable to set decoding format: %s" % _zstd_error(zresult)
+            )
+
+        # Re-reference the prepared digested dictionary (ZSTD_DDict), if one
+        # was provided and the caller did not opt out via load_dict=False.
+        if self._dict_data and load_dict:
+            zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "unable to reference prepared dictionary: %s"
+                    % _zstd_error(zresult)
+                )
diff --git a/llmeval-env/lib/python3.10/site-packages/zstandard/py.typed b/llmeval-env/lib/python3.10/site-packages/zstandard/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391