diff --git a/ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..456def783200c4ca0f79430cb7f64fb1d8708442 --- /dev/null +++ b/ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afde3cede4adc1334c7a9c55de1b1f51c46a626bd30ef08b5c2f72ff3aea40e2 +size 50332828 diff --git a/ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..de6be322390d87e0d3649bc37bf5ef18da3806ee --- /dev/null +++ b/ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0912f982ddaacb1da8501500b695615fe6baea1b6f3af90a819b1ab533c20c92 +size 50332749 diff --git a/ckpts/universal/global_step40/zero/7.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/7.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..cdb8516dde174e319941e6216e1cc6033139b577 --- /dev/null +++ b/ckpts/universal/global_step40/zero/7.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c79525b81c8f0eebbae5022d5f47341e1febbc30438bfb99d81b1746926328e +size 16778317 diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__init__.py b/venv/lib/python3.10/site-packages/scipy/_lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2140970015d099763797d60a98a3a3b6f4b5f219 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/__init__.py @@ -0,0 +1,14 @@ +""" +Module containing private utility functions +=========================================== + +The ``scipy._lib`` namespace is empty (for now). 
Tests for all +utilities in submodules of ``_lib`` can be run with:: + + from scipy import _lib + _lib.test() + +""" +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af09a8cd6316fab20439c095aefef71fe59b9465 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6816efd29f89dcf33f9fa3469b561741f7ca2814 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_bunch.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_bunch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20e99f013c50bff155859bd32a2a9df310c34845 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_bunch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52d30a35f2d430feb49f8e7e2762bf1e20a8583b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9f694d65ad5fc1c8b0f3f2c9d8c4b5656c1b8b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31bc6f7255a946ede1195aefe84274eb20126d61 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64f5b38ce88d4bc8334d17fbcd67f6c509539b07 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c046f29b0ade6ffae250324d6722e48b395959b4 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d2af8b53f365a4e4b687e704a61dc12ea885b56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed05fd05b4fbf0b81944ed8d37bf5a4cbf54be2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c833c6af8d71efbb085a003856a6393d643d154 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..779cf429078fdb0b3223a6e526ef0d0c81ab3494 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d49354d2ee2fa4f11051b7860538ce9913561910 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9f3ab36e44d25852632e86b6cc6734efec4b701 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a36beb87b49f80e26cdc324ef3b6fc3703809fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2eb4a1d207640e7a40a20962349fa7f51dbbd02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..17d831928fd7139749ef43ccc954af35e7700d66 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75178ee00f2f3b335bb1a5837c805b4c71be11ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_array_api.py b/venv/lib/python3.10/site-packages/scipy/_lib/_array_api.py new file mode 100644 index 0000000000000000000000000000000000000000..19b9eee98db911df3972a849aad98c2fedfb68fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_array_api.py @@ -0,0 +1,356 @@ +"""Utility functions to use Python Array API compatible libraries. + +For the context about the Array API see: +https://data-apis.org/array-api/latest/purpose_and_scope.html + +The SciPy use case of the Array API is described on the following page: +https://data-apis.org/array-api/latest/use_cases.html#use-case-scipy +""" +from __future__ import annotations + +import os +import warnings + +import numpy as np + +from scipy._lib import array_api_compat +from scipy._lib.array_api_compat import ( + is_array_api_obj, + size, + numpy as np_compat, +) + +__all__ = ['array_namespace', '_asarray', 'size'] + + +# To enable array API and strict array-like input validation +SCIPY_ARRAY_API: str | bool = os.environ.get("SCIPY_ARRAY_API", False) +# To control the default device - for use in the test suite only +SCIPY_DEVICE = os.environ.get("SCIPY_DEVICE", "cpu") + +_GLOBAL_CONFIG = { + "SCIPY_ARRAY_API": SCIPY_ARRAY_API, + "SCIPY_DEVICE": SCIPY_DEVICE, +} + + +def compliance_scipy(arrays): + """Raise exceptions on known-bad subclasses. + + The following subclasses are not supported and raise an error: + - `numpy.ma.MaskedArray` + - `numpy.matrix` + - NumPy arrays which do not have a boolean or numerical dtype + - Any array-like which is neither array API compatible nor coercible by NumPy + - Any array-like which is coerced by NumPy to an unsupported dtype + """ + for i in range(len(arrays)): + array = arrays[i] + if isinstance(array, np.ma.MaskedArray): + raise TypeError("Inputs of type `numpy.ma.MaskedArray` are not supported.") + elif isinstance(array, np.matrix): + raise TypeError("Inputs of type `numpy.matrix` are not supported.") + if isinstance(array, (np.ndarray, np.generic)): + dtype = array.dtype + if not (np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.bool_)): + raise TypeError(f"An argument has dtype `{dtype!r}`; " + f"only boolean and numerical dtypes are supported.") + elif not is_array_api_obj(array): + try: + array = np.asanyarray(array) + except TypeError: + raise TypeError("An argument is neither array API compatible nor " + "coercible by NumPy.") + dtype = array.dtype + if not (np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.bool_)): + message = ( + f"An argument was coerced to an unsupported dtype `{dtype!r}`; " + f"only boolean and numerical dtypes are supported."
+ ) + raise TypeError(message) + arrays[i] = array + return arrays + + +def _check_finite(array, xp): + """Check for NaNs or Infs.""" + msg = "array must not contain infs or NaNs" + try: + if not xp.all(xp.isfinite(array)): + raise ValueError(msg) + except TypeError: + raise ValueError(msg) + + +def array_namespace(*arrays): + """Get the array API compatible namespace for the given arrays. + + Parameters + ---------- + *arrays : sequence of array_like + Arrays used to infer the common namespace. + + Returns + ------- + namespace : module + Common namespace. + + Notes + ----- + Thin wrapper around `array_api_compat.array_namespace`. + + 1. Check for the global switch: SCIPY_ARRAY_API. This can also be accessed + dynamically through ``_GLOBAL_CONFIG['SCIPY_ARRAY_API']``. + 2. `compliance_scipy` raises exceptions on known-bad subclasses. See + its definition for more details. + + When the global switch is False, it defaults to the `numpy` namespace. + In that case, there is no compliance check. This is a convenience to + ease adoption. Otherwise, arrays must comply with the new rules. + """ + if not _GLOBAL_CONFIG["SCIPY_ARRAY_API"]: + # here we could wrap the namespace if needed + return np_compat + + arrays = [array for array in arrays if array is not None] + + arrays = compliance_scipy(arrays) + + return array_api_compat.array_namespace(*arrays) + + +def _asarray( + array, dtype=None, order=None, copy=None, *, xp=None, check_finite=False +): + """SciPy-specific replacement for `np.asarray` with `order` and `check_finite`. + + Memory layout parameter `order` is not exposed in the Array API standard. + `order` is only enforced if the input array implementation + is NumPy based, otherwise `order` is just silently ignored. + + `check_finite` is also not a keyword in the array API standard; it is + included here for convenience rather than having to be a separate function + call inside SciPy functions. + """ + if xp is None: + xp = array_namespace(array) + if xp.__name__ in {"numpy", "scipy._lib.array_api_compat.numpy"}: + # Use NumPy API to support order + if copy is True: + array = np.array(array, order=order, dtype=dtype) + else: + array = np.asarray(array, order=order, dtype=dtype) + + # At this point array is a NumPy ndarray. We convert it to an array + # container that is consistent with the input's namespace. + array = xp.asarray(array) + else: + try: + array = xp.asarray(array, dtype=dtype, copy=copy) + except TypeError: + coerced_xp = array_namespace(xp.asarray(3)) + array = coerced_xp.asarray(array, dtype=dtype, copy=copy) + + if check_finite: + _check_finite(array, xp) + + return array + + +def atleast_nd(x, *, ndim, xp=None): + """Recursively expand the dimension to have at least `ndim`.""" + if xp is None: + xp = array_namespace(x) + x = xp.asarray(x) + if x.ndim < ndim: + x = xp.expand_dims(x, axis=0) + x = atleast_nd(x, ndim=ndim, xp=xp) + return x + + +def copy(x, *, xp=None): + """ + Copies an array. + + Parameters + ---------- + x : array + + xp : array_namespace + + Returns + ------- + copy : array + Copied array + + Notes + ----- + This copy function does not offer all the semantics of `np.copy`, i.e. the + `subok` and `order` keywords are not used. + """ + # Note: xp.asarray fails if xp is numpy.
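+    # (Editorial note, not upstream: `_asarray` above special-cases NumPy by using `np.array` when `copy=True`, which is why `copy` delegates to it rather than calling `xp.asarray(x, copy=True)` directly.)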
+ if xp is None: + xp = array_namespace(x) + + return _asarray(x, copy=True, xp=xp) + + +def is_numpy(xp): + return xp.__name__ in ('numpy', 'scipy._lib.array_api_compat.numpy') + + +def is_cupy(xp): + return xp.__name__ in ('cupy', 'scipy._lib.array_api_compat.cupy') + + +def is_torch(xp): + return xp.__name__ in ('torch', 'scipy._lib.array_api_compat.torch') + + +def _strict_check(actual, desired, xp, + check_namespace=True, check_dtype=True, check_shape=True): + __tracebackhide__ = True # Hide traceback for py.test + if check_namespace: + _assert_matching_namespace(actual, desired) + + desired = xp.asarray(desired) + + if check_dtype: + _msg = f"dtypes do not match.\nActual: {actual.dtype}\nDesired: {desired.dtype}" + assert actual.dtype == desired.dtype, _msg + + if check_shape: + _msg = f"Shapes do not match.\nActual: {actual.shape}\nDesired: {desired.shape}" + assert actual.shape == desired.shape, _msg + _check_scalar(actual, desired, xp) + + desired = xp.broadcast_to(desired, actual.shape) + return desired + + +def _assert_matching_namespace(actual, desired): + __tracebackhide__ = True # Hide traceback for py.test + actual = actual if isinstance(actual, tuple) else (actual,) + desired_space = array_namespace(desired) + for arr in actual: + arr_space = array_namespace(arr) + _msg = (f"Namespaces do not match.\n" + f"Actual: {arr_space.__name__}\n" + f"Desired: {desired_space.__name__}") + assert arr_space == desired_space, _msg + + +def _check_scalar(actual, desired, xp): + __tracebackhide__ = True # Hide traceback for py.test + # Shape check alone is sufficient unless desired.shape == (). Also, + # only NumPy distinguishes between scalars and arrays. + if desired.shape != () or not is_numpy(xp): + return + # We want to follow the conventions of the `xp` library. Libraries like + # NumPy, for which `np.asarray(0)[()]` returns a scalar, tend to return + # a scalar even when a 0D array might be more appropriate: + # import numpy as np + # np.mean([1, 2, 3]) # scalar, not 0d array + # np.asarray(0)*2 # scalar, not 0d array + # np.sin(np.asarray(0)) # scalar, not 0d array + # Libraries like CuPy, for which `cp.asarray(0)[()]` returns a 0D array, + # tend to return a 0D array in scenarios like those above. + # Therefore, regardless of whether the developer provides a scalar or 0D + # array for `desired`, we would typically want the type of `actual` to be + # the type of `desired[()]`. If the developer wants to override this + # behavior, they can set `check_shape=False`.
+ desired = desired[()] + _msg = f"Types do not match:\n Actual: {type(actual)}\n Desired: {type(desired)}" + assert (xp.isscalar(actual) and xp.isscalar(desired) + or (not xp.isscalar(actual) and not xp.isscalar(desired))), _msg + + +def xp_assert_equal(actual, desired, check_namespace=True, check_dtype=True, + check_shape=True, err_msg='', xp=None): + __tracebackhide__ = True # Hide traceback for py.test + if xp is None: + xp = array_namespace(actual) + desired = _strict_check(actual, desired, xp, check_namespace=check_namespace, + check_dtype=check_dtype, check_shape=check_shape) + if is_cupy(xp): + return xp.testing.assert_array_equal(actual, desired, err_msg=err_msg) + elif is_torch(xp): + # PyTorch recommends using `rtol=0, atol=0` like this + # to test for exact equality + err_msg = None if err_msg == '' else err_msg + return xp.testing.assert_close(actual, desired, rtol=0, atol=0, equal_nan=True, + check_dtype=False, msg=err_msg) + return np.testing.assert_array_equal(actual, desired, err_msg=err_msg) + + +def xp_assert_close(actual, desired, rtol=1e-07, atol=0, check_namespace=True, + check_dtype=True, check_shape=True, err_msg='', xp=None): + __tracebackhide__ = True # Hide traceback for py.test + if xp is None: + xp = array_namespace(actual) + desired = _strict_check(actual, desired, xp, check_namespace=check_namespace, + check_dtype=check_dtype, check_shape=check_shape) + if is_cupy(xp): + return xp.testing.assert_allclose(actual, desired, rtol=rtol, + atol=atol, err_msg=err_msg) + elif is_torch(xp): + err_msg = None if err_msg == '' else err_msg + return xp.testing.assert_close(actual, desired, rtol=rtol, atol=atol, + equal_nan=True, check_dtype=False, msg=err_msg) + return np.testing.assert_allclose(actual, desired, rtol=rtol, + atol=atol, err_msg=err_msg) + + +def xp_assert_less(actual, desired, check_namespace=True, check_dtype=True, + check_shape=True, err_msg='', verbose=True, xp=None): + __tracebackhide__ = True # Hide traceback for py.test + if xp is None: + xp = array_namespace(actual) + desired = _strict_check(actual, desired, xp, check_namespace=check_namespace, + check_dtype=check_dtype, check_shape=check_shape) + if is_cupy(xp): + return xp.testing.assert_array_less(actual, desired, + err_msg=err_msg, verbose=verbose) + elif is_torch(xp): + if actual.device.type != 'cpu': + actual = actual.cpu() + if desired.device.type != 'cpu': + desired = desired.cpu() + return np.testing.assert_array_less(actual, desired, + err_msg=err_msg, verbose=verbose) + + +def cov(x, *, xp=None): + if xp is None: + xp = array_namespace(x) + + X = copy(x, xp=xp) + dtype = xp.result_type(X, xp.float64) + + X = atleast_nd(X, ndim=2, xp=xp) + X = xp.asarray(X, dtype=dtype) + + avg = xp.mean(X, axis=1) + fact = X.shape[1] - 1 + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) + fact = 0.0 + + X -= avg[:, None] + X_T = X.T + if xp.isdtype(X_T.dtype, 'complex floating'): + X_T = xp.conj(X_T) + c = X @ X_T + c /= fact + axes = tuple(axis for axis, length in enumerate(c.shape) if length == 1) + return xp.squeeze(c, axis=axes) + + +def xp_unsupported_param_msg(param): + return f'Providing {param!r} is only supported for numpy arrays.' 
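+
+# --- Editorial sketch, not part of the upstream file ---
+# Minimal use of the helpers above, assuming the default configuration in
+# which SCIPY_ARRAY_API is unset, so `array_namespace` resolves to the
+# NumPy-compatible namespace:
+#
+#     import numpy as np
+#     from scipy._lib._array_api import array_namespace, xp_assert_close
+#
+#     x = np.asarray([1.0, 2.0, 3.0])
+#     xp = array_namespace(x)  # NumPy-compatible namespace under the default config
+#     xp_assert_close(xp.mean(x), np.asarray(2.0), rtol=1e-12)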
+ + +def is_complex(x, xp): + return xp.isdtype(x.dtype, 'complex floating') diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_bunch.py b/venv/lib/python3.10/site-packages/scipy/_lib/_bunch.py new file mode 100644 index 0000000000000000000000000000000000000000..bb562e4348f46dc1137afe3d3ce50f1149c85376 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_bunch.py @@ -0,0 +1,225 @@ +import sys as _sys +from keyword import iskeyword as _iskeyword + + +def _validate_names(typename, field_names, extra_field_names): + """ + Ensure that all the given names are valid Python identifiers that + do not start with '_'. Also check that there are no duplicates + among field_names + extra_field_names. + """ + for name in [typename] + field_names + extra_field_names: + if not isinstance(name, str): + raise TypeError('typename and all field names must be strings') + if not name.isidentifier(): + raise ValueError('typename and all field names must be valid ' + f'identifiers: {name!r}') + if _iskeyword(name): + raise ValueError('typename and all field names cannot be a ' + f'keyword: {name!r}') + + seen = set() + for name in field_names + extra_field_names: + if name.startswith('_'): + raise ValueError('Field names cannot start with an underscore: ' + f'{name!r}') + if name in seen: + raise ValueError(f'Duplicate field name: {name!r}') + seen.add(name) + + +# Note: This code is adapted from CPython:Lib/collections/__init__.py +def _make_tuple_bunch(typename, field_names, extra_field_names=None, + module=None): + """ + Create a namedtuple-like class with additional attributes. + + This function creates a subclass of tuple that acts like a namedtuple + and that has additional attributes. + + The additional attributes are listed in `extra_field_names`. The + values assigned to these attributes are not part of the tuple. + + The reason this function exists is to allow functions in SciPy + that currently return a tuple or a namedtuple to return objects + that have additional attributes, while maintaining backwards + compatibility. + + This should only be used to enhance *existing* functions in SciPy. + New functions are free to create objects as return values without + having to maintain backwards compatibility with an old tuple or + namedtuple return value. + + Parameters + ---------- + typename : str + The name of the type. + field_names : list of str + List of names of the values to be stored in the tuple. These names + will also be attributes of instances, so the values in the tuple + can be accessed by indexing or as attributes. At least one name + is required. See the Notes for additional restrictions. + extra_field_names : list of str, optional + List of names of values that will be stored as attributes of the + object. See the notes for additional restrictions. + + Returns + ------- + cls : type + The new class. + + Notes + ----- + There are restrictions on the names that may be used in `field_names` + and `extra_field_names`: + + * The names must be unique--no duplicates allowed. + * The names must be valid Python identifiers, and must not begin with + an underscore. + * The names must not be Python keywords (e.g. 'def', 'and', etc., are + not allowed). + + Examples + -------- + >>> from scipy._lib._bunch import _make_tuple_bunch + + Create a class that acts like a namedtuple with length 2 (with field + names `x` and `y`) that will also have the attributes `w` and `beta`: + + >>> Result = _make_tuple_bunch('Result', ['x', 'y'], ['w', 'beta']) + + `Result` is the new class.
We call it with keyword arguments to create + a new instance with given values. + + >>> result1 = Result(x=1, y=2, w=99, beta=0.5) + >>> result1 + Result(x=1, y=2, w=99, beta=0.5) + + `result1` acts like a tuple of length 2: + + >>> len(result1) + 2 + >>> result1[:] + (1, 2) + + The values assigned when the instance was created are available as + attributes: + + >>> result1.y + 2 + >>> result1.beta + 0.5 + """ + if len(field_names) == 0: + raise ValueError('field_names must contain at least one name') + + if extra_field_names is None: + extra_field_names = [] + _validate_names(typename, field_names, extra_field_names) + + typename = _sys.intern(str(typename)) + field_names = tuple(map(_sys.intern, field_names)) + extra_field_names = tuple(map(_sys.intern, extra_field_names)) + + all_names = field_names + extra_field_names + arg_list = ', '.join(field_names) + full_list = ', '.join(all_names) + repr_fmt = ''.join(('(', + ', '.join(f'{name}=%({name})r' for name in all_names), + ')')) + tuple_new = tuple.__new__ + _dict, _tuple, _zip = dict, tuple, zip + + # Create all the named tuple methods to be added to the class namespace + + s = f"""\ +def __new__(_cls, {arg_list}, **extra_fields): + return _tuple_new(_cls, ({arg_list},)) + +def __init__(self, {arg_list}, **extra_fields): + for key in self._extra_fields: + if key not in extra_fields: + raise TypeError("missing keyword argument '%s'" % (key,)) + for key, val in extra_fields.items(): + if key not in self._extra_fields: + raise TypeError("unexpected keyword argument '%s'" % (key,)) + self.__dict__[key] = val + +def __setattr__(self, key, val): + if key in {repr(field_names)}: + raise AttributeError("can't set attribute %r of class %r" + % (key, self.__class__.__name__)) + else: + self.__dict__[key] = val +""" + del arg_list + namespace = {'_tuple_new': tuple_new, + '__builtins__': dict(TypeError=TypeError, + AttributeError=AttributeError), + '__name__': f'namedtuple_{typename}'} + exec(s, namespace) + __new__ = namespace['__new__'] + __new__.__doc__ = f'Create new instance of {typename}({full_list})' + __init__ = namespace['__init__'] + __init__.__doc__ = f'Instantiate instance of {typename}({full_list})' + __setattr__ = namespace['__setattr__'] + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + repr_fmt % self._asdict() + + def _asdict(self): + 'Return a new dict which maps field names to their values.' + out = _dict(_zip(self._fields, self)) + out.update(self.__dict__) + return out + + def __getnewargs_ex__(self): + 'Return self as a plain tuple. Used by copy and pickle.' 
+ return _tuple(self), self.__dict__ + + # Modify function metadata to help with introspection and debugging + for method in (__new__, __repr__, _asdict, __getnewargs_ex__): + method.__qualname__ = f'{typename}.{method.__name__}' + + # Build-up the class namespace dictionary + # and use type() to build the result class + class_namespace = { + '__doc__': f'{typename}({full_list})', + '_fields': field_names, + '__new__': __new__, + '__init__': __init__, + '__repr__': __repr__, + '__setattr__': __setattr__, + '_asdict': _asdict, + '_extra_fields': extra_field_names, + '__getnewargs_ex__': __getnewargs_ex__, + } + for index, name in enumerate(field_names): + + def _get(self, index=index): + return self[index] + class_namespace[name] = property(_get) + for name in extra_field_names: + + def _get(self, name=name): + return self.__dict__[name] + class_namespace[name] = property(_get) + + result = type(typename, (tuple,), class_namespace) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the named tuple is created. Bypass this step in environments + # where sys._getframe is not defined (Jython for example) or sys._getframe + # is not defined for arguments greater than 0 (IronPython), or where the + # user has specified a particular module. + if module is None: + try: + module = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + if module is not None: + result.__module__ = module + __new__.__module__ = module + + return result diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_ccallback.py b/venv/lib/python3.10/site-packages/scipy/_lib/_ccallback.py new file mode 100644 index 0000000000000000000000000000000000000000..1980d06f5489e6633fb611c35bfb56903bd63e7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_ccallback.py @@ -0,0 +1,251 @@ +from . import _ccallback_c + +import ctypes + +PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0] + +ffi = None + +class CData: + pass + +def _import_cffi(): + global ffi, CData + + if ffi is not None: + return + + try: + import cffi + ffi = cffi.FFI() + CData = ffi.CData + except ImportError: + ffi = False + + +class LowLevelCallable(tuple): + """ + Low-level callback function. + + Some functions in SciPy take as arguments callback functions, which + can either be python callables or low-level compiled functions. Using + compiled callback functions can improve performance somewhat by + avoiding wrapping data in Python objects. + + Such low-level functions in SciPy are wrapped in `LowLevelCallable` + objects, which can be constructed from function pointers obtained from + ctypes, cffi, Cython, or contained in Python `PyCapsule` objects. + + .. seealso:: + + Functions accepting low-level callables: + + `scipy.integrate.quad`, `scipy.ndimage.generic_filter`, + `scipy.ndimage.generic_filter1d`, `scipy.ndimage.geometric_transform` + + Usage examples: + + :ref:`ndimage-ccallbacks`, :ref:`quad-callbacks` + + Parameters + ---------- + function : {PyCapsule, ctypes function pointer, cffi function pointer} + Low-level callback function. + user_data : {PyCapsule, ctypes void pointer, cffi void pointer} + User data to pass on to the callback function. + signature : str, optional + Signature of the function. If omitted, determined from *function*, + if possible. + + Attributes + ---------- + function + Callback function given. + user_data + User data given. + signature + Signature of the function. 
+ + Methods + ------- + from_cython + Class method for constructing callables from Cython C-exported + functions. + + Notes + ----- + The argument ``function`` can be one of: + + - PyCapsule, whose name contains the C function signature + - ctypes function pointer + - cffi function pointer + + The signature of the low-level callback must match one of those expected + by the routine it is passed to. + + If constructing low-level functions from a PyCapsule, the name of the + capsule must be the corresponding signature, in the format:: + + return_type (arg1_type, arg2_type, ...) + + For example:: + + "void (double)" + "double (double, int *, void *)" + + The context of a PyCapsule passed in as ``function`` is used as ``user_data``, + if an explicit value for ``user_data`` was not given. + + """ + + # Make the class immutable + __slots__ = () + + def __new__(cls, function, user_data=None, signature=None): + # We need to hold a reference to the function & user data, + # to prevent them going out of scope + item = cls._parse_callback(function, user_data, signature) + return tuple.__new__(cls, (item, function, user_data)) + + def __repr__(self): + return f"LowLevelCallable({self.function!r}, {self.user_data!r})" + + @property + def function(self): + return tuple.__getitem__(self, 1) + + @property + def user_data(self): + return tuple.__getitem__(self, 2) + + @property + def signature(self): + return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0)) + + def __getitem__(self, idx): + raise ValueError() + + @classmethod + def from_cython(cls, module, name, user_data=None, signature=None): + """ + Create a low-level callback function from an exported Cython function. + + Parameters + ---------- + module : module + Cython module where the exported function resides + name : str + Name of the exported function + user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional + User data to pass on to the callback function. + signature : str, optional + Signature of the function. If omitted, determined from *function*. 
+ + """ + try: + function = module.__pyx_capi__[name] + except AttributeError as e: + message = "Given module is not a Cython module with __pyx_capi__ attribute" + raise ValueError(message) from e + except KeyError as e: + message = f"No function {name!r} found in __pyx_capi__ of the module" + raise ValueError(message) from e + return cls(function, user_data, signature) + + @classmethod + def _parse_callback(cls, obj, user_data=None, signature=None): + _import_cffi() + + if isinstance(obj, LowLevelCallable): + func = tuple.__getitem__(obj, 0) + elif isinstance(obj, PyCFuncPtr): + func, signature = _get_ctypes_func(obj, signature) + elif isinstance(obj, CData): + func, signature = _get_cffi_func(obj, signature) + elif _ccallback_c.check_capsule(obj): + func = obj + else: + raise ValueError("Given input is not a callable or a " + "low-level callable (pycapsule/ctypes/cffi)") + + if isinstance(user_data, ctypes.c_void_p): + context = _get_ctypes_data(user_data) + elif isinstance(user_data, CData): + context = _get_cffi_data(user_data) + elif user_data is None: + context = 0 + elif _ccallback_c.check_capsule(user_data): + context = user_data + else: + raise ValueError("Given user data is not a valid " + "low-level void* pointer (pycapsule/ctypes/cffi)") + + return _ccallback_c.get_raw_capsule(func, signature, context) + + +# +# ctypes helpers +# + +def _get_ctypes_func(func, signature=None): + # Get function pointer + func_ptr = ctypes.cast(func, ctypes.c_void_p).value + + # Construct function signature + if signature is None: + signature = _typename_from_ctypes(func.restype) + " (" + for j, arg in enumerate(func.argtypes): + if j == 0: + signature += _typename_from_ctypes(arg) + else: + signature += ", " + _typename_from_ctypes(arg) + signature += ")" + + return func_ptr, signature + + +def _typename_from_ctypes(item): + if item is None: + return "void" + elif item is ctypes.c_void_p: + return "void *" + + name = item.__name__ + + pointer_level = 0 + while name.startswith("LP_"): + pointer_level += 1 + name = name[3:] + + if name.startswith('c_'): + name = name[2:] + + if pointer_level > 0: + name += " " + "*"*pointer_level + + return name + + +def _get_ctypes_data(data): + # Get voidp pointer + return ctypes.cast(data, ctypes.c_void_p).value + + +# +# CFFI helpers +# + +def _get_cffi_func(func, signature=None): + # Get function pointer + func_ptr = ffi.cast('uintptr_t', func) + + # Get signature + if signature is None: + signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ') + + return func_ptr, signature + + +def _get_cffi_data(data): + # Get pointer + return ffi.cast('uintptr_t', data) diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d946e01f83e740c4f9f0b3b57184cecd8edc6657 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_disjoint_set.py b/venv/lib/python3.10/site-packages/scipy/_lib/_disjoint_set.py new file mode 100644 index 0000000000000000000000000000000000000000..683c5c8e518705e710212dafc01363f92a2f947d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_disjoint_set.py @@ -0,0 +1,254 @@ +""" +Disjoint set data structure +""" + + +class DisjointSet: + """ Disjoint set data structure for incremental connectivity 
queries. + + .. versionadded:: 1.6.0 + + Attributes + ---------- + n_subsets : int + The number of subsets. + + Methods + ------- + add + merge + connected + subset + subset_size + subsets + __getitem__ + + Notes + ----- + This class implements the disjoint set [1]_, also known as the *union-find* + or *merge-find* data structure. The *find* operation (implemented in + `__getitem__`) implements the *path halving* variant. The *merge* method + implements the *merge by size* variant. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure + + Examples + -------- + >>> from scipy.cluster.hierarchy import DisjointSet + + Initialize a disjoint set: + + >>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b']) + + Merge some subsets: + + >>> disjoint_set.merge(1, 2) + True + >>> disjoint_set.merge(3, 'a') + True + >>> disjoint_set.merge('a', 'b') + True + >>> disjoint_set.merge('b', 'b') + False + + Find root elements: + + >>> disjoint_set[2] + 1 + >>> disjoint_set['b'] + 3 + + Test connectivity: + + >>> disjoint_set.connected(1, 2) + True + >>> disjoint_set.connected(1, 'b') + False + + List elements in disjoint set: + + >>> list(disjoint_set) + [1, 2, 3, 'a', 'b'] + + Get the subset containing 'a': + + >>> disjoint_set.subset('a') + {'a', 3, 'b'} + + Get the size of the subset containing 'a' (without actually instantiating + the subset): + + >>> disjoint_set.subset_size('a') + 3 + + Get all subsets in the disjoint set: + + >>> disjoint_set.subsets() + [{1, 2}, {'a', 3, 'b'}] + """ + def __init__(self, elements=None): + self.n_subsets = 0 + self._sizes = {} + self._parents = {} + # _nbrs is a circular linked list which links connected elements. + self._nbrs = {} + # _indices tracks the element insertion order in `__iter__`. + self._indices = {} + if elements is not None: + for x in elements: + self.add(x) + + def __iter__(self): + """Returns an iterator of the elements in the disjoint set. + + Elements are ordered by insertion order. + """ + return iter(self._indices) + + def __len__(self): + return len(self._indices) + + def __contains__(self, x): + return x in self._indices + + def __getitem__(self, x): + """Find the root element of `x`. + + Parameters + ---------- + x : hashable object + Input element. + + Returns + ------- + root : hashable object + Root element of `x`. + """ + if x not in self._indices: + raise KeyError(x) + + # find by "path halving" + parents = self._parents + while self._indices[x] != self._indices[parents[x]]: + parents[x] = parents[parents[x]] + x = parents[x] + return x + + def add(self, x): + """Add element `x` to disjoint set + """ + if x in self._indices: + return + + self._sizes[x] = 1 + self._parents[x] = x + self._nbrs[x] = x + self._indices[x] = len(self._indices) + self.n_subsets += 1 + + def merge(self, x, y): + """Merge the subsets of `x` and `y`. + + The smaller subset (the child) is merged into the larger subset (the + parent). If the subsets are of equal size, the root element which was + first inserted into the disjoint set is selected as the parent. + + Parameters + ---------- + x, y : hashable object + Elements to merge. + + Returns + ------- + merged : bool + True if `x` and `y` were in disjoint sets, False otherwise. 
+ """ + xr = self[x] + yr = self[y] + if self._indices[xr] == self._indices[yr]: + return False + + sizes = self._sizes + if (sizes[xr], self._indices[yr]) < (sizes[yr], self._indices[xr]): + xr, yr = yr, xr + self._parents[yr] = xr + self._sizes[xr] += self._sizes[yr] + self._nbrs[xr], self._nbrs[yr] = self._nbrs[yr], self._nbrs[xr] + self.n_subsets -= 1 + return True + + def connected(self, x, y): + """Test whether `x` and `y` are in the same subset. + + Parameters + ---------- + x, y : hashable object + Elements to test. + + Returns + ------- + result : bool + True if `x` and `y` are in the same set, False otherwise. + """ + return self._indices[self[x]] == self._indices[self[y]] + + def subset(self, x): + """Get the subset containing `x`. + + Parameters + ---------- + x : hashable object + Input element. + + Returns + ------- + result : set + Subset containing `x`. + """ + if x not in self._indices: + raise KeyError(x) + + result = [x] + nxt = self._nbrs[x] + while self._indices[nxt] != self._indices[x]: + result.append(nxt) + nxt = self._nbrs[nxt] + return set(result) + + def subset_size(self, x): + """Get the size of the subset containing `x`. + + Note that this method is faster than ``len(self.subset(x))`` because + the size is directly read off an internal field, without the need to + instantiate the full subset. + + Parameters + ---------- + x : hashable object + Input element. + + Returns + ------- + result : int + Size of the subset containing `x`. + """ + return self._sizes[self[x]] + + def subsets(self): + """Get all the subsets in the disjoint set. + + Returns + ------- + result : list + Subsets in the disjoint set. + """ + result = [] + visited = set() + for x in self: + if x not in visited: + xset = self.subset(x) + visited.update(xset) + result.append(xset) + return result diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_docscrape.py b/venv/lib/python3.10/site-packages/scipy/_lib/_docscrape.py new file mode 100644 index 0000000000000000000000000000000000000000..f5ad8058366fa4df2f2fa870de7e4f0eff79b1e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_docscrape.py @@ -0,0 +1,679 @@ +"""Extract reference documentation from the NumPy source tree. + +""" +# copied from numpydoc/docscrape.py +import inspect +import textwrap +import re +import pydoc +from warnings import warn +from collections import namedtuple +from collections.abc import Callable, Mapping +import copy +import sys + + +def strip_blank_lines(l): + "Remove leading and trailing blank lines from a list of lines" + while l and not l[0].strip(): + del l[0] + while l and not l[-1].strip(): + del l[-1] + return l + + +class Reader: + """A line-based string reader. + + """ + def __init__(self, data): + """ + Parameters + ---------- + data : str + String with lines separated by '\\n'. 
+ + """ + if isinstance(data, list): + self._str = data + else: + self._str = data.split('\n') # store string as list of lines + + self.reset() + + def __getitem__(self, n): + return self._str[n] + + def reset(self): + self._l = 0 # current line nr + + def read(self): + if not self.eof(): + out = self[self._l] + self._l += 1 + return out + else: + return '' + + def seek_next_non_empty_line(self): + for l in self[self._l:]: + if l.strip(): + break + else: + self._l += 1 + + def eof(self): + return self._l >= len(self._str) + + def read_to_condition(self, condition_func): + start = self._l + for line in self[start:]: + if condition_func(line): + return self[start:self._l] + self._l += 1 + if self.eof(): + return self[start:self._l+1] + return [] + + def read_to_next_empty_line(self): + self.seek_next_non_empty_line() + + def is_empty(line): + return not line.strip() + + return self.read_to_condition(is_empty) + + def read_to_next_unindented_line(self): + def is_unindented(line): + return (line.strip() and (len(line.lstrip()) == len(line))) + return self.read_to_condition(is_unindented) + + def peek(self, n=0): + if self._l + n < len(self._str): + return self[self._l + n] + else: + return '' + + def is_empty(self): + return not ''.join(self._str).strip() + + +class ParseError(Exception): + def __str__(self): + message = self.args[0] + if hasattr(self, 'docstring'): + message = f"{message} in {self.docstring!r}" + return message + + +Parameter = namedtuple('Parameter', ['name', 'type', 'desc']) + + +class NumpyDocString(Mapping): + """Parses a numpydoc string to an abstract representation + + Instances define a mapping from section title to structured data. + + """ + + sections = { + 'Signature': '', + 'Summary': [''], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Yields': [], + 'Receives': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + + def __init__(self, docstring, config={}): + orig_docstring = docstring + docstring = textwrap.dedent(docstring).split('\n') + + self._doc = Reader(docstring) + self._parsed_data = copy.deepcopy(self.sections) + + try: + self._parse() + except ParseError as e: + e.docstring = orig_docstring + raise + + def __getitem__(self, key): + return self._parsed_data[key] + + def __setitem__(self, key, val): + if key not in self._parsed_data: + self._error_location("Unknown section %s" % key, error=False) + else: + self._parsed_data[key] = val + + def __iter__(self): + return iter(self._parsed_data) + + def __len__(self): + return len(self._parsed_data) + + def _is_at_section(self): + self._doc.seek_next_non_empty_line() + + if self._doc.eof(): + return False + + l1 = self._doc.peek().strip() # e.g. Parameters + + if l1.startswith('.. 
index::'): + return True + + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) + + def _strip(self, doc): + i = 0 + j = 0 + for i, line in enumerate(doc): + if line.strip(): + break + + for j, line in enumerate(doc[::-1]): + if line.strip(): + break + + return doc[i:len(doc)-j] + + def _read_to_next_section(self): + section = self._doc.read_to_next_empty_line() + + while not self._is_at_section() and not self._doc.eof(): + if not self._doc.peek(-1).strip(): # previous line was empty + section += [''] + + section += self._doc.read_to_next_empty_line() + + return section + + def _read_sections(self): + while not self._doc.eof(): + data = self._read_to_next_section() + name = data[0].strip() + + if name.startswith('..'): # index section + yield name, data[1:] + elif len(data) < 2: + yield StopIteration + else: + yield name, self._strip(data[2:]) + + def _parse_param_list(self, content, single_element_is_type=False): + r = Reader(content) + params = [] + while not r.eof(): + header = r.read().strip() + if ' : ' in header: + arg_name, arg_type = header.split(' : ')[:2] + else: + if single_element_is_type: + arg_name, arg_type = '', header + else: + arg_name, arg_type = header, '' + + desc = r.read_to_next_unindented_line() + desc = dedent_lines(desc) + desc = strip_blank_lines(desc) + + params.append(Parameter(arg_name, arg_type, desc)) + + return params + + # See also supports the following formats. + # + # <FUNCNAME> + # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE* + # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE* + # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE* + + # <FUNCNAME> is one of + # <PLAIN_FUNCNAME> + # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK + # where + # <PLAIN_FUNCNAME> is a legal function name, and + # <ROLE> is any nonempty sequence of word characters. + # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j` + # <DESC> is a string describing the function. + + _role = r":(?P<role>\w+):" + _funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`" + _funcplain = r"(?P<name2>[a-zA-Z0-9_\.-]+)" + _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")" + _funcnamenext = _funcname.replace('role', 'rolenext') + _funcnamenext = _funcnamenext.replace('name', 'namenext') + _description = r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$" + _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*") + _line_rgx = re.compile( + r"^\s*" + + r"(?P<allfuncs>" + # group for all function names + _funcname + + r"(?P<morefuncs>([,]\s+" + _funcnamenext + r")*)" + + r")" + # end of "allfuncs" + # Some function lists have a trailing comma (or period) '\s*' + r"(?P<trailing>[,\.])?" + + _description) + + # Empty elements are replaced with '..' + empty_description = '..'
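+    # Editorial example (not upstream): applied to the line
+    # "func_a, func_b : short description", `_line_rgx` captures
+    # allfuncs == "func_a, func_b" and desc == "short description".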
+ + def _parse_see_also(self, content): + """ + func_name : Descriptive text + continued text + another_func_name : Descriptive text + func_name1, func_name2, :meth:`func_name`, func_name3 + + """ + + items = [] + + def parse_item_name(text): + """Match ':role:`name`' or 'name'.""" + m = self._func_rgx.match(text) + if not m: + raise ParseError("%s is not an item name" % text) + role = m.group('role') + name = m.group('name') if role else m.group('name2') + return name, role, m.end() + + rest = [] + for line in content: + if not line.strip(): + continue + + line_match = self._line_rgx.match(line) + description = None + if line_match: + description = line_match.group('desc') + if line_match.group('trailing') and description: + self._error_location( + 'Unexpected comma or period after function list at ' + 'index %d of line "%s"' % (line_match.end('trailing'), + line), + error=False) + if not description and line.startswith(' '): + rest.append(line.strip()) + elif line_match: + funcs = [] + text = line_match.group('allfuncs') + while True: + if not text.strip(): + break + name, role, match_end = parse_item_name(text) + funcs.append((name, role)) + text = text[match_end:].strip() + if text and text[0] == ',': + text = text[1:].strip() + rest = list(filter(None, [description])) + items.append((funcs, rest)) + else: + raise ParseError("%s is not an item name" % line) + return items + + def _parse_index(self, section, content): + """ + .. index:: default + :refguide: something, else, and more + + """ + def strip_each_in(lst): + return [s.strip() for s in lst] + + out = {} + section = section.split('::') + if len(section) > 1: + out['default'] = strip_each_in(section[1].split(','))[0] + for line in content: + line = line.split(':') + if len(line) > 2: + out[line[1]] = strip_each_in(line[2].split(',')) + return out + + def _parse_summary(self): + """Grab signature (if given) and summary""" + if self._is_at_section(): + return + + # If several signatures present, take the last one + while True: + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$') + if compiled.match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + continue + break + + if summary is not None: + self['Summary'] = summary + + if not self._is_at_section(): + self['Extended Summary'] = self._read_to_next_section() + + def _parse(self): + self._doc.reset() + self._parse_summary() + + sections = list(self._read_sections()) + section_names = {section for section, content in sections} + + has_returns = 'Returns' in section_names + has_yields = 'Yields' in section_names + # We could do more tests, but we are not. Arbitrarily. + if has_returns and has_yields: + msg = 'Docstring contains both a Returns and Yields section.' + raise ValueError(msg) + if not has_yields and 'Receives' in section_names: + msg = 'Docstring contains a Receives section but not Yields.'
+ raise ValueError(msg) + + for (section, content) in sections: + if not section.startswith('..'): + section = (s.capitalize() for s in section.split(' ')) + section = ' '.join(section) + if self.get(section): + self._error_location("The section %s appears twice" + % section) + + if section in ('Parameters', 'Other Parameters', 'Attributes', + 'Methods'): + self[section] = self._parse_param_list(content) + elif section in ('Returns', 'Yields', 'Raises', 'Warns', + 'Receives'): + self[section] = self._parse_param_list( + content, single_element_is_type=True) + elif section.startswith('.. index::'): + self['index'] = self._parse_index(section, content) + elif section == 'See Also': + self['See Also'] = self._parse_see_also(content) + else: + self[section] = content + + def _error_location(self, msg, error=True): + if hasattr(self, '_obj'): + # we know where the docs came from: + try: + filename = inspect.getsourcefile(self._obj) + except TypeError: + filename = None + msg = msg + (f" in the docstring of {self._obj} in {filename}.") + if error: + raise ValueError(msg) + else: + warn(msg, stacklevel=3) + + # string conversion routines + + def _str_header(self, name, symbol='-'): + return [name, len(name)*symbol] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + if self['Signature']: + return [self['Signature'].replace('*', r'\*')] + [''] + else: + return [''] + + def _str_summary(self): + if self['Summary']: + return self['Summary'] + [''] + else: + return [] + + def _str_extended_summary(self): + if self['Extended Summary']: + return self['Extended Summary'] + [''] + else: + return [] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_header(name) + for param in self[name]: + parts = [] + if param.name: + parts.append(param.name) + if param.type: + parts.append(param.type) + out += [' : '.join(parts)] + if param.desc and ''.join(param.desc).strip(): + out += self._str_indent(param.desc) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += self[name] + out += [''] + return out + + def _str_see_also(self, func_role): + if not self['See Also']: + return [] + out = [] + out += self._str_header("See Also") + out += [''] + last_had_desc = True + for funcs, desc in self['See Also']: + assert isinstance(funcs, list) + links = [] + for func, role in funcs: + if role: + link = f':{role}:`{func}`' + elif func_role: + link = f':{func_role}:`{func}`' + else: + link = "`%s`_" % func + links.append(link) + link = ', '.join(links) + out += [link] + if desc: + out += self._str_indent([' '.join(desc)]) + last_had_desc = True + else: + last_had_desc = False + out += self._str_indent([self.empty_description]) + + if last_had_desc: + out += [''] + out += [''] + return out + + def _str_index(self): + idx = self['index'] + out = [] + output_index = False + default_index = idx.get('default', '') + if default_index: + output_index = True + out += ['.. 
index:: %s' % default_index] + for section, references in idx.items(): + if section == 'default': + continue + output_index = True + out += [' :{}: {}'.format(section, ', '.join(references))] + if output_index: + return out + else: + return '' + + def __str__(self, func_role=''): + out = [] + out += self._str_signature() + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Returns', 'Yields', 'Receives', + 'Other Parameters', 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_section('Warnings') + out += self._str_see_also(func_role) + for s in ('Notes', 'References', 'Examples'): + out += self._str_section(s) + for param_list in ('Attributes', 'Methods'): + out += self._str_param_list(param_list) + out += self._str_index() + return '\n'.join(out) + + +def indent(str, indent=4): + indent_str = ' '*indent + if str is None: + return indent_str + lines = str.split('\n') + return '\n'.join(indent_str + l for l in lines) + + +def dedent_lines(lines): + """Deindent a list of lines maximally""" + return textwrap.dedent("\n".join(lines)).split("\n") + + +def header(text, style='-'): + return text + '\n' + style*len(text) + '\n' + + +class FunctionDoc(NumpyDocString): + def __init__(self, func, role='func', doc=None, config={}): + self._f = func + self._role = role # e.g. "func" or "meth" + + if doc is None: + if func is None: + raise ValueError("No function or docstring given") + doc = inspect.getdoc(func) or '' + NumpyDocString.__init__(self, doc, config) + + def get_func(self): + func_name = getattr(self._f, '__name__', self.__class__.__name__) + if inspect.isclass(self._f): + func = getattr(self._f, '__call__', self._f.__init__) + else: + func = self._f + return func, func_name + + def __str__(self): + out = '' + + func, func_name = self.get_func() + + roles = {'func': 'function', + 'meth': 'method'} + + if self._role: + if self._role not in roles: + print("Warning: invalid role %s" % self._role) + out += '.. {}:: {}\n \n\n'.format(roles.get(self._role, ''), + func_name) + + out += super().__str__(func_role=self._role) + return out + + +class ClassDoc(NumpyDocString): + + extra_public_methods = ['__call__'] + + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, + config={}): + if not inspect.isclass(cls) and cls is not None: + raise ValueError("Expected a class or None, but got %r" % cls) + self._cls = cls + + if 'sphinx' in sys.modules: + from sphinx.ext.autodoc import ALL + else: + ALL = object() + + self.show_inherited_members = config.get( + 'show_inherited_class_members', True) + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + + if doc is None: + if cls is None: + raise ValueError("No class or documentation string given") + doc = pydoc.getdoc(cls) + + NumpyDocString.__init__(self, doc) + + _members = config.get('members', []) + if _members is ALL: + _members = None + _exclude = config.get('exclude-members', []) + + if config.get('show_class_members', True) and _exclude is not ALL: + def splitlines_x(s): + if not s: + return [] + else: + return s.splitlines() + for field, items in [('Methods', self.methods), + ('Attributes', self.properties)]: + if not self[field]: + doc_list = [] + for name in sorted(items): + if (name in _exclude or + (_members and name not in _members)): + continue + try: + doc_item = pydoc.getdoc(getattr(self._cls, name)) + doc_list.append( + Parameter(name, '', splitlines_x(doc_item))) + except AttributeError: + pass # method doesn't exist + self[field] = doc_list + + @property + def methods(self): + if self._cls is None: + return [] + return [name for name, func in inspect.getmembers(self._cls) + if ((not name.startswith('_') + or name in self.extra_public_methods) + and isinstance(func, Callable) + and self._is_show_member(name))] + + @property + def properties(self): + if self._cls is None: + return [] + return [name for name, func in inspect.getmembers(self._cls) + if (not name.startswith('_') and + (func is None or isinstance(func, property) or + inspect.isdatadescriptor(func)) + and self._is_show_member(name))] + + def _is_show_member(self, name): + if self.show_inherited_members: + return True # show all class members + if name not in self._cls.__dict__: + return False # class member is inherited, we do not show it + return True diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_elementwise_iterative_method.py b/venv/lib/python3.10/site-packages/scipy/_lib/_elementwise_iterative_method.py new file mode 100644 index 0000000000000000000000000000000000000000..1c7820bbfdff1b6c15032526d87756e95d021314 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_elementwise_iterative_method.py @@ -0,0 +1,320 @@ +# `_elementwise_iterative_method.py` includes tools for writing functions that +# - are vectorized to work elementwise on arrays, +# - implement non-trivial, iterative algorithms with a callback interface, and +# - return rich objects with iteration count, termination status, etc. +# +# Examples include: +# `scipy.optimize._chandrupatla._chandrupatla for scalar rootfinding, +# `scipy.optimize._chandrupatla._chandrupatla_minimize for scalar minimization, +# `scipy.optimize._differentiate._differentiate for numerical differentiation, +# `scipy.optimize._bracket._bracket_root for finding rootfinding brackets, +# `scipy.optimize._bracket._bracket_minimize for finding minimization brackets, +# `scipy.integrate._tanhsinh._tanhsinh` for numerical quadrature. + +import numpy as np +from ._util import _RichResult, _call_callback_maybe_halt + +_ESIGNERR = -1 +_ECONVERR = -2 +_EVALUEERR = -3 +_ECALLBACK = -4 +_ECONVERGED = 0 +_EINPROGRESS = 1 + +def _initialize(func, xs, args, complex_ok=False, preserve_shape=None): + """Initialize abscissa, function, and args arrays for elementwise function + + Parameters + ---------- + func : callable + An elementwise function with signature + + func(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. + xs : tuple of arrays + Finite real abscissa arrays. Must be broadcastable. 
+    args : tuple, optional
+        Additional positional arguments to be passed to `func`.
+    preserve_shape : bool, default:False
+        When ``preserve_shape=False`` (default), `func` may be passed
+        arguments of any shape; `_loop` is permitted to reshape and compress
+        arguments at will. When ``preserve_shape=True``, arguments passed
+        to `func` must have shape `shape` or ``shape + (n,)``, where ``n``
+        is any integer.
+
+    Returns
+    -------
+    xs, fs, args : tuple of arrays
+        Broadcasted, writeable, 1D abscissa and function value arrays (or
+        NumPy floats, if appropriate). The dtypes of the `xs` and `fs` are
+        `xfat`; the dtypes of the `args` are unchanged.
+    shape : tuple of ints
+        Original shape of broadcasted arrays.
+    xfat : NumPy dtype
+        Result dtype of abscissae, function values, and args determined using
+        `np.result_type`, except integer types are promoted to `np.float64`.
+
+    Raises
+    ------
+    ValueError
+        If the result dtype is not that of a real scalar.
+
+    Notes
+    -----
+    Useful for initializing the input of SciPy functions that accept
+    an elementwise callable, abscissae, and arguments; e.g.
+    `scipy.optimize._chandrupatla`.
+    """
+    nx = len(xs)
+
+    # Try to preserve `dtype`, but we need to ensure that the arguments are at
+    # least floats before passing them into the function; integers can
+    # overflow and cause failure.
+    # There might be benefit to combining the `xs` into a single array and
+    # calling `func` once on the combined array. For now, keep them separate.
+    xas = np.broadcast_arrays(*xs, *args)  # broadcast and rename
+    xat = np.result_type(*[xa.dtype for xa in xas])
+    xat = np.float64 if np.issubdtype(xat, np.integer) else xat
+    xs, args = xas[:nx], xas[nx:]
+    xs = [x.astype(xat, copy=False)[()] for x in xs]
+    fs = [np.asarray(func(x, *args)) for x in xs]
+    shape = xs[0].shape
+    fshape = fs[0].shape
+
+    if preserve_shape:
+        # bind original shape/func now to avoid late-binding gotcha
+        def func(x, *args, shape=shape, func=func, **kwargs):
+            i = (0,)*(len(fshape) - len(shape))
+            return func(x[i], *args, **kwargs)
+        shape = np.broadcast_shapes(fshape, shape)
+        xs = [np.broadcast_to(x, shape) for x in xs]
+        args = [np.broadcast_to(arg, shape) for arg in args]
+
+    message = ("The shape of the array returned by `func` must be the same as "
+               "the broadcasted shape of `x` and all other `args`.")
+    if preserve_shape is not None:  # only in tanhsinh for now
+        message = f"When `preserve_shape=False`, {message.lower()}"
+    shapes_equal = [f.shape == shape for f in fs]
+    if not np.all(shapes_equal):
+        raise ValueError(message)
+
+    # These algorithms tend to mix the dtypes of the abscissae and function
+    # values, so figure out what the result will be and convert them all to
+    # that type from the outset.
+    xfat = np.result_type(*([f.dtype for f in fs] + [xat]))
+    if not complex_ok and not np.issubdtype(xfat, np.floating):
+        raise ValueError("Abscissae and function output must be real numbers.")
+    xs = [x.astype(xfat, copy=True)[()] for x in xs]
+    fs = [f.astype(xfat, copy=True)[()] for f in fs]
+
+    # To ensure that we can do indexing, we'll work with at least 1d arrays,
+    # but remember the appropriate shape of the output.
+ xs = [x.ravel() for x in xs] + fs = [f.ravel() for f in fs] + args = [arg.flatten() for arg in args] + return func, xs, fs, args, shape, xfat + + +def _loop(work, callback, shape, maxiter, func, args, dtype, pre_func_eval, + post_func_eval, check_termination, post_termination_check, + customize_result, res_work_pairs, preserve_shape=False): + """Main loop of a vectorized scalar optimization algorithm + + Parameters + ---------- + work : _RichResult + All variables that need to be retained between iterations. Must + contain attributes `nit`, `nfev`, and `success` + callback : callable + User-specified callback function + shape : tuple of ints + The shape of all output arrays + maxiter : + Maximum number of iterations of the algorithm + func : callable + The user-specified callable that is being optimized or solved + args : tuple + Additional positional arguments to be passed to `func`. + dtype : NumPy dtype + The common dtype of all abscissae and function values + pre_func_eval : callable + A function that accepts `work` and returns `x`, the active elements + of `x` at which `func` will be evaluated. May modify attributes + of `work` with any algorithmic steps that need to happen + at the beginning of an iteration, before `func` is evaluated, + post_func_eval : callable + A function that accepts `x`, `func(x)`, and `work`. May modify + attributes of `work` with any algorithmic steps that need to happen + in the middle of an iteration, after `func` is evaluated but before + the termination check. + check_termination : callable + A function that accepts `work` and returns `stop`, a boolean array + indicating which of the active elements have met a termination + condition. + post_termination_check : callable + A function that accepts `work`. May modify `work` with any algorithmic + steps that need to happen after the termination check and before the + end of the iteration. + customize_result : callable + A function that accepts `res` and `shape` and returns `shape`. May + modify `res` (in-place) according to preferences (e.g. rearrange + elements between attributes) and modify `shape` if needed. + res_work_pairs : list of (str, str) + Identifies correspondence between attributes of `res` and attributes + of `work`; i.e., attributes of active elements of `work` will be + copied to the appropriate indices of `res` when appropriate. The order + determines the order in which _RichResult attributes will be + pretty-printed. + + Returns + ------- + res : _RichResult + The final result object + + Notes + ----- + Besides providing structure, this framework provides several important + services for a vectorized optimization algorithm. + + - It handles common tasks involving iteration count, function evaluation + count, a user-specified callback, and associated termination conditions. + - It compresses the attributes of `work` to eliminate unnecessary + computation on elements that have already converged. 
+ + """ + cb_terminate = False + + # Initialize the result object and active element index array + n_elements = int(np.prod(shape)) + active = np.arange(n_elements) # in-progress element indices + res_dict = {i: np.zeros(n_elements, dtype=dtype) for i, j in res_work_pairs} + res_dict['success'] = np.zeros(n_elements, dtype=bool) + res_dict['status'] = np.full(n_elements, _EINPROGRESS) + res_dict['nit'] = np.zeros(n_elements, dtype=int) + res_dict['nfev'] = np.zeros(n_elements, dtype=int) + res = _RichResult(res_dict) + work.args = args + + active = _check_termination(work, res, res_work_pairs, active, + check_termination, preserve_shape) + + if callback is not None: + temp = _prepare_result(work, res, res_work_pairs, active, shape, + customize_result, preserve_shape) + if _call_callback_maybe_halt(callback, temp): + cb_terminate = True + + while work.nit < maxiter and active.size and not cb_terminate and n_elements: + x = pre_func_eval(work) + + if work.args and work.args[0].ndim != x.ndim: + # `x` always starts as 1D. If the SciPy function that uses + # _loop added dimensions to `x`, we need to + # add them to the elements of `args`. + dims = np.arange(x.ndim, dtype=np.int64) + work.args = [np.expand_dims(arg, tuple(dims[arg.ndim:])) + for arg in work.args] + + x_shape = x.shape + if preserve_shape: + x = x.reshape(shape + (-1,)) + f = func(x, *work.args) + f = np.asarray(f, dtype=dtype) + if preserve_shape: + x = x.reshape(x_shape) + f = f.reshape(x_shape) + work.nfev += 1 if x.ndim == 1 else x.shape[-1] + + post_func_eval(x, f, work) + + work.nit += 1 + active = _check_termination(work, res, res_work_pairs, active, + check_termination, preserve_shape) + + if callback is not None: + temp = _prepare_result(work, res, res_work_pairs, active, shape, + customize_result, preserve_shape) + if _call_callback_maybe_halt(callback, temp): + cb_terminate = True + break + if active.size == 0: + break + + post_termination_check(work) + + work.status[:] = _ECALLBACK if cb_terminate else _ECONVERR + return _prepare_result(work, res, res_work_pairs, active, shape, + customize_result, preserve_shape) + + +def _check_termination(work, res, res_work_pairs, active, check_termination, + preserve_shape): + # Checks termination conditions, updates elements of `res` with + # corresponding elements of `work`, and compresses `work`. + + stop = check_termination(work) + + if np.any(stop): + # update the active elements of the result object with the active + # elements for which a termination condition has been met + _update_active(work, res, res_work_pairs, active, stop, preserve_shape) + + if preserve_shape: + stop = stop[active] + + proceed = ~stop + active = active[proceed] + + if not preserve_shape: + # compress the arrays to avoid unnecessary computation + for key, val in work.items(): + work[key] = val[proceed] if isinstance(val, np.ndarray) else val + work.args = [arg[proceed] for arg in work.args] + + return active + + +def _update_active(work, res, res_work_pairs, active, mask, preserve_shape): + # Update `active` indices of the arrays in result object `res` with the + # contents of the scalars and arrays in `update_dict`. When provided, + # `mask` is a boolean array applied both to the arrays in `update_dict` + # that are to be used and to the arrays in `res` that are to be updated. 
+ update_dict = {key1: work[key2] for key1, key2 in res_work_pairs} + update_dict['success'] = work.status == 0 + + if mask is not None: + if preserve_shape: + active_mask = np.zeros_like(mask) + active_mask[active] = 1 + active_mask = active_mask & mask + for key, val in update_dict.items(): + res[key][active_mask] = (val[active_mask] if np.size(val) > 1 + else val) + else: + active_mask = active[mask] + for key, val in update_dict.items(): + res[key][active_mask] = val[mask] if np.size(val) > 1 else val + else: + for key, val in update_dict.items(): + if preserve_shape and not np.isscalar(val): + val = val[active] + res[key][active] = val + + +def _prepare_result(work, res, res_work_pairs, active, shape, customize_result, + preserve_shape): + # Prepare the result object `res` by creating a copy, copying the latest + # data from work, running the provided result customization function, + # and reshaping the data to the original shapes. + res = res.copy() + _update_active(work, res, res_work_pairs, active, None, preserve_shape) + + shape = customize_result(res, shape) + + for key, val in res.items(): + res[key] = np.reshape(val, shape)[()] + res['_order_keys'] = ['success'] + [i for i, j in res_work_pairs] + return _RichResult(**res) diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_finite_differences.py b/venv/lib/python3.10/site-packages/scipy/_lib/_finite_differences.py new file mode 100644 index 0000000000000000000000000000000000000000..506057b48b3f49244e1ed6cd755fad8ad43d8739 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_finite_differences.py @@ -0,0 +1,145 @@ +from numpy import arange, newaxis, hstack, prod, array + + +def _central_diff_weights(Np, ndiv=1): + """ + Return weights for an Np-point central derivative. + + Assumes equally-spaced function points. + + If weights are in the vector w, then + derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx) + + Parameters + ---------- + Np : int + Number of points for the central derivative. + ndiv : int, optional + Number of divisions. Default is 1. + + Returns + ------- + w : ndarray + Weights for an Np-point central derivative. Its size is `Np`. + + Notes + ----- + Can be inaccurate for a large number of points. + + Examples + -------- + We can calculate a derivative value of a function. + + >>> def f(x): + ... return 2 * x**2 + 3 + >>> x = 3.0 # derivative point + >>> h = 0.1 # differential step + >>> Np = 3 # point number for central derivative + >>> weights = _central_diff_weights(Np) # weights for first derivative + >>> vals = [f(x + (i - Np/2) * h) for i in range(Np)] + >>> sum(w * v for (w, v) in zip(weights, vals))/h + 11.79999999999998 + + This value is close to the analytical solution: + f'(x) = 4x, so f'(3) = 12 + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Finite_difference + + """ + if Np < ndiv + 1: + raise ValueError( + "Number of points must be at least the derivative order + 1." + ) + if Np % 2 == 0: + raise ValueError("The number of points must be odd.") + from scipy import linalg + + ho = Np >> 1 + x = arange(-ho, ho + 1.0) + x = x[:, newaxis] + X = x**0.0 + for k in range(1, Np): + X = hstack([X, x**k]) + w = prod(arange(1, ndiv + 1), axis=0) * linalg.inv(X)[ndiv] + return w + + +def _derivative(func, x0, dx=1.0, n=1, args=(), order=3): + """ + Find the nth derivative of a function at a point. + + Given a function, use a central difference formula with spacing `dx` to + compute the nth derivative at `x0`. 
+
+    Parameters
+    ----------
+    func : function
+        Input function.
+    x0 : float
+        The point at which the nth derivative is found.
+    dx : float, optional
+        Spacing.
+    n : int, optional
+        Order of the derivative. Default is 1.
+    args : tuple, optional
+        Arguments to pass to `func`.
+    order : int, optional
+        Number of points to use; must be odd.
+
+    Notes
+    -----
+    Decreasing the step size too far can result in round-off error.
+
+    Examples
+    --------
+    >>> def f(x):
+    ...     return x**3 + x**2
+    >>> _derivative(f, 1.0, dx=1e-6)
+    4.9999999999217337
+
+    """
+    if order < n + 1:
+        raise ValueError(
+            "'order' (the number of points used to compute the derivative) "
+            "must be at least the derivative order 'n' + 1."
+        )
+    if order % 2 == 0:
+        raise ValueError(
+            "'order' (the number of points used to compute the derivative) "
+            "must be odd."
+        )
+    # weights are pre-computed for n=1 and n=2 at low orders, for speed.
+    if n == 1:
+        if order == 3:
+            weights = array([-1, 0, 1]) / 2.0
+        elif order == 5:
+            weights = array([1, -8, 0, 8, -1]) / 12.0
+        elif order == 7:
+            weights = array([-1, 9, -45, 0, 45, -9, 1]) / 60.0
+        elif order == 9:
+            weights = array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0
+        else:
+            weights = _central_diff_weights(order, 1)
+    elif n == 2:
+        if order == 3:
+            weights = array([1, -2.0, 1])
+        elif order == 5:
+            weights = array([-1, 16, -30, 16, -1]) / 12.0
+        elif order == 7:
+            weights = array([2, -27, 270, -490, 270, -27, 2]) / 180.0
+        elif order == 9:
+            weights = (
+                array([-9, 128, -1008, 8064, -14350, 8064, -1008, 128, -9])
+                / 5040.0
+            )
+        else:
+            weights = _central_diff_weights(order, 2)
+    else:
+        weights = _central_diff_weights(order, n)
+    val = 0.0
+    ho = order >> 1
+    for k in range(order):
+        val += weights[k] * func(x0 + (k - ho) * dx, *args)
+    return val / prod((dx,) * n, axis=0)
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_fpumode.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/_lib/_fpumode.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3a443899bde3481ed6c1359eff4ef9696f6c8e4d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/_fpumode.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_gcutils.py b/venv/lib/python3.10/site-packages/scipy/_lib/_gcutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..854ae36228614f3eb8849e9f95abf0dd387b5d35
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/_gcutils.py
@@ -0,0 +1,105 @@
+"""
+Module for testing automatic garbage collection of objects
+
+.. autosummary::
+   :toctree: generated/
+
+   set_gc_state - enable or disable garbage collection
+   gc_state - context manager for given state of garbage collector
+   assert_deallocated - context manager to check for circular references on object
+
+"""
+import weakref
+import gc
+
+from contextlib import contextmanager
+from platform import python_implementation
+
+__all__ = ['set_gc_state', 'gc_state', 'assert_deallocated']
+
+
+IS_PYPY = python_implementation() == 'PyPy'
+
+
+class ReferenceError(AssertionError):
+    pass
+
+
+def set_gc_state(state):
+    """ Set status of garbage collector """
+    if gc.isenabled() == state:
+        return
+    if state:
+        gc.enable()
+    else:
+        gc.disable()
+
+
+@contextmanager
+def gc_state(state):
+    """ Context manager to set state of garbage collector to `state`
+
+    Parameters
+    ----------
+    state : bool
+        True for gc enabled, False for disabled
+
+    Examples
+    --------
+    >>> with gc_state(False):
+    ...     assert not gc.isenabled()
+    >>> with gc_state(True):
+    ...     assert gc.isenabled()
+    """
+    orig_state = gc.isenabled()
+    set_gc_state(state)
+    yield
+    set_gc_state(orig_state)
+
+
+@contextmanager
+def assert_deallocated(func, *args, **kwargs):
+    """Context manager to check that object is deallocated
+
+    This is useful for checking that an object can be freed directly by
+    reference counting, without requiring gc to break reference cycles.
+    GC is disabled inside the context manager.
+
+    This check is not available on PyPy.
+
+    Parameters
+    ----------
+    func : callable
+        Callable to create object to check
+    \\*args : sequence
+        positional arguments to `func` in order to create object to check
+    \\*\\*kwargs : dict
+        keyword arguments to `func` in order to create object to check
+
+    Examples
+    --------
+    >>> class C: pass
+    >>> with assert_deallocated(C) as c:
+    ...     # do something
+    ...     del c
+
+    >>> class C:
+    ...     def __init__(self):
+    ...         self._circular = self  # Make circular reference
+    >>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL
+    ...     # do something
+    ...     del c
+    Traceback (most recent call last):
+        ...
+    ReferenceError: Remaining reference(s) to object
+    """
+    if IS_PYPY:
+        raise RuntimeError("assert_deallocated is unavailable on PyPy")
+
+    with gc_state(False):
+        obj = func(*args, **kwargs)
+        ref = weakref.ref(obj)
+        yield obj
+        del obj
+        if ref() is not None:
+            raise ReferenceError("Remaining reference(s) to object")
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_pep440.py b/venv/lib/python3.10/site-packages/scipy/_lib/_pep440.py
new file mode 100644
index 0000000000000000000000000000000000000000..d546e32a0349461a0aab76bfb4636ebf25227ca0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/_pep440.py
@@ -0,0 +1,487 @@
+"""Utility to compare pep440 compatible version strings.
+
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+"""
+
+# Copyright (c) Donald Stufft and individual contributors.
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+ +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import collections +import itertools +import re + + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", +] + + +# BEGIN packaging/_structures.py + + +class Infinity: + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity: + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +# BEGIN packaging/version.py + + +NegativeInfinity = NegativeInfinity() + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. 
+    """
+
+
+class _BaseVersion:
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return f"<LegacyVersion('{self}')>"
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
+    # epoch greater than or equal to 0. This will effectively sort the
+    # LegacyVersion, which uses the de facto standard originally implemented
+    # by setuptools, before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version of setuptools,
+    # prior to its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return f"<Version('{self}')>"
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append(f"{self._version.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(f".post{self._version.post[1]}")
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(f".dev{self._version.dev[1]}")
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append(f"{self._version.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
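+# Editor's illustrative sketch (not part of the vendored module): how a parsed
+# version round-trips and exposes its components.
+#
+#     >>> v = Version("1.0.post2.dev3+ubuntu.1")
+#     >>> str(v)
+#     '1.0.post2.dev3+ubuntu.1'
+#     >>> v.public, v.base_version, v.local
+#     ('1.0.post2.dev3', '1.0', 'ubuntu.1')
+#     >>> v.is_postrelease, v.is_prerelease
+#     (True, True)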
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
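+# Editor's note (illustrative only): the normalization above maps alternate
+# spellings onto canonical forms and supplies implicit numbers and letters.
+#
+#     >>> _parse_letter_version("alpha", None)
+#     ('a', 0)
+#     >>> _parse_letter_version("rev", "4")
+#     ('post', 4)
+#     >>> _parse_letter_version(None, "1")  # implicit post release, e.g. 1.0-1
+#     ('post', 1)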
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all of the now-
+    # leading zeros until we come to something non-zero, re-reverse the rest
+    # back into the correct order, and make it a tuple to use as our sorting
+    # key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre-segment, but we _only_ want to do this
+    # if there is neither a pre-segment nor a post-segment. If we have one of
+    # those, then the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post-segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
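+# Editor's illustrative sketch (not part of the vendored module): the ordering
+# implied by `_cmpkey` agrees with PEP 440.
+#
+#     >>> vs = ["1.0.post1", "1.0+local", "1.0", "1.0a1", "1.0.dev0"]
+#     >>> [str(v) for v in sorted(Version(s) for s in vs)]
+#     ['1.0.dev0', '1.0a1', '1.0', '1.0+local', '1.0.post1']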
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..bfb217d3ba8618170d57b5451f369660eb4ede64
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..1cd705672029182570b7fed93f32678bc32ec02f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_def.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_def.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..11ae6dec7cb241de7937b4d2b3b427525badfbbf
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_def.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_testutils.py b/venv/lib/python3.10/site-packages/scipy/_lib/_testutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..69f4f09e01b0f2ca9f024813498b51ccabe183b4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/_testutils.py
@@ -0,0 +1,253 @@
+"""
+Generic test utilities.
+
+"""
+
+import os
+import re
+import sys
+import numpy as np
+import inspect
+import sysconfig
+
+
+__all__ = ['PytestTester', 'check_free_memory', '_TestPythranFunc', 'IS_MUSL']
+
+
+IS_MUSL = False
+# An alternative way is:
+# from packaging.tags import sys_tags
+#     _tags = list(sys_tags())
+#     if 'musllinux' in _tags[0].platform:
+_v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
+if 'musl' in _v:
+    IS_MUSL = True
+
+
+class FPUModeChangeWarning(RuntimeWarning):
+    """Warning about FPU mode change"""
+    pass
+
+
+class PytestTester:
+    """
+    Run tests for this namespace
+
+    ``scipy.test()`` runs tests for all of SciPy, with the default settings.
+    When used from a submodule (e.g., ``scipy.cluster.test()``), only the tests
+    for that namespace are run.
+
+    Parameters
+    ----------
+    label : {'fast', 'full'}, optional
+        Whether to run only the fast tests, or also those marked as slow.
+        Default is 'fast'.
+    verbose : int, optional
+        Test output verbosity. Default is 1.
+    extra_argv : list, optional
+        Arguments to pass through to Pytest.
+    doctests : bool, optional
+        Whether to run doctests or not. Default is False.
+    coverage : bool, optional
+        Whether to run tests with code coverage measurements enabled.
+        Default is False.
+    tests : list of str, optional
+        List of module names to run tests for. By default, uses the module
+        from which the ``test`` function is called.
+    parallel : int, optional
+        Run tests in parallel with pytest-xdist, if the given number is
+        larger than 1. Default is 1.
+
+    """
+    def __init__(self, module_name):
+        self.module_name = module_name
+
+    def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
+                 coverage=False, tests=None, parallel=None):
+        import pytest
+
+        module = sys.modules[self.module_name]
+        module_path = os.path.abspath(module.__path__[0])
+
+        pytest_args = ['--showlocals', '--tb=short']
+
+        if doctests:
+            raise ValueError("Doctests not supported")
+
+        if extra_argv:
+            pytest_args += list(extra_argv)
+
+        if verbose and int(verbose) > 1:
+            pytest_args += ["-" + "v"*(int(verbose)-1)]
+
+        if coverage:
+            pytest_args += ["--cov=" + module_path]
+
+        if label == "fast":
+            pytest_args += ["-m", "not slow"]
+        elif label != "full":
+            pytest_args += ["-m", label]
+
+        if tests is None:
+            tests = [self.module_name]
+
+        if parallel is not None and parallel > 1:
+            if _pytest_has_xdist():
+                pytest_args += ['-n', str(parallel)]
+            else:
+                import warnings
+                warnings.warn('Could not run tests in parallel because '
+                              'pytest-xdist plugin is not available.',
+                              stacklevel=2)
+
+        pytest_args += ['--pyargs'] + list(tests)
+
+        try:
+            code = pytest.main(pytest_args)
+        except SystemExit as exc:
+            code = exc.code
+
+        return (code == 0)
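+# Editor's usage sketch (illustrative only): each subpackage exposes an
+# instance of this class as ``test``, so a typical invocation is
+#
+#     >>> import scipy.cluster
+#     >>> scipy.cluster.test(label="fast", verbose=2)  # doctest: +SKIP
+#     True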
+
+
+class _TestPythranFunc:
+    '''
+    These are situations that can be tested in our pythran tests:
+    - A function with multiple array arguments and then
+      other positional and keyword arguments.
+    - A function with array-like keywords (e.g. `def somefunc(x0, x1=None)`).
+    Note: list/tuple input is not yet tested!
+
+    `self.arguments`: A dictionary whose keys are argument indices and whose
+                      values are tuples of (array value, all supported dtypes)
+    `self.partialfunc`: A function used to freeze some non-array arguments
+                        that are of no interest in the original function
+    '''
+    ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]
+    ALL_FLOAT = [np.float32, np.float64]
+    ALL_COMPLEX = [np.complex64, np.complex128]
+
+    def setup_method(self):
+        self.arguments = {}
+        self.partialfunc = None
+        self.expected = None
+
+    def get_optional_args(self, func):
+        # get optional arguments with their default values,
+        # used for testing keywords
+        signature = inspect.signature(func)
+        optional_args = {}
+        for k, v in signature.parameters.items():
+            if v.default is not inspect.Parameter.empty:
+                optional_args[k] = v.default
+        return optional_args
+
+    def get_max_dtype_list_length(self):
+        # get the max supported dtypes list length in all arguments
+        max_len = 0
+        for arg_idx in self.arguments:
+            cur_len = len(self.arguments[arg_idx][1])
+            if cur_len > max_len:
+                max_len = cur_len
+        return max_len
+
+    def get_dtype(self, dtype_list, dtype_idx):
+        # get the dtype from dtype_list via index
+        # if the index is out of range, then return the last dtype
+        if dtype_idx > len(dtype_list)-1:
+            return dtype_list[-1]
+        else:
+            return dtype_list[dtype_idx]
+
+    def test_all_dtypes(self):
+        for type_idx in range(self.get_max_dtype_list_length()):
+            args_array = []
+            for arg_idx in self.arguments:
+                new_dtype = self.get_dtype(self.arguments[arg_idx][1],
+                                           type_idx)
+                args_array.append(self.arguments[arg_idx][0].astype(new_dtype))
+            self.pythranfunc(*args_array)
+
+    def test_views(self):
+        args_array = []
+        for arg_idx in self.arguments:
+            args_array.append(self.arguments[arg_idx][0][::-1][::-1])
+        self.pythranfunc(*args_array)
+
+    def test_strided(self):
+        args_array = []
+        for arg_idx in self.arguments:
+            args_array.append(np.repeat(self.arguments[arg_idx][0],
+                                        2, axis=0)[::2])
+        self.pythranfunc(*args_array)
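+# Editor's sketch of a concrete subclass (illustrative only;
+# ``_example_pythran_func`` is a hypothetical compiled function under test):
+#
+#     class TestExample(_TestPythranFunc):
+#         def setup_method(self):
+#             super().setup_method()
+#             # argument 0: base value plus every dtype it should accept
+#             self.arguments = {0: (np.arange(4.0), self.ALL_FLOAT)}
+#             self.pythranfunc = _example_pythran_func
+#
+# ``test_all_dtypes``, ``test_views``, and ``test_strided`` then exercise the
+# function with each supported dtype and with non-contiguous inputs.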
+
+
+def _pytest_has_xdist():
+    """
+    Check if the pytest-xdist plugin, which provides parallel tests, is installed
+    """
+    # Check xdist exists without importing, otherwise pytest emits warnings
+    from importlib.util import find_spec
+    return find_spec('xdist') is not None
+
+
+def check_free_memory(free_mb):
+    """
+    Check that *free_mb* MB of memory is available; otherwise, do pytest.skip
+    """
+    import pytest
+
+    try:
+        mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM'])
+        msg = '{} MB memory required, but environment SCIPY_AVAILABLE_MEM={}'.format(
+            free_mb, os.environ['SCIPY_AVAILABLE_MEM'])
+    except KeyError:
+        mem_free = _get_mem_available()
+        if mem_free is None:
+            pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
+                        "variable to free memory in MB to run the test.")
+        msg = f'{free_mb} MB memory required, but {mem_free/1e6} MB available'
+
+    if mem_free < free_mb * 1e6:
+        pytest.skip(msg)
+
+
+def _parse_size(size_str):
+    suffixes = {'': 1e6,
+                'b': 1.0,
+                'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
+                'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
+                'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
+    m = re.match(r'^\s*(\d+)\s*({})\s*$'.format('|'.join(suffixes.keys())),
+                 size_str,
+                 re.I)
+    if not m or m.group(2) not in suffixes:
+        raise ValueError("Invalid size string")
+
+    return float(m.group(1)) * suffixes[m.group(2)]
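+# Editor's examples (illustrative only): bare numbers are interpreted as MB,
+# and binary suffixes use powers of 1024.
+#
+#     >>> _parse_size("512")
+#     512000000.0
+#     >>> _parse_size("2 Gib")
+#     2147483648.0
+#     >>> _parse_size("100 kb")
+#     100000.0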
+
+
+def _get_mem_available():
+    """
+    Get information about memory available, not counting swap.
+    """
+    try:
+        import psutil
+        return psutil.virtual_memory().available
+    except (ImportError, AttributeError):
+        pass
+
+    if sys.platform.startswith('linux'):
+        info = {}
+        with open('/proc/meminfo') as f:
+            for line in f:
+                p = line.split()
+                info[p[0].strip(':').lower()] = float(p[1]) * 1e3
+
+        if 'memavailable' in info:
+            # Linux >= 3.14
+            return info['memavailable']
+        else:
+            return info['memfree'] + info['cached']
+
+    return None
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_threadsafety.py b/venv/lib/python3.10/site-packages/scipy/_lib/_threadsafety.py
new file mode 100644
index 0000000000000000000000000000000000000000..feea0c5923903b0b751e66bdf192e7f1d2b7ac67
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/_threadsafety.py
@@ -0,0 +1,58 @@
+import threading
+
+import scipy._lib.decorator
+
+
+__all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']
+
+
+class ReentrancyError(RuntimeError):
+    pass
+
+
+class ReentrancyLock:
+    """
+    Threading lock that raises an exception for reentrant calls.
+
+    Calls from different threads are serialized, and nested calls from the
+    same thread result in an error.
+
+    The object can be used as a context manager or to decorate functions
+    via the decorate() method.
+
+    """
+
+    def __init__(self, err_msg):
+        self._rlock = threading.RLock()
+        self._entered = False
+        self._err_msg = err_msg
+
+    def __enter__(self):
+        self._rlock.acquire()
+        if self._entered:
+            self._rlock.release()
+            raise ReentrancyError(self._err_msg)
+        self._entered = True
+
+    def __exit__(self, type, value, traceback):
+        self._entered = False
+        self._rlock.release()
+
+    def decorate(self, func):
+        def caller(func, *a, **kw):
+            with self:
+                return func(*a, **kw)
+        return scipy._lib.decorator.decorate(func, caller)
+
+
+def non_reentrant(err_msg=None):
+    """
+    Decorate a function with a threading lock and prevent reentrant calls.
+    """
+    def decorator(func):
+        msg = err_msg
+        if msg is None:
+            msg = "%s is not re-entrant" % func.__name__
+        lock = ReentrancyLock(msg)
+        return lock.decorate(func)
+    return decorator
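+# Editor's usage sketch (illustrative only): a reentrant call fails fast
+# instead of deadlocking.
+#
+#     >>> @non_reentrant()
+#     ... def f(n):
+#     ...     return f(n - 1) if n else 0
+#     >>> f(0)
+#     0
+#     >>> f(1)
+#     Traceback (most recent call last):
+#         ...
+#     ReentrancyError: f is not re-entrant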
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_tmpdirs.py b/venv/lib/python3.10/site-packages/scipy/_lib/_tmpdirs.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f9fd546a9d2ae3e9a20c0684f79eb0b3d61ee92
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/_tmpdirs.py
@@ -0,0 +1,86 @@
+''' Contexts for *with* statement providing temporary directories
+'''
+import os
+from contextlib import contextmanager
+from shutil import rmtree
+from tempfile import mkdtemp
+
+
+@contextmanager
+def tempdir():
+    """Create and return a temporary directory. This has the same
+    behavior as mkdtemp but can be used as a context manager.
+
+    Upon exiting the context, the directory and everything contained
+    in it are removed.
+
+    Examples
+    --------
+    >>> import os
+    >>> with tempdir() as tmpdir:
+    ...     fname = os.path.join(tmpdir, 'example_file.txt')
+    ...     with open(fname, 'wt') as fobj:
+    ...         _ = fobj.write('a string\\n')
+    >>> os.path.exists(tmpdir)
+    False
+    """
+    d = mkdtemp()
+    yield d
+    rmtree(d)
+
+
+@contextmanager
+def in_tempdir():
+    ''' Create, return, and change directory to a temporary directory
+
+    Examples
+    --------
+    >>> import os
+    >>> my_cwd = os.getcwd()
+    >>> with in_tempdir() as tmpdir:
+    ...     _ = open('test.txt', 'wt').write('some text')
+    ...     assert os.path.isfile('test.txt')
+    ...     assert os.path.isfile(os.path.join(tmpdir, 'test.txt'))
+    >>> os.path.exists(tmpdir)
+    False
+    >>> os.getcwd() == my_cwd
+    True
+    '''
+    pwd = os.getcwd()
+    d = mkdtemp()
+    os.chdir(d)
+    yield d
+    os.chdir(pwd)
+    rmtree(d)
+
+
+@contextmanager
+def in_dir(dir=None):
+    """ Change directory to given directory for duration of ``with`` block
+
+    Useful when you want to use `in_tempdir` for the final test, but
+    you are still debugging. For example, you may want to do this in the end:
+
+    >>> with in_tempdir() as tmpdir:
+    ...     # do something complicated which might break
+    ...     pass
+
+    But, indeed, the complicated thing does break, and meanwhile, the
+    ``in_tempdir`` context manager wiped out the directory with the
+    temporary files that you wanted for debugging. So, while debugging, you
+    replace with something like:
+
+    >>> with in_dir() as tmpdir: # Use working directory by default
+    ...     # do something complicated which might break
+    ...     pass
+
+    You can then look at the temporary file outputs to debug what is happening,
+    fix, and finally replace ``in_dir`` with ``in_tempdir`` again.
+    """
+    cwd = os.getcwd()
+    if dir is None:
+        yield cwd
+        return
+    os.chdir(dir)
+    yield dir
+    os.chdir(cwd)
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_util.py b/venv/lib/python3.10/site-packages/scipy/_lib/_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff297e80625d9f910f00ea58e62a06a770462117
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/_util.py
@@ -0,0 +1,948 @@
+import re
+from contextlib import contextmanager
+import functools
+import operator
+import warnings
+import numbers
+from collections import namedtuple
+import inspect
+import math
+from typing import (
+    Optional,
+    Union,
+    TYPE_CHECKING,
+    TypeVar,
+)
+
+import numpy as np
+from scipy._lib._array_api import array_namespace
+
+
+AxisError: type[Exception]
+ComplexWarning: type[Warning]
+VisibleDeprecationWarning: type[Warning]
+
+if np.lib.NumpyVersion(np.__version__) >= '1.25.0':
+    from numpy.exceptions import (
+        AxisError, ComplexWarning, VisibleDeprecationWarning,
+        DTypePromotionError
+    )
+else:
+    from numpy import (
+        AxisError, ComplexWarning, VisibleDeprecationWarning  # noqa: F401
+    )
+    DTypePromotionError = TypeError  # type: ignore
+
+np_long: type
+np_ulong: type
+
+if np.lib.NumpyVersion(np.__version__) >= "2.0.0.dev0":
+    try:
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore",
+                r".*In the future `np\.long` will be defined as.*",
+                FutureWarning,
+            )
+            np_long = np.long  # type: ignore[attr-defined]
+            np_ulong = np.ulong  # type: ignore[attr-defined]
+    except AttributeError:
+        np_long = np.int_
+        np_ulong = np.uint
+else:
+    np_long = np.int_
+    np_ulong = np.uint
+
+IntNumber = Union[int, np.integer]
+DecimalNumber = Union[float, np.floating, np.integer]
+
+copy_if_needed: Optional[bool]
+
+if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
+    copy_if_needed = None
+elif np.lib.NumpyVersion(np.__version__) < "1.28.0":
+    copy_if_needed = False
+else:
+    # 2.0.0 dev versions, handle cases where copy may or may not exist
+    try:
+        np.array([1]).__array__(copy=None)  # type: ignore[call-overload]
+        copy_if_needed = None
+    except TypeError:
+        copy_if_needed = False
+
+# Since Generator was introduced in numpy 1.17, the following condition is needed for
+# backward compatibility
+if TYPE_CHECKING:
+    SeedType = Optional[Union[IntNumber, np.random.Generator,
+                              np.random.RandomState]]
+    GeneratorType = TypeVar("GeneratorType", bound=Union[np.random.Generator,
+                                                         np.random.RandomState])
+
+try:
+    from numpy.random import Generator as Generator
+except ImportError:
+    class Generator:  # type: ignore[no-redef]
+        pass
+
+
+def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
+    """Return elements chosen from two possibilities depending on a condition
+
+    Equivalent to ``f(*arrays) if cond else fillvalue`` performed elementwise.
+
+    Parameters
+    ----------
+    cond : array
+        The condition (expressed as a boolean array).
+    arrays : tuple of array
+        Arguments to `f` (and `f2`). Must be broadcastable with `cond`.
+    f : callable
+        Where `cond` is True, output will be ``f(arr1[cond], arr2[cond], ...)``
+    fillvalue : object
+        If provided, value with which to fill output array where `cond` is
+        not True.
+    f2 : callable
+        If provided, output will be ``f2(arr1[cond], arr2[cond], ...)`` where
+        `cond` is not True.
+
+    Returns
+    -------
+    out : array
+        An array with elements from the output of `f` where `cond` is True
+        and `fillvalue` (or elements from the output of `f2`) elsewhere. The
+        returned array has data type determined by Type Promotion Rules
+        with the output of `f` and `fillvalue` (or the output of `f2`).
+
+    Notes
+    -----
+    ``xp.where(cond, x, fillvalue)`` requires explicitly forming `x` even where
+    `cond` is False. This function evaluates ``f(arr1[cond], arr2[cond], ...)``
+    only where `cond` is True.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
+    >>> def f(a, b):
+    ...     return a*b
+    >>> _lazywhere(a > 2, (a, b), f, np.nan)
+    array([ nan,  nan,  21.,  32.])
+
+    """
+    xp = array_namespace(cond, *arrays)
+
+    if (f2 is fillvalue is None) or (f2 is not None and fillvalue is not None):
+        raise ValueError("Exactly one of `fillvalue` or `f2` must be given.")
+
+    args = xp.broadcast_arrays(cond, *arrays)
+    bool_dtype = xp.asarray([True]).dtype  # numpy 1.xx doesn't have `bool`
+    cond, arrays = xp.astype(args[0], bool_dtype, copy=False), args[1:]
+
+    temp1 = xp.asarray(f(*(arr[cond] for arr in arrays)))
+
+    if f2 is None:
+        fillvalue = xp.asarray(fillvalue)
+        dtype = xp.result_type(temp1.dtype, fillvalue.dtype)
+        out = xp.full(cond.shape, fill_value=fillvalue, dtype=dtype)
+    else:
+        ncond = ~cond
+        temp2 = xp.asarray(f2(*(arr[ncond] for arr in arrays)))
+        dtype = xp.result_type(temp1, temp2)
+        out = xp.empty(cond.shape, dtype=dtype)
+        out[ncond] = temp2
+
+    out[cond] = temp1
+
+    return out
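+# Editor's example (illustrative only): with `f2` instead of `fillvalue`, the
+# complementary elements are computed lazily as well.
+#
+#     >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
+#     >>> _lazywhere(a > 2, (a, b), lambda a, b: a*b, f2=lambda a, b: a + b)
+#     array([ 6,  8, 21, 32])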
+
+
+def _lazyselect(condlist, choicelist, arrays, default=0):
+    """
+    Mimic `np.select(condlist, choicelist)`.
+
+    Note that it assumes all `arrays` are of the same shape or can be
+    broadcast together.
+
+    All functions in `choicelist` must accept array arguments in the order
+    given in `arrays` and must return an array of the same shape as broadcasted
+    `arrays`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(6)
+    >>> np.select([x < 3, x > 3], [x**2, x**3], default=0)
+    array([  0,   1,   4,   0,  64, 125])
+
+    >>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
+    array([   0.,    1.,    4.,   0.,   64.,  125.])
+
+    >>> a = -np.ones_like(x)
+    >>> _lazyselect([x < 3, x > 3],
+    ...             [lambda x, a: x**2, lambda x, a: a * x**3],
+    ...             (x, a), default=np.nan)
+    array([   0.,    1.,    4.,   nan,  -64., -125.])
+
+    """
+    arrays = np.broadcast_arrays(*arrays)
+    tcode = np.mintypecode([a.dtype.char for a in arrays])
+    out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
+    for func, cond in zip(choicelist, condlist):
+        if np.all(cond is False):
+            continue
+        cond, _ = np.broadcast_arrays(cond, arrays[0])
+        temp = tuple(np.extract(cond, arr) for arr in arrays)
+        np.place(out, cond, func(*temp))
+    return out
+
+
+def _aligned_zeros(shape, dtype=float, order="C", align=None):
+    """Allocate a new ndarray with aligned memory.
+
+    Primary use case for this currently is working around a f2py issue
+    in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
+    not necessarily create arrays aligned up to it.
+
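+    Examples
+    --------
+    A small illustration (the 64-byte alignment is an arbitrary choice):
+
+    >>> import numpy as np
+    >>> a = _aligned_zeros((3, 4), align=64)
+    >>> a.ctypes.data % 64
+    0
+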
+    """
+    dtype = np.dtype(dtype)
+    if align is None:
+        align = dtype.alignment
+    if not hasattr(shape, '__len__'):
+        shape = (shape,)
+    size = functools.reduce(operator.mul, shape) * dtype.itemsize
+    buf = np.empty(size + align + 1, np.uint8)
+    offset = buf.__array_interface__['data'][0] % align
+    if offset != 0:
+        offset = align - offset
+    # Note: slices producing 0-size arrays do not necessarily change
+    # data pointer --- so we use and allocate size+1
+    buf = buf[offset:offset+size+1][:-1]
+    data = np.ndarray(shape, dtype, buf, order=order)
+    data.fill(0)
+    return data
+
+
+def _prune_array(array):
+    """Return an array equivalent to the input array. If the input
+    array is a view of a much larger array, copy its contents to a
+    newly allocated array. Otherwise, return the input unchanged.
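+
+    Examples
+    --------
+    A minimal sketch:
+
+    >>> import numpy as np
+    >>> base = np.zeros(100)
+    >>> view = base[:10]  # small view of a much larger array
+    >>> _prune_array(view).base is None  # contents were copied
+    True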
+    """
+    if array.base is not None and array.size < array.base.size // 2:
+        return array.copy()
+    return array
+
+
+def float_factorial(n: int) -> float:
+    """Compute the factorial and return as a float
+
+    Returns infinity when result is too large for a double
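+
+    Examples
+    --------
+    >>> float_factorial(5)
+    120.0
+    >>> float_factorial(171)  # 171! overflows a double
+    inf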
+    """
+    return float(math.factorial(n)) if n < 171 else np.inf
+
+
+# copy-pasted from scikit-learn utils/validation.py
+# change this to scipy.stats._qmc.check_random_state once numpy 1.16 is dropped
+def check_random_state(seed):
+    """Turn `seed` into a `np.random.RandomState` instance.
+
+    Parameters
+    ----------
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    Returns
+    -------
+    seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
+        Random number generator.
+
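+    Examples
+    --------
+    A brief illustration:
+
+    >>> import numpy as np
+    >>> rng = check_random_state(42)
+    >>> isinstance(rng, np.random.RandomState)
+    True
+    >>> gen = np.random.default_rng()
+    >>> check_random_state(gen) is gen  # existing generators pass through
+    True
+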
+    """
+    if seed is None or seed is np.random:
+        return np.random.mtrand._rand
+    if isinstance(seed, (numbers.Integral, np.integer)):
+        return np.random.RandomState(seed)
+    if isinstance(seed, (np.random.RandomState, np.random.Generator)):
+        return seed
+
+    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
+                     ' instance' % seed)
+
+
+def _asarray_validated(a, check_finite=True,
+                       sparse_ok=False, objects_ok=False, mask_ok=False,
+                       as_inexact=False):
+    """
+    Helper function for SciPy argument validation.
+
+    Many SciPy linear algebra functions do support arbitrary array-like
+    input arguments. Examples of commonly unsupported inputs include
+    matrices containing inf/nan, sparse matrix representations, and
+    matrices with complicated elements.
+
+    Parameters
+    ----------
+    a : array_like
+        The array-like input.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+    sparse_ok : bool, optional
+        True if scipy sparse matrices are allowed.
+    objects_ok : bool, optional
+        True if arrays with dtype('O') are allowed.
+    mask_ok : bool, optional
+        True if masked arrays are allowed.
+    as_inexact : bool, optional
+        True to convert the input array to a np.inexact dtype.
+
+    Returns
+    -------
+    ret : ndarray
+        The converted validated array.
+
+    """
+    if not sparse_ok:
+        import scipy.sparse
+        if scipy.sparse.issparse(a):
+            msg = ('Sparse matrices are not supported by this function. '
+                   'Perhaps one of the scipy.sparse.linalg functions '
+                   'would work instead.')
+            raise ValueError(msg)
+    if not mask_ok:
+        if np.ma.isMaskedArray(a):
+            raise ValueError('masked arrays are not supported')
+    toarray = np.asarray_chkfinite if check_finite else np.asarray
+    a = toarray(a)
+    if not objects_ok:
+        if a.dtype is np.dtype('O'):
+            raise ValueError('object arrays are not supported')
+    if as_inexact:
+        if not np.issubdtype(a.dtype, np.inexact):
+            a = toarray(a, dtype=np.float64)
+    return a
+
+
+def _validate_int(k, name, minimum=None):
+    """
+    Validate a scalar integer.
+
+    This function can be used to validate an argument to a function
+    that expects the value to be an integer.  It uses `operator.index`
+    to validate the value (so, for example, k=2.0 results in a
+    TypeError).
+
+    Parameters
+    ----------
+    k : int
+        The value to be validated.
+    name : str
+        The name of the parameter.
+    minimum : int, optional
+        An optional lower bound.
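+
+    Examples
+    --------
+    >>> _validate_int(7, 'n', minimum=0)
+    7
+    >>> _validate_int(7.0, 'n')
+    Traceback (most recent call last):
+        ...
+    TypeError: n must be an integer.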
+    """
+    try:
+        k = operator.index(k)
+    except TypeError:
+        raise TypeError(f'{name} must be an integer.') from None
+    if minimum is not None and k < minimum:
+        raise ValueError(f'{name} must be an integer not less '
+                         f'than {minimum}') from None
+    return k
+
+
+# Add a replacement for inspect.getfullargspec().
+# The version below is borrowed from Django,
+# https://github.com/django/django/pull/4846.
+
+# Note an inconsistency between inspect.getfullargspec(func) and
+# inspect.signature(func). If `func` is a bound method, the latter does *not*
+# list `self` as a first argument, while the former *does*.
+# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
+# mimics `inspect.getfullargspec` but does not list `self`.
+#
+# This way, the caller code does not need to know whether it uses a legacy
+# .getfullargspec or a bright and shiny .signature.
+
+FullArgSpec = namedtuple('FullArgSpec',
+                         ['args', 'varargs', 'varkw', 'defaults',
+                          'kwonlyargs', 'kwonlydefaults', 'annotations'])
+
+
+def getfullargspec_no_self(func):
+    """inspect.getfullargspec replacement using inspect.signature.
+
+    If func is a bound method, do not list the 'self' parameter.
+
+    Parameters
+    ----------
+    func : callable
+        A callable to inspect
+
+    Returns
+    -------
+    fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
+                              kwonlydefaults, annotations)
+
+        NOTE: if the first argument of `func` is self, it is *not*, I repeat
+        *not*, included in fullargspec.args.
+        This is done for consistency between inspect.getargspec() under
+        Python 2.x, and inspect.signature() under Python 3.x.
+
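+    Examples
+    --------
+    A short illustration with an ad hoc function:
+
+    >>> def f(a, b=1, *args, c, **kwds):
+    ...     pass
+    >>> spec = getfullargspec_no_self(f)
+    >>> spec.args, spec.varargs, spec.kwonlyargs
+    (['a', 'b'], 'args', ['c'])
+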
+    """
+    sig = inspect.signature(func)
+    args = [
+        p.name for p in sig.parameters.values()
+        if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                      inspect.Parameter.POSITIONAL_ONLY]
+    ]
+    varargs = [
+        p.name for p in sig.parameters.values()
+        if p.kind == inspect.Parameter.VAR_POSITIONAL
+    ]
+    varargs = varargs[0] if varargs else None
+    varkw = [
+        p.name for p in sig.parameters.values()
+        if p.kind == inspect.Parameter.VAR_KEYWORD
+    ]
+    varkw = varkw[0] if varkw else None
+    defaults = tuple(
+        p.default for p in sig.parameters.values()
+        if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
+            p.default is not p.empty)
+    ) or None
+    kwonlyargs = [
+        p.name for p in sig.parameters.values()
+        if p.kind == inspect.Parameter.KEYWORD_ONLY
+    ]
+    kwdefaults = {p.name: p.default for p in sig.parameters.values()
+                  if p.kind == inspect.Parameter.KEYWORD_ONLY and
+                  p.default is not p.empty}
+    annotations = {p.name: p.annotation for p in sig.parameters.values()
+                   if p.annotation is not p.empty}
+    return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
+                       kwdefaults or None, annotations)
+
+
+class _FunctionWrapper:
+    """
+    Object to wrap user's function, allowing picklability
+    """
+    def __init__(self, f, args):
+        self.f = f
+        self.args = [] if args is None else args
+
+    def __call__(self, x):
+        return self.f(x, *self.args)
+
+
+class MapWrapper:
+    """
+    Parallelisation wrapper for working with map-like callables, such as
+    `multiprocessing.Pool.map`.
+
+    Parameters
+    ----------
+    pool : int or map-like callable
+        If `pool` is an integer, then it specifies the number of worker
+        processes to use for parallelization. If ``int(pool) == 1``, then no
+        parallel processing is used and the built-in map is used.
+        If ``pool == -1``, then the pool will utilize all available CPUs.
+        If `pool` is a map-like callable that follows the same
+        calling sequence as the built-in map function, then this callable is
+        used for parallelization.
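+
+    Examples
+    --------
+    A serial sketch (``pool=1``, so the built-in map is used):
+
+    >>> with MapWrapper(1) as mapper:
+    ...     list(mapper(abs, [-3, 1, -2]))
+    [3, 1, 2]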
+    """
+    def __init__(self, pool=1):
+        self.pool = None
+        self._mapfunc = map
+        self._own_pool = False
+
+        if callable(pool):
+            self.pool = pool
+            self._mapfunc = self.pool
+        else:
+            from multiprocessing import Pool
+            # user supplies a number
+            if int(pool) == -1:
+                # use as many processors as possible
+                self.pool = Pool()
+                self._mapfunc = self.pool.map
+                self._own_pool = True
+            elif int(pool) == 1:
+                pass
+            elif int(pool) > 1:
+                # use the number of processors requested
+                self.pool = Pool(processes=int(pool))
+                self._mapfunc = self.pool.map
+                self._own_pool = True
+            else:
+                raise RuntimeError("Number of workers specified must be -1,"
+                                   " an int >= 1, or an object with a 'map' "
+                                   "method")
+
+    def __enter__(self):
+        return self
+
+    def terminate(self):
+        if self._own_pool:
+            self.pool.terminate()
+
+    def join(self):
+        if self._own_pool:
+            self.pool.join()
+
+    def close(self):
+        if self._own_pool:
+            self.pool.close()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self._own_pool:
+            self.pool.close()
+            self.pool.terminate()
+
+    def __call__(self, func, iterable):
+        # only accept one iterable because that's all Pool.map accepts
+        try:
+            return self._mapfunc(func, iterable)
+        except TypeError as e:
+            # wrong number of arguments
+            raise TypeError("The map-like callable must be of the"
+                            " form f(func, iterable)") from e
+
+
+def rng_integers(gen, low, high=None, size=None, dtype='int64',
+                 endpoint=False):
+    """
+    Return random integers from low (inclusive) to high (exclusive), or if
+    endpoint=True, low (inclusive) to high (inclusive). Replaces
+    `RandomState.randint` (with endpoint=False) and
+    `RandomState.random_integers` (with endpoint=True).
+
+    Return random integers from the "discrete uniform" distribution of the
+    specified dtype. If high is None (the default), then results are from
+    0 to low.
+
+    Parameters
+    ----------
+    gen : {None, np.random.RandomState, np.random.Generator}
+        Random number generator. If None, then the np.random.RandomState
+        singleton is used.
+    low : int or array-like of ints
+        Lowest (signed) integers to be drawn from the distribution (unless
+        high=None, in which case this parameter is 0 and this value is used
+        for high).
+    high : int or array-like of ints
+        If provided, one above the largest (signed) integer to be drawn from
+        the distribution (see above for behavior if high=None). If array-like,
+        must contain integer values.
+    size : array-like of ints, optional
+        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
+        samples are drawn. Default is None, in which case a single value is
+        returned.
+    dtype : {str, dtype}, optional
+        Desired dtype of the result. All dtypes are determined by their name,
+        i.e., 'int64', 'int', etc, so byteorder is not available and a specific
+        precision may have different C types depending on the platform.
+        The default value is 'int64'.
+    endpoint : bool, optional
+        If True, sample from the interval [low, high] instead of the default
+        [low, high). Defaults to False.
+
+    Returns
+    -------
+    out: int or ndarray of ints
+        size-shaped array of random integers from the appropriate distribution,
+        or a single such random int if size not provided.
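+
+    Examples
+    --------
+    A sketch of the inclusive-endpoint behavior (the seed is arbitrary):
+
+    >>> import numpy as np
+    >>> gen = np.random.RandomState(0)
+    >>> vals = rng_integers(gen, 1, 6, size=100, endpoint=True)
+    >>> bool(vals.min() >= 1), bool(vals.max() <= 6)
+    (True, True)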
+    """
+    if isinstance(gen, Generator):
+        return gen.integers(low, high=high, size=size, dtype=dtype,
+                            endpoint=endpoint)
+    else:
+        if gen is None:
+            # default is RandomState singleton used by np.random.
+            gen = np.random.mtrand._rand
+        if endpoint:
+            # inclusive of endpoint
+            # remember that low and high can be arrays, so don't modify in
+            # place
+            if high is None:
+                return gen.randint(low + 1, size=size, dtype=dtype)
+            if high is not None:
+                return gen.randint(low, high=high + 1, size=size, dtype=dtype)
+
+        # exclusive
+        return gen.randint(low, high=high, size=size, dtype=dtype)
+
+
+@contextmanager
+def _fixed_default_rng(seed=1638083107694713882823079058616272161):
+    """Context with a fixed np.random.default_rng seed."""
+    orig_fun = np.random.default_rng
+    np.random.default_rng = lambda seed=seed: orig_fun(seed)
+    try:
+        yield
+    finally:
+        np.random.default_rng = orig_fun
+
+
+def _rng_html_rewrite(func):
+    """Rewrite the HTML rendering of ``np.random.default_rng``.
+
+    This is intended to decorate
+    ``numpydoc.docscrape_sphinx.SphinxDocString._str_examples``.
+
+    Examples are only run by Sphinx when plots are involved. Even so,
+    it does not change the result values that get printed.
+    """
+    # hexadecimal or number seed, case-insensitive
+    pattern = re.compile(r'np.random.default_rng\((0x[0-9A-F]+|\d+)\)', re.I)
+
+    def _wrapped(*args, **kwargs):
+        res = func(*args, **kwargs)
+        lines = [
+            re.sub(pattern, 'np.random.default_rng()', line)
+            for line in res
+        ]
+        return lines
+
+    return _wrapped
+
+
+def _argmin(a, keepdims=False, axis=None):
+    """
+    argmin with a `keepdims` parameter.
+
+    See https://github.com/numpy/numpy/issues/8710
+
+    If axis is not None, a.shape[axis] must be greater than 0.
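+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[4, 1], [2, 3]])
+    >>> _argmin(a, axis=1, keepdims=True)
+    array([[1],
+           [0]])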
+    """
+    res = np.argmin(a, axis=axis)
+    if keepdims and axis is not None:
+        res = np.expand_dims(res, axis=axis)
+    return res
+
+
+def _first_nonnan(a, axis):
+    """
+    Return the first non-nan value along the given axis.
+
+    If a slice is all nan, nan is returned for that slice.
+
+    The shape of the return value corresponds to ``keepdims=True``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> nan = np.nan
+    >>> a = np.array([[ 3.,  3., nan,  3.],
+    ...               [ 1., nan,  2.,  4.],
+    ...               [nan, nan,  9., -1.],
+    ...               [nan,  5.,  4.,  3.],
+    ...               [ 2.,  2.,  2.,  2.],
+    ...               [nan, nan, nan, nan]])
+    >>> _first_nonnan(a, axis=0)
+    array([[3., 3., 2., 3.]])
+    >>> _first_nonnan(a, axis=1)
+    array([[ 3.],
+           [ 1.],
+           [ 9.],
+           [ 5.],
+           [ 2.],
+           [nan]])
+    """
+    k = _argmin(np.isnan(a), axis=axis, keepdims=True)
+    return np.take_along_axis(a, k, axis=axis)
+
+
+def _nan_allsame(a, axis, keepdims=False):
+    """
+    Determine if the values along an axis are all the same.
+
+    nan values are ignored.
+
+    `a` must be a numpy array.
+
+    `axis` is assumed to be normalized; that is, 0 <= axis < a.ndim.
+
+    For an axis of length 0, the result is True.  That is, we adopt the
+    convention that ``allsame([])`` is True. (There are no values in the
+    input that are different.)
+
+    `True` is returned for slices that are all nan--not because all the
+    values are the same, but because this is equivalent to ``allsame([])``.
+
+    Examples
+    --------
+    >>> from numpy import nan, array
+    >>> a = array([[ 3.,  3., nan,  3.],
+    ...            [ 1., nan,  2.,  4.],
+    ...            [nan, nan,  9., -1.],
+    ...            [nan,  5.,  4.,  3.],
+    ...            [ 2.,  2.,  2.,  2.],
+    ...            [nan, nan, nan, nan]])
+    >>> _nan_allsame(a, axis=1, keepdims=True)
+    array([[ True],
+           [False],
+           [False],
+           [False],
+           [ True],
+           [ True]])
+    """
+    if axis is None:
+        if a.size == 0:
+            return True
+        a = a.ravel()
+        axis = 0
+    else:
+        shp = a.shape
+        if shp[axis] == 0:
+            shp = shp[:axis] + (1,)*keepdims + shp[axis + 1:]
+            return np.full(shp, fill_value=True, dtype=bool)
+    a0 = _first_nonnan(a, axis=axis)
+    return ((a0 == a) | np.isnan(a)).all(axis=axis, keepdims=keepdims)
+
+
+def _contains_nan(a, nan_policy='propagate', use_summation=True,
+                  policies=None):
+    if not isinstance(a, np.ndarray):
+        use_summation = False  # some array_likes ignore nans (e.g. pandas)
+    if policies is None:
+        policies = ['propagate', 'raise', 'omit']
+    if nan_policy not in policies:
+        raise ValueError("nan_policy must be one of {%s}" %
+                         ', '.join("'%s'" % s for s in policies))
+
+    if np.issubdtype(a.dtype, np.inexact):
+        # The summation method avoids creating a (potentially huge) array.
+        if use_summation:
+            with np.errstate(invalid='ignore', over='ignore'):
+                contains_nan = np.isnan(np.sum(a))
+        else:
+            contains_nan = np.isnan(a).any()
+    elif np.issubdtype(a.dtype, object):
+        contains_nan = False
+        for el in a.ravel():
+            # isnan doesn't work on non-numeric elements
+            if np.issubdtype(type(el), np.number) and np.isnan(el):
+                contains_nan = True
+                break
+    else:
+        # Only `object` and `inexact` arrays can have NaNs
+        contains_nan = False
+
+    if contains_nan and nan_policy == 'raise':
+        raise ValueError("The input contains nan values")
+
+    return contains_nan, nan_policy
+
+
+def _rename_parameter(old_name, new_name, dep_version=None):
+    """
+    Generate decorator for backward-compatible keyword renaming.
+
+    Apply the decorator generated by `_rename_parameter` to functions with a
+    recently renamed parameter to maintain backward-compatibility.
+
+    After decoration, the function behaves as follows:
+    If only the new parameter is passed into the function, behave as usual.
+    If only the old parameter is passed into the function (as a keyword), raise
+    a DeprecationWarning if `dep_version` is provided, and behave as usual
+    otherwise.
+    If both old and new parameters are passed into the function, raise a
+    DeprecationWarning if `dep_version` is provided, and raise the appropriate
+    TypeError (function got multiple values for argument).
+
+    Parameters
+    ----------
+    old_name : str
+        Old name of parameter
+    new_name : str
+        New name of parameter
+    dep_version : str, optional
+        Version of SciPy in which old parameter was deprecated in the format
+        'X.Y.Z'. If supplied, the deprecation message will indicate that
+        support for the old parameter will be removed in version 'X.Y+2.Z'
+
+    Notes
+    -----
+    Untested with functions that accept *args. Probably won't work as written.
+
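+    Examples
+    --------
+    A minimal sketch (the parameter names are illustrative):
+
+    >>> @_rename_parameter('old', 'new')
+    ... def f(new):
+    ...     return new
+    >>> f(old=1)  # old keyword still accepted
+    1
+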
+    """
+    def decorator(fun):
+        @functools.wraps(fun)
+        def wrapper(*args, **kwargs):
+            if old_name in kwargs:
+                if dep_version:
+                    end_version = dep_version.split('.')
+                    end_version[1] = str(int(end_version[1]) + 2)
+                    end_version = '.'.join(end_version)
+                    message = (f"Use of keyword argument `{old_name}` is "
+                               f"deprecated and replaced by `{new_name}`.  "
+                               f"Support for `{old_name}` will be removed "
+                               f"in SciPy {end_version}.")
+                    warnings.warn(message, DeprecationWarning, stacklevel=2)
+                if new_name in kwargs:
+                    message = (f"{fun.__name__}() got multiple values for "
+                               f"argument now known as `{new_name}`")
+                    raise TypeError(message)
+                kwargs[new_name] = kwargs.pop(old_name)
+            return fun(*args, **kwargs)
+        return wrapper
+    return decorator
+
+
+def _rng_spawn(rng, n_children):
+    # spawns independent RNGs from a parent RNG
+    bg = rng._bit_generator
+    ss = bg._seed_seq
+    child_rngs = [np.random.Generator(type(bg)(child_ss))
+                  for child_ss in ss.spawn(n_children)]
+    return child_rngs
+
+
+def _get_nan(*data):
+    # Get NaN of appropriate dtype for data
+    data = [np.asarray(item) for item in data]
+    try:
+        dtype = np.result_type(*data, np.half)  # must be a float16 at least
+    except DTypePromotionError:
+        # fallback to float64
+        return np.array(np.nan, dtype=np.float64)[()]
+    return np.array(np.nan, dtype=dtype)[()]
+
+
+def normalize_axis_index(axis, ndim):
+    # Check if `axis` is in the correct range and normalize it
+    if axis < -ndim or axis >= ndim:
+        msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
+        raise AxisError(msg)
+
+    if axis < 0:
+        axis = axis + ndim
+    return axis
+
+
+def _call_callback_maybe_halt(callback, res):
+    """Call wrapped callback; return True if algorithm should stop.
+
+    Parameters
+    ----------
+    callback : callable or None
+        A user-provided callback wrapped with `_wrap_callback`
+    res : OptimizeResult
+        Information about the current iterate
+
+    Returns
+    -------
+    halt : bool
+        True if minimization should stop
+
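+    Examples
+    --------
+    A sketch in which a bare function stands in for a wrapped callback:
+
+    >>> def cb(res):  # a callback that requests a halt
+    ...     raise StopIteration
+    >>> _call_callback_maybe_halt(cb, res=None)
+    True
+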
+    """
+    if callback is None:
+        return False
+    try:
+        callback(res)
+        return False
+    except StopIteration:
+        callback.stop_iteration = True
+        return True
+
+
+class _RichResult(dict):
+    """ Container for multiple outputs with pretty-printing """
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError as e:
+            raise AttributeError(name) from e
+
+    __setattr__ = dict.__setitem__
+    __delattr__ = dict.__delitem__
+
+    def __repr__(self):
+        order_keys = ['message', 'success', 'status', 'fun', 'funl', 'x', 'xl',
+                      'col_ind', 'nit', 'lower', 'upper', 'eqlin', 'ineqlin',
+                      'converged', 'flag', 'function_calls', 'iterations',
+                      'root']
+        order_keys = getattr(self, '_order_keys', order_keys)
+        # 'slack', 'con' are redundant with residuals
+        # 'crossover_nit' is probably not interesting to most users
+        omit_keys = {'slack', 'con', 'crossover_nit', '_order_keys'}
+
+        def key(item):
+            try:
+                return order_keys.index(item[0].lower())
+            except ValueError:  # item not in list
+                return np.inf
+
+        def omit_redundant(items):
+            for item in items:
+                if item[0] in omit_keys:
+                    continue
+                yield item
+
+        def item_sorter(d):
+            return sorted(omit_redundant(d.items()), key=key)
+
+        if self.keys():
+            return _dict_formatter(self, sorter=item_sorter)
+        else:
+            return self.__class__.__name__ + "()"
+
+    def __dir__(self):
+        return list(self.keys())
+
+
+def _indenter(s, n=0):
+    """
+    Ensures that lines after the first are indented by the specified amount
+    """
+    split = s.split("\n")
+    indent = " "*n
+    return ("\n" + indent).join(split)
+
+
+def _float_formatter_10(x):
+    """
+    Returns a string representation of a float with exactly ten characters
+    """
+    if np.isposinf(x):
+        return "       inf"
+    elif np.isneginf(x):
+        return "      -inf"
+    elif np.isnan(x):
+        return "       nan"
+    return np.format_float_scientific(x, precision=3, pad_left=2, unique=False)
+
+
+def _dict_formatter(d, n=0, mplus=1, sorter=None):
+    """
+    Pretty printer for dictionaries
+
+    `n` keeps track of the starting indentation;
+    lines are indented by this much after a line break.
+    `mplus` is additional left padding applied to keys
+    """
+    if isinstance(d, dict):
+        m = max(map(len, list(d.keys()))) + mplus  # width to print keys
+        s = '\n'.join([k.rjust(m) + ': ' +  # right justified, width m
+                       _indenter(_dict_formatter(v, m+n+2, 0, sorter), m+2)
+                       for k, v in sorter(d)])  # +2 for ': '
+    else:
+        # By default, NumPy arrays print with linewidth=76. `n` is
+        # the indent at which a line begins printing, so it is subtracted
+        # from the default to avoid exceeding 76 characters total.
+        # `edgeitems` is the number of elements to include before and after
+        # ellipses when arrays are not shown in full.
+        # `threshold` is the maximum number of elements for which an
+        # array is shown in full.
+        # These values tend to work well for use with OptimizeResult.
+        with np.printoptions(linewidth=76-n, edgeitems=2, threshold=12,
+                             formatter={'float_kind': _float_formatter_10}):
+            s = str(d)
+    return s
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/decorator.py b/venv/lib/python3.10/site-packages/scipy/_lib/decorator.py
new file mode 100644
index 0000000000000000000000000000000000000000..02121774d3c2a9407a73366bb3e5915387a571d0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/decorator.py
@@ -0,0 +1,399 @@
+# #########################     LICENSE     ############################ #
+
+# Copyright (c) 2005-2015, Michele Simionato
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+#   Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+#   Redistributions in bytecode form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+
+"""
+Decorator module, see https://pypi.python.org/pypi/decorator
+for the documentation.
+"""
+import re
+import sys
+import inspect
+import operator
+import itertools
+import collections
+
+from inspect import getfullargspec
+
+__version__ = '4.0.5'
+
+
+def get_init(cls):
+    return cls.__init__
+
+
+# getargspec has been deprecated in Python 3.5
+ArgSpec = collections.namedtuple(
+    'ArgSpec', 'args varargs varkw defaults')
+
+
+def getargspec(f):
+    """A replacement for inspect.getargspec"""
+    spec = getfullargspec(f)
+    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
+
+
+DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
+
+
+# basic functionality
+class FunctionMaker:
+    """
+    An object with the ability to create functions with a given signature.
+    It has attributes name, doc, module, signature, defaults, dict, and
+    methods update and make.
+    """
+
+    # Atomic get-and-increment provided by the GIL
+    _compile_count = itertools.count()
+
+    def __init__(self, func=None, name=None, signature=None,
+                 defaults=None, doc=None, module=None, funcdict=None):
+        self.shortsignature = signature
+        if func:
+            # func can be a class or a callable, but not an instance method
+            self.name = func.__name__
+            if self.name == '<lambda>':  # small hack for lambda functions
+                self.name = '_lambda_'
+            self.doc = func.__doc__
+            self.module = func.__module__
+            if inspect.isfunction(func):
+                argspec = getfullargspec(func)
+                self.annotations = getattr(func, '__annotations__', {})
+                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
+                          'kwonlydefaults'):
+                    setattr(self, a, getattr(argspec, a))
+                for i, arg in enumerate(self.args):
+                    setattr(self, 'arg%d' % i, arg)
+                allargs = list(self.args)
+                allshortargs = list(self.args)
+                if self.varargs:
+                    allargs.append('*' + self.varargs)
+                    allshortargs.append('*' + self.varargs)
+                elif self.kwonlyargs:
+                    allargs.append('*')  # single star syntax
+                for a in self.kwonlyargs:
+                    allargs.append('%s=None' % a)
+                    allshortargs.append(f'{a}={a}')
+                if self.varkw:
+                    allargs.append('**' + self.varkw)
+                    allshortargs.append('**' + self.varkw)
+                self.signature = ', '.join(allargs)
+                self.shortsignature = ', '.join(allshortargs)
+                self.dict = func.__dict__.copy()
+        # func=None happens when decorating a caller
+        if name:
+            self.name = name
+        if signature is not None:
+            self.signature = signature
+        if defaults:
+            self.defaults = defaults
+        if doc:
+            self.doc = doc
+        if module:
+            self.module = module
+        if funcdict:
+            self.dict = funcdict
+        # check existence of required attributes
+        assert hasattr(self, 'name')
+        if not hasattr(self, 'signature'):
+            raise TypeError('You are decorating a non-function: %s' % func)
+
+    def update(self, func, **kw):
+        "Update the signature of func with the data in self"
+        func.__name__ = self.name
+        func.__doc__ = getattr(self, 'doc', None)
+        func.__dict__ = getattr(self, 'dict', {})
+        func.__defaults__ = getattr(self, 'defaults', ())
+        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
+        func.__annotations__ = getattr(self, 'annotations', None)
+        try:
+            frame = sys._getframe(3)
+        except AttributeError:  # for IronPython and similar implementations
+            callermodule = '?'
+        else:
+            callermodule = frame.f_globals.get('__name__', '?')
+        func.__module__ = getattr(self, 'module', callermodule)
+        func.__dict__.update(kw)
+
+    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
+        "Make a new function from a given template and update the signature"
+        src = src_templ % vars(self)  # expand name and signature
+        evaldict = evaldict or {}
+        mo = DEF.match(src)
+        if mo is None:
+            raise SyntaxError('not a valid function template\n%s' % src)
+        name = mo.group(1)  # extract the function name
+        names = set([name] + [arg.strip(' *') for arg in
+                              self.shortsignature.split(',')])
+        for n in names:
+            if n in ('_func_', '_call_'):
+                raise NameError(f'{n} is overridden in\n{src}')
+        if not src.endswith('\n'):  # add a newline just for safety
+            src += '\n'  # this is needed in old versions of Python
+
+        # Ensure each generated function has a unique filename for profilers
+        # (such as cProfile) that depend on the tuple of (<filename>,
+        # <function name>, <line number>) being unique.
+        filename = '<decorator-gen-%d>' % (next(self._compile_count),)
+        try:
+            code = compile(src, filename, 'single')
+            exec(code, evaldict)
+        except:  # noqa: E722
+            print('Error in generated code:', file=sys.stderr)
+            print(src, file=sys.stderr)
+            raise
+        func = evaldict[name]
+        if addsource:
+            attrs['__source__'] = src
+        self.update(func, **attrs)
+        return func
+
+    @classmethod
+    def create(cls, obj, body, evaldict, defaults=None,
+               doc=None, module=None, addsource=True, **attrs):
+        """
+        Create a function from the strings name, signature, and body.
+        evaldict is the evaluation dictionary. If addsource is true, an
+        attribute __source__ is added to the result. The attributes attrs
+        are added, if any.
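+
+        Examples
+        --------
+        A minimal sketch:
+
+        >>> f = FunctionMaker.create('f(a, b)', 'return a + b', {})
+        >>> f(1, 2)
+        3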
+        """
+        if isinstance(obj, str):  # "name(signature)"
+            name, rest = obj.strip().split('(', 1)
+            signature = rest[:-1]  # strip a right parens
+            func = None
+        else:  # a function
+            name = None
+            signature = None
+            func = obj
+        self = cls(func, name, signature, defaults, doc, module)
+        ibody = '\n'.join('    ' + line for line in body.splitlines())
+        return self.make('def %(name)s(%(signature)s):\n' + ibody,
+                         evaldict, addsource, **attrs)
+
+
+def decorate(func, caller):
+    """
+    decorate(func, caller) decorates a function using a caller.
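+
+    A minimal sketch (``caller`` and ``add`` are illustrative):
+
+    >>> def caller(func, *args, **kw):
+    ...     return 2 * func(*args, **kw)
+    >>> def add(a, b):
+    ...     return a + b
+    >>> decorate(add, caller)(1, 2)
+    6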
+    """
+    evaldict = func.__globals__.copy()
+    evaldict['_call_'] = caller
+    evaldict['_func_'] = func
+    fun = FunctionMaker.create(
+        func, "return _call_(_func_, %(shortsignature)s)",
+        evaldict, __wrapped__=func)
+    if hasattr(func, '__qualname__'):
+        fun.__qualname__ = func.__qualname__
+    return fun
+
+
+def decorator(caller, _func=None):
+    """decorator(caller) converts a caller function into a decorator"""
+    if _func is not None:  # return a decorated function
+        # this is obsolete behavior; you should use decorate instead
+        return decorate(_func, caller)
+    # else return a decorator function
+    if inspect.isclass(caller):
+        name = caller.__name__.lower()
+        callerfunc = get_init(caller)
+        doc = (f'decorator({caller.__name__}) converts functions/generators into ' 
+               f'factories of {caller.__name__} objects')
+    elif inspect.isfunction(caller):
+        if caller.__name__ == '<lambda>':
+            name = '_lambda_'
+        else:
+            name = caller.__name__
+        callerfunc = caller
+        doc = caller.__doc__
+    else:  # assume caller is an object with a __call__ method
+        name = caller.__class__.__name__.lower()
+        callerfunc = caller.__call__.__func__
+        doc = caller.__call__.__doc__
+    evaldict = callerfunc.__globals__.copy()
+    evaldict['_call_'] = caller
+    evaldict['_decorate_'] = decorate
+    return FunctionMaker.create(
+        '%s(func)' % name, 'return _decorate_(func, _call_)',
+        evaldict, doc=doc, module=caller.__module__,
+        __wrapped__=caller)
+
+
+# ####################### contextmanager ####################### #
+
+try:  # Python >= 3.2
+    from contextlib import _GeneratorContextManager
+except ImportError:  # Python >= 2.5
+    from contextlib import GeneratorContextManager as _GeneratorContextManager
+
+
+class ContextManager(_GeneratorContextManager):
+    def __call__(self, func):
+        """Context manager decorator"""
+        return FunctionMaker.create(
+            func, "with _self_: return _func_(%(shortsignature)s)",
+            dict(_self_=self, _func_=func), __wrapped__=func)
+
+
+init = getfullargspec(_GeneratorContextManager.__init__)
+n_args = len(init.args)
+if n_args == 2 and not init.varargs:  # (self, genobj) Python 2.7
+    def __init__(self, g, *a, **k):
+        return _GeneratorContextManager.__init__(self, g(*a, **k))
+    ContextManager.__init__ = __init__
+elif n_args == 2 and init.varargs:  # (self, gen, *a, **k) Python 3.4
+    pass
+elif n_args == 4:  # (self, gen, args, kwds) Python 3.5
+    def __init__(self, g, *a, **k):
+        return _GeneratorContextManager.__init__(self, g, a, k)
+    ContextManager.__init__ = __init__
+
+contextmanager = decorator(ContextManager)
+
+
+# ############################ dispatch_on ############################ #
+
+def append(a, vancestors):
+    """
+    Append ``a`` to the list of the virtual ancestors, unless it is already
+    included.
+    """
+    add = True
+    for j, va in enumerate(vancestors):
+        if issubclass(va, a):
+            add = False
+            break
+        if issubclass(a, va):
+            vancestors[j] = a
+            add = False
+    if add:
+        vancestors.append(a)
+
+
+# inspired from simplegeneric by P.J. Eby and functools.singledispatch
+def dispatch_on(*dispatch_args):
+    """
+    Factory of decorators turning a function into a generic function
+    dispatching on the given arguments.
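+
+    A minimal sketch of single-argument dispatch:
+
+    >>> @dispatch_on('x')
+    ... def f(x):
+    ...     return 'default'
+    >>> @f.register(int)
+    ... def f_int(x):
+    ...     return 'int'
+    >>> f(1), f('a')
+    ('int', 'default')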
+    """
+    assert dispatch_args, 'No dispatch args passed'
+    dispatch_str = '(%s,)' % ', '.join(dispatch_args)
+
+    def check(arguments, wrong=operator.ne, msg=''):
+        """Make sure one passes the expected number of arguments"""
+        if wrong(len(arguments), len(dispatch_args)):
+            raise TypeError('Expected %d arguments, got %d%s' %
+                            (len(dispatch_args), len(arguments), msg))
+
+    def gen_func_dec(func):
+        """Decorator turning a function into a generic function"""
+
+        # first check the dispatch arguments
+        argset = set(getfullargspec(func).args)
+        if not set(dispatch_args) <= argset:
+            raise NameError('Unknown dispatch arguments %s' % dispatch_str)
+
+        typemap = {}
+
+        def vancestors(*types):
+            """
+            Get a list of sets of virtual ancestors for the given types
+            """
+            check(types)
+            ras = [[] for _ in range(len(dispatch_args))]
+            for types_ in typemap:
+                for t, type_, ra in zip(types, types_, ras):
+                    if issubclass(t, type_) and type_ not in t.__mro__:
+                        append(type_, ra)
+            return [set(ra) for ra in ras]
+
+        def ancestors(*types):
+            """
+            Get a list of virtual MROs, one for each type
+            """
+            check(types)
+            lists = []
+            for t, vas in zip(types, vancestors(*types)):
+                n_vas = len(vas)
+                if n_vas > 1:
+                    raise RuntimeError(
+                        f'Ambiguous dispatch for {t}: {vas}')
+                elif n_vas == 1:
+                    va, = vas
+                    mro = type('t', (t, va), {}).__mro__[1:]
+                else:
+                    mro = t.__mro__
+                lists.append(mro[:-1])  # discard t and object
+            return lists
+
+        def register(*types):
+            """
+            Decorator to register an implementation for the given types
+            """
+            check(types)
+
+            def dec(f):
+                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
+                typemap[types] = f
+                return f
+            return dec
+
+        def dispatch_info(*types):
+            """
+            A utility to introspect the dispatch algorithm
+            """
+            check(types)
+            lst = [tuple(a.__name__ for a in anc)
+                   for anc in itertools.product(*ancestors(*types))]
+            return lst
+
+        def _dispatch(dispatch_args, *args, **kw):
+            types = tuple(type(arg) for arg in dispatch_args)
+            try:  # fast path
+                f = typemap[types]
+            except KeyError:
+                pass
+            else:
+                return f(*args, **kw)
+            combinations = itertools.product(*ancestors(*types))
+            next(combinations)  # the first one has already been tried
+            for types_ in combinations:
+                f = typemap.get(types_)
+                if f is not None:
+                    return f(*args, **kw)
+
+            # else call the default implementation
+            return func(*args, **kw)
+
+        return FunctionMaker.create(
+            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
+            dict(_f_=_dispatch), register=register, default=func,
+            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
+            dispatch_info=dispatch_info, __wrapped__=func)
+
+    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
+    return gen_func_dec
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/deprecation.py b/venv/lib/python3.10/site-packages/scipy/_lib/deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..01a1dfa73695f00b409a3adab26dbb98b804b384
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/deprecation.py
@@ -0,0 +1,239 @@
+from inspect import Parameter, signature
+import functools
+import warnings
+from importlib import import_module
+
+
+__all__ = ["_deprecated"]
+
+
+# Object to use as default value for arguments to be deprecated. This should
+# be used over 'None' as the user could parse 'None' as a positional argument
+_NoValue = object()
+
+def _sub_module_deprecation(*, sub_package, module, private_modules, all,
+                            attribute, correct_module=None):
+    """Helper function for deprecating modules that are public but were
+    intended to be private.
+
+    Parameters
+    ----------
+    sub_package : str
+        Subpackage the module belongs to, e.g. ``stats``.
+    module : str
+        Public but intended private module to deprecate
+    private_modules : list
+        Private replacement(s) for `module`; should contain the
+        content of ``all``, possibly spread over several modules.
+    all : list
+        ``__all__`` belonging to `module`
+    attribute : str
+        The attribute in `module` being accessed
+    correct_module : str, optional
+        Module in `sub_package` that `attribute` should be imported from.
+        Default is that `attribute` should be imported from ``scipy.sub_package``.
+    """
+    if correct_module is not None:
+        correct_import = f"scipy.{sub_package}.{correct_module}"
+    else:
+        correct_import = f"scipy.{sub_package}"
+
+    if attribute not in all:
+        raise AttributeError(
+            f"`scipy.{sub_package}.{module}` has no attribute `{attribute}`; "
+            f"furthermore, `scipy.{sub_package}.{module}` is deprecated "
+            f"and will be removed in SciPy 2.0.0."
+        )
+
+    attr = getattr(import_module(correct_import), attribute, None)
+
+    if attr is not None:
+        message = (
+            f"Please import `{attribute}` from the `{correct_import}` namespace; "
+            f"the `scipy.{sub_package}.{module}` namespace is deprecated "
+            f"and will be removed in SciPy 2.0.0."
+        )
+    else:
+        message = (
+            f"`scipy.{sub_package}.{module}.{attribute}` is deprecated along with "
+            f"the `scipy.{sub_package}.{module}` namespace. "
+            f"`scipy.{sub_package}.{module}.{attribute}` will be removed "
+            f"in SciPy 1.14.0, and the `scipy.{sub_package}.{module}` namespace "
+            f"will be removed in SciPy 2.0.0."
+        )
+
+    warnings.warn(message, category=DeprecationWarning, stacklevel=3)
+
+    for module in private_modules:
+        try:
+            return getattr(import_module(f"scipy.{sub_package}.{module}"), attribute)
+        except AttributeError as e:
+            # still raise an error if the attribute isn't in any of the expected
+            # private modules
+            if module == private_modules[-1]:
+                raise e
+            continue
+
+
+def _deprecated(msg, stacklevel=2):
+    """Deprecate a function by emitting a warning on use."""
+    def wrap(fun):
+        if isinstance(fun, type):
+            warnings.warn(
+                f"Trying to deprecate class {fun!r}",
+                category=RuntimeWarning, stacklevel=2)
+            return fun
+
+        @functools.wraps(fun)
+        def call(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning,
+                          stacklevel=stacklevel)
+            return fun(*args, **kwargs)
+        call.__doc__ = fun.__doc__
+        return call
+
+    return wrap
+
+
+class _DeprecationHelperStr:
+    """
+    Helper class used by deprecate_cython_api
+    """
+    def __init__(self, content, message):
+        self._content = content
+        self._message = message
+
+    def __hash__(self):
+        return hash(self._content)
+
+    def __eq__(self, other):
+        res = (self._content == other)
+        if res:
+            warnings.warn(self._message, category=DeprecationWarning,
+                          stacklevel=2)
+        return res
+
+
+def deprecate_cython_api(module, routine_name, new_name=None, message=None):
+    """
+    Deprecate an exported cdef function in a public Cython API module.
+
+    Only functions can be deprecated; typedefs etc. cannot.
+
+    Parameters
+    ----------
+    module : module
+        Public Cython API module (e.g. scipy.linalg.cython_blas).
+    routine_name : str
+        Name of the routine to deprecate. May also be a fused-type
+        routine (in which case all of its specializations are deprecated).
+    new_name : str
+        New name to include in the deprecation warning message
+    message : str
+        Additional text in the deprecation warning message
+
+    Examples
+    --------
+    Usually, this function would be used in the top-level of the
+    module ``.pyx`` file:
+
+    >>> from scipy._lib.deprecation import deprecate_cython_api
+    >>> import scipy.linalg.cython_blas as mod
+    >>> deprecate_cython_api(mod, "dgemm", "dgemm_new",
+    ...                      message="Deprecated in Scipy 1.5.0")
+    >>> del deprecate_cython_api, mod
+
+    After this, Cython modules that use the deprecated function emit a
+    deprecation warning when they are imported.
+
+    """
+    old_name = f"{module.__name__}.{routine_name}"
+
+    if new_name is None:
+        depdoc = "`%s` is deprecated!" % old_name
+    else:
+        depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!"
+
+    if message is not None:
+        depdoc += "\n" + message
+
+    d = module.__pyx_capi__
+
+    # Check if the function is a fused-type function with a mangled name
+    j = 0
+    has_fused = False
+    while True:
+        fused_name = f"__pyx_fuse_{j}{routine_name}"
+        if fused_name in d:
+            has_fused = True
+            d[_DeprecationHelperStr(fused_name, depdoc)] = d.pop(fused_name)
+            j += 1
+        else:
+            break
+
+    # If not, apply deprecation to the named routine
+    if not has_fused:
+        d[_DeprecationHelperStr(routine_name, depdoc)] = d.pop(routine_name)
+
+
+# taken from scikit-learn, see
+# https://github.com/scikit-learn/scikit-learn/blob/1.3.0/sklearn/utils/validation.py#L38
+def _deprecate_positional_args(func=None, *, version=None):
+    """Decorator for methods that issues warnings for positional arguments.
+
+    Using the keyword-only argument syntax in pep 3102, arguments after the
+    * will issue a warning when passed as a positional argument.
+
+    Parameters
+    ----------
+    func : callable, default=None
+        Function to check arguments on.
+    version : str, default=None
+        The version when positional arguments will result in error.
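+
+    Examples
+    --------
+    A minimal sketch (the version string is arbitrary):
+
+    >>> @_deprecate_positional_args(version="1.14")
+    ... def f(a, *, b=0):
+    ...     return a + b
+    >>> f(1, b=2)  # keyword use: no warning
+    3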
+    """
+    if version is None:
+        msg = "Need to specify a version where signature will be changed"
+        raise ValueError(msg)
+
+    def _inner_deprecate_positional_args(f):
+        sig = signature(f)
+        kwonly_args = []
+        all_args = []
+
+        for name, param in sig.parameters.items():
+            if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
+                all_args.append(name)
+            elif param.kind == Parameter.KEYWORD_ONLY:
+                kwonly_args.append(name)
+
+        @functools.wraps(f)
+        def inner_f(*args, **kwargs):
+            extra_args = len(args) - len(all_args)
+            if extra_args <= 0:
+                return f(*args, **kwargs)
+
+            # extra_args > 0
+            args_msg = [
+                f"{name}={arg}"
+                for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
+            ]
+            args_msg = ", ".join(args_msg)
+            warnings.warn(
+                (
+                    f"You are passing {args_msg} as a positional argument. "
+                    "Please change your invocation to use keyword arguments. "
+                    f"From SciPy {version}, passing these as positional "
+                    "arguments will result in an error."
+                ),
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            kwargs.update(zip(sig.parameters, args))
+            return f(**kwargs)
+
+        return inner_f
+
+    if func is not None:
+        return _inner_deprecate_positional_args(func)
+
+    return _inner_deprecate_positional_args
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/doccer.py b/venv/lib/python3.10/site-packages/scipy/_lib/doccer.py
new file mode 100644
index 0000000000000000000000000000000000000000..707f97017b81871e3c495a39e47587cf1f17175c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/doccer.py
@@ -0,0 +1,275 @@
+''' Utilities to allow inserting docstring fragments for common
+parameters into function and method docstrings'''
+
+import sys
+
+__all__ = [
+    'docformat', 'inherit_docstring_from', 'indentcount_lines',
+    'filldoc', 'unindent_dict', 'unindent_string', 'extend_notes_in_docstring',
+    'replace_notes_in_docstring', 'doc_replace'
+]
+
+
+def docformat(docstring, docdict=None):
+    ''' Fill a function docstring from variables in dictionary
+
+    Adapt the indent of the inserted docs
+
+    Parameters
+    ----------
+    docstring : string
+        docstring from function, possibly with dict formatting strings
+    docdict : dict, optional
+        dictionary with keys that match the dict formatting strings
+        and values that are docstring fragments to be inserted. The
+        indentation of the inserted docstrings is set to match the
+        minimum indentation of the ``docstring`` by adding this
+        indentation to all lines of the inserted string, except the
+        first.
+
+    Returns
+    -------
+    outstring : string
+        string with requested ``docdict`` strings inserted
+
+    Examples
+    --------
+    >>> docformat(' Test string with %(value)s', {'value':'inserted value'})
+    ' Test string with inserted value'
+    >>> docstring = 'First line\\n    Second line\\n    %(value)s'
+    >>> inserted_string = "indented\\nstring"
+    >>> docdict = {'value': inserted_string}
+    >>> docformat(docstring, docdict)
+    'First line\\n    Second line\\n    indented\\n    string'
+    '''
+    if not docstring:
+        return docstring
+    if docdict is None:
+        docdict = {}
+    if not docdict:
+        return docstring
+    lines = docstring.expandtabs().splitlines()
+    # Find the minimum indent of the main docstring, after first line
+    if len(lines) < 2:
+        icount = 0
+    else:
+        icount = indentcount_lines(lines[1:])
+    indent = ' ' * icount
+    # Insert this indent to dictionary docstrings
+    indented = {}
+    for name, dstr in docdict.items():
+        lines = dstr.expandtabs().splitlines()
+        try:
+            newlines = [lines[0]]
+            for line in lines[1:]:
+                newlines.append(indent+line)
+            indented[name] = '\n'.join(newlines)
+        except IndexError:
+            indented[name] = dstr
+    return docstring % indented
+
+
+def inherit_docstring_from(cls):
+    """
+    This decorator modifies the decorated function's docstring by
+    replacing occurrences of '%(super)s' with the docstring of the
+    method of the same name from the class `cls`.
+
+    If the decorated method has no docstring, it is simply given the
+    docstring of `cls`'s method.
+
+    Parameters
+    ----------
+    cls : Python class or instance
+        A class with a method with the same name as the decorated method.
+        The docstring of the method in this class replaces '%(super)s' in the
+        docstring of the decorated method.
+
+    Returns
+    -------
+    f : function
+        The decorator function that modifies the __doc__ attribute
+        of its argument.
+
+    Examples
+    --------
+    In the following, the docstring for Bar.func created using the
+    docstring of `Foo.func`.
+
+    >>> class Foo:
+    ...     def func(self):
+    ...         '''Do something useful.'''
+    ...         return
+    ...
+    >>> class Bar(Foo):
+    ...     @inherit_docstring_from(Foo)
+    ...     def func(self):
+    ...         '''%(super)s
+    ...         Do it fast.
+    ...         '''
+    ...         return
+    ...
+    >>> b = Bar()
+    >>> b.func.__doc__
+    'Do something useful.\n        Do it fast.\n        '
+
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        func_docstring = func.__doc__
+        if func_docstring is None:
+            func.__doc__ = cls_docstring
+        else:
+            new_docstring = func_docstring % dict(super=cls_docstring)
+            func.__doc__ = new_docstring
+        return func
+    return _doc
+
+
+def extend_notes_in_docstring(cls, notes):
+    """
+    This decorator replaces the decorated function's docstring
+    with the docstring from corresponding method in `cls`.
+    It extends the 'Notes' section of that docstring to include
+    the given `notes`.
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        # If python is called with -OO option,
+        # there is no docstring
+        if cls_docstring is None:
+            return func
+        end_of_notes = cls_docstring.find('        References\n')
+        if end_of_notes == -1:
+            end_of_notes = cls_docstring.find('        Examples\n')
+            if end_of_notes == -1:
+                end_of_notes = len(cls_docstring)
+        func.__doc__ = (cls_docstring[:end_of_notes] + notes +
+                        cls_docstring[end_of_notes:])
+        return func
+    return _doc
+
+
+def replace_notes_in_docstring(cls, notes):
+    """
+    This decorator replaces the decorated function's docstring
+    with the docstring from corresponding method in `cls`.
+    It replaces the 'Notes' section of that docstring with
+    the given `notes`.
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        notes_header = '        Notes\n        -----\n'
+        # If python is called with -OO option,
+        # there is no docstring
+        if cls_docstring is None:
+            return func
+        start_of_notes = cls_docstring.find(notes_header)
+        end_of_notes = cls_docstring.find('        References\n')
+        if end_of_notes == -1:
+            end_of_notes = cls_docstring.find('        Examples\n')
+            if end_of_notes == -1:
+                end_of_notes = len(cls_docstring)
+        func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] +
+                        notes +
+                        cls_docstring[end_of_notes:])
+        return func
+    return _doc
+
+
+def indentcount_lines(lines):
+    ''' Return the minimum indent of all non-blank lines; 0 if there are none
+
+    >>> lines = [' one', '  two', '   three']
+    >>> indentcount_lines(lines)
+    1
+    >>> lines = []
+    >>> indentcount_lines(lines)
+    0
+    >>> lines = [' one']
+    >>> indentcount_lines(lines)
+    1
+    >>> indentcount_lines(['    '])
+    0
+    '''
+    indentno = sys.maxsize
+    for line in lines:
+        stripped = line.lstrip()
+        if stripped:
+            indentno = min(indentno, len(line) - len(stripped))
+    if indentno == sys.maxsize:
+        return 0
+    return indentno
+
+
+def filldoc(docdict, unindent_params=True):
+    ''' Return a docstring decorator that fills fragments from `docdict`
+
+    Parameters
+    ----------
+    docdict : dictionary
+        dictionary containing name, docstring fragment pairs
+    unindent_params : bool, optional
+        If True, strip common indentation from all parameters in
+        docdict
+
+    Returns
+    -------
+    decfunc : function
+        decorator that applies dictionary to input function docstring
+
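+    Examples
+    --------
+    An illustrative sketch; the fragment name ``shape`` below is made up:
+
+    >>> @filldoc({'shape': 'shape : tuple of int'})
+    ... def f(shape):
+    ...     """Make an array.
+    ...
+    ...     Parameters
+    ...     ----------
+    ...     %(shape)s
+    ...     """
+    >>> 'tuple of int' in f.__doc__
+    True
+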
+    '''
+    if unindent_params:
+        docdict = unindent_dict(docdict)
+
+    def decorate(f):
+        f.__doc__ = docformat(f.__doc__, docdict)
+        return f
+    return decorate
+
+
+def unindent_dict(docdict):
+    ''' Unindent all strings in a docdict '''
+    can_dict = {}
+    for name, dstr in docdict.items():
+        can_dict[name] = unindent_string(dstr)
+    return can_dict
+
+
+def unindent_string(docstring):
+    ''' Remove the minimum indent from all lines, including the first
+
+    >>> unindent_string(' two')
+    'two'
+    >>> unindent_string('  two\\n   three')
+    'two\\n three'
+    '''
+    lines = docstring.expandtabs().splitlines()
+    icount = indentcount_lines(lines)
+    if icount == 0:
+        return docstring
+    return '\n'.join([line[icount:] for line in lines])
+
+
+def doc_replace(obj, oldval, newval):
+    """Decorator to take the docstring from obj, with oldval replaced by newval
+
+    Equivalent to ``func.__doc__ = obj.__doc__.replace(oldval, newval)``
+
+    Parameters
+    ----------
+    obj : object
+        The object to take the docstring from.
+    oldval : string
+        The string to replace from the original docstring.
+    newval : string
+        The string to replace ``oldval`` with.
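+
+    Examples
+    --------
+    A minimal illustrative sketch; ``g`` and ``h`` below are placeholder
+    functions, not part of SciPy:
+
+    >>> def g():
+    ...     '''Compute a thing quickly.'''
+    >>> @doc_replace(g, 'quickly', 'slowly')
+    ... def h():
+    ...     pass
+    >>> h.__doc__
+    'Compute a thing slowly.'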
+    """
+    # __doc__ may be None for optimized Python (-OO)
+    doc = (obj.__doc__ or '').replace(oldval, newval)
+
+    def inner(func):
+        func.__doc__ = doc
+        return func
+
+    return inner
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/messagestream.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/_lib/messagestream.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ad85c7032d58632ae26c22e6a0adefc9b30a7688
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/messagestream.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91e59f87f6a8dfd5955aa58aaa23df701c660d92
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb6c9bed647878749a1e20a87b1870b706153ea6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74d3c86ff33abd7fe58873d8f59a4eda1d4393fa
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ddd8acb219585d9d3388d1483383d8bd8bb548c2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..806be88c904c673253536c302b398280e8a10d3e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e4aae455e6560dcb71984830bfb80152a499334
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13a19ff232792d81a73802817f71c4e7c902e5c0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..137db83062d1191c7f6c156b8f1a0839bf276eb0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d13b06ccb9ea3b02f64e54ad0666ef496ceef825
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2ecbdee6ec2e9f0155153945a90516ef4295905
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_import_cycles.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_import_cycles.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba77e10003bb9c9be2a5b15effa6e0e35cd223f1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_import_cycles.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50d2de56e2e025814f245ea88b06509b5da8c00d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_scipy_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_scipy_version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e83b0ec567ebcc6e36cda2018a532504cf2ed474
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_scipy_version.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf68ee5d0a9c7a893c89a3e79e3b50580e7cc36a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74ec4682ebae07d56fc4e8f7812a2be281ff9d6c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__gcutils.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__gcutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..74307fa0151cf1f6562acf4200e969f41ad2a006
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__gcutils.py
@@ -0,0 +1,101 @@
+""" Test for assert_deallocated context manager and gc utilities
+"""
+import gc
+
+from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated,
+                                 ReferenceError, IS_PYPY)
+
+from numpy.testing import assert_equal
+
+import pytest
+
+
+def test_set_gc_state():
+    gc_status = gc.isenabled()
+    try:
+        for state in (True, False):
+            gc.enable()
+            set_gc_state(state)
+            assert_equal(gc.isenabled(), state)
+            gc.disable()
+            set_gc_state(state)
+            assert_equal(gc.isenabled(), state)
+    finally:
+        if gc_status:
+            gc.enable()
+
+
+def test_gc_state():
+    # Test gc_state context manager
+    gc_status = gc.isenabled()
+    try:
+        for pre_state in (True, False):
+            set_gc_state(pre_state)
+            for with_state in (True, False):
+                # Check the gc state is with_state in with block
+                with gc_state(with_state):
+                    assert_equal(gc.isenabled(), with_state)
+                # And returns to previous state outside block
+                assert_equal(gc.isenabled(), pre_state)
+                # Even if the gc state is set explicitly within the block
+                with gc_state(with_state):
+                    assert_equal(gc.isenabled(), with_state)
+                    set_gc_state(not with_state)
+                assert_equal(gc.isenabled(), pre_state)
+    finally:
+        if gc_status:
+            gc.enable()
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated():
+    # Ordinary use
+    class C:
+        def __init__(self, arg0, arg1, name='myname'):
+            self.name = name
+    for gc_current in (True, False):
+        with gc_state(gc_current):
+            # We are deleting from with-block context, so that's OK
+            with assert_deallocated(C, 0, 2, 'another name') as c:
+                assert_equal(c.name, 'another name')
+                del c
+            # Or not using the thing in with-block context, also OK
+            with assert_deallocated(C, 0, 2, name='third name'):
+                pass
+            assert_equal(gc.isenabled(), gc_current)
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_nodel():
+    class C:
+        pass
+    with pytest.raises(ReferenceError):
+        # Need to delete after using if in with-block context
+        # Note: assert_deallocated(C) needs to be assigned for the test
+        # to function correctly.  It is assigned to _, but _ itself is
+        # not referenced in the body of the with, it is only there for
+        # the refcount.
+        with assert_deallocated(C) as _:
+            pass
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_circular():
+    class C:
+        def __init__(self):
+            self._circular = self
+    with pytest.raises(ReferenceError):
+        # Circular reference, no automatic garbage collection
+        with assert_deallocated(C) as c:
+            del c
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_circular2():
+    class C:
+        def __init__(self):
+            self._circular = self
+    with pytest.raises(ReferenceError):
+        # Still circular reference, no automatic garbage collection
+        with assert_deallocated(C):
+            pass
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__pep440.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__pep440.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f5b71c8f1e13b42de2e8e612a005dec409fc025
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__pep440.py
@@ -0,0 +1,67 @@
+from pytest import raises as assert_raises
+from scipy._lib._pep440 import Version, parse
+
+
+def test_main_versions():
+    assert Version('1.8.0') == Version('1.8.0')
+    for ver in ['1.9.0', '2.0.0', '1.8.1']:
+        assert Version('1.8.0') < Version(ver)
+
+    for ver in ['1.7.0', '1.7.1', '0.9.9']:
+        assert Version('1.8.0') > Version(ver)
+
+
+def test_version_1_point_10():
+    # regression test for gh-2998.
+    assert Version('1.9.0') < Version('1.10.0')
+    assert Version('1.11.0') < Version('1.11.1')
+    assert Version('1.11.0') == Version('1.11.0')
+    assert Version('1.99.11') < Version('1.99.12')
+
+
+def test_alpha_beta_rc():
+    assert Version('1.8.0rc1') == Version('1.8.0rc1')
+    for ver in ['1.8.0', '1.8.0rc2']:
+        assert Version('1.8.0rc1') < Version(ver)
+
+    for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
+        assert Version('1.8.0rc1') > Version(ver)
+
+    assert Version('1.8.0b1') > Version('1.8.0a2')
+
+
+def test_dev_version():
+    assert Version('1.9.0.dev+Unknown') < Version('1.9.0')
+    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev+ffffffff', '1.9.0.dev1']:
+        assert Version('1.9.0.dev+f16acvda') < Version(ver)
+
+    assert Version('1.9.0.dev+f16acvda') == Version('1.9.0.dev+f16acvda')
+
+
+def test_dev_a_b_rc_mixed():
+    assert Version('1.9.0a2.dev+f16acvda') == Version('1.9.0a2.dev+f16acvda')
+    assert Version('1.9.0a2.dev+6acvda54') < Version('1.9.0a2')
+
+
+def test_dev0_version():
+    assert Version('1.9.0.dev0+Unknown') < Version('1.9.0')
+    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
+        assert Version('1.9.0.dev0+f16acvda') < Version(ver)
+
+    assert Version('1.9.0.dev0+f16acvda') == Version('1.9.0.dev0+f16acvda')
+
+
+def test_dev0_a_b_rc_mixed():
+    assert Version('1.9.0a2.dev0+f16acvda') == Version('1.9.0a2.dev0+f16acvda')
+    assert Version('1.9.0a2.dev0+6acvda54') < Version('1.9.0a2')
+
+
+def test_raises():
+    for ver in ['1,9.0', '1.7.x']:
+        assert_raises(ValueError, Version, ver)
+
+def test_legacy_version():
+    # Non-PEP-440 version identifiers always compare less. For NumPy this only
+    # occurs on dev builds prior to 1.10.0 which are unsupported anyway.
+    assert parse('invalid') < Version('0.0.0')
+    assert parse('1.9.0-f16acvda') < Version('1.0.0')
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..88db113d6d5a35c96ecc0a6a36ab42d74be49153
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py
@@ -0,0 +1,32 @@
+import sys
+from scipy._lib._testutils import _parse_size, _get_mem_available
+import pytest
+
+
+def test__parse_size():
+    expected = {
+        '12': 12e6,
+        '12 b': 12,
+        '12k': 12e3,
+        '  12  M  ': 12e6,
+        '  12  G  ': 12e9,
+        ' 12Tb ': 12e12,
+        '12  Mib ': 12 * 1024.0**2,
+        '12Tib': 12 * 1024.0**4,
+    }
+
+    for inp, outp in sorted(expected.items()):
+        if outp is None:
+            with pytest.raises(ValueError):
+                _parse_size(inp)
+        else:
+            assert _parse_size(inp) == outp
+
+
+def test__mem_available():
+    # May return None on non-Linux platforms
+    available = _get_mem_available()
+    if sys.platform.startswith('linux'):
+        assert available >= 0
+    else:
+        assert available is None or available >= 0
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__threadsafety.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__threadsafety.py
new file mode 100644
index 0000000000000000000000000000000000000000..87ae85ef318da2b8bb104c4a87faa4e4021c01d5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__threadsafety.py
@@ -0,0 +1,51 @@
+import threading
+import time
+import traceback
+
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+
+from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError
+
+
+def test_parallel_threads():
+    # Check that ReentrancyLock serializes work in parallel threads.
+    #
+    # The test is not fully deterministic, and may pass spuriously
+    # even in the presence of a bug, if the timings happen to work out.
+
+    lock = ReentrancyLock("failure")
+
+    failflag = [False]
+    exceptions_raised = []
+
+    def worker(k):
+        try:
+            with lock:
+                assert_(not failflag[0])
+                failflag[0] = True
+                time.sleep(0.1 * k)
+                assert_(failflag[0])
+                failflag[0] = False
+        except Exception:
+            exceptions_raised.append(traceback.format_exc(2))
+
+    threads = [threading.Thread(target=lambda k=k: worker(k))
+               for k in range(3)]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()
+
+    exceptions_raised = "\n".join(exceptions_raised)
+    assert_(not exceptions_raised, exceptions_raised)
+
+
+def test_reentering():
+    # Check that ReentrancyLock prevents re-entering from the same thread.
+
+    @non_reentrant()
+    def func(x):
+        return func(x)
+
+    assert_raises(ReentrancyError, func, 0)
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py
new file mode 100644
index 0000000000000000000000000000000000000000..691bf3380dd530e27c957f996283a25cd0585982
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py
@@ -0,0 +1,408 @@
+from multiprocessing import Pool
+from multiprocessing.pool import Pool as PWL
+import re
+import math
+from fractions import Fraction
+
+import numpy as np
+from numpy.testing import assert_equal, assert_
+import pytest
+from pytest import raises as assert_raises
+import hypothesis.extra.numpy as npst
+from hypothesis import given, strategies, reproduce_failure  # noqa: F401
+from scipy.conftest import array_api_compatible
+
+from scipy._lib._array_api import xp_assert_equal
+from scipy._lib._util import (_aligned_zeros, check_random_state, MapWrapper,
+                              getfullargspec_no_self, FullArgSpec,
+                              rng_integers, _validate_int, _rename_parameter,
+                              _contains_nan, _rng_html_rewrite, _lazywhere)
+
+
+def test__aligned_zeros():
+    niter = 10
+
+    def check(shape, dtype, order, align):
+        err_msg = repr((shape, dtype, order, align))
+        x = _aligned_zeros(shape, dtype, order, align=align)
+        if align is None:
+            align = np.dtype(dtype).alignment
+        assert_equal(x.__array_interface__['data'][0] % align, 0)
+        if hasattr(shape, '__len__'):
+            assert_equal(x.shape, shape, err_msg)
+        else:
+            assert_equal(x.shape, (shape,), err_msg)
+        assert_equal(x.dtype, dtype)
+        if order == "C":
+            assert_(x.flags.c_contiguous, err_msg)
+        elif order == "F":
+            if x.size > 0:
+                # Size-0 arrays get invalid flags on NumPy 1.5
+                assert_(x.flags.f_contiguous, err_msg)
+        elif order is None:
+            assert_(x.flags.c_contiguous, err_msg)
+        else:
+            raise ValueError()
+
+    # try various alignments
+    for align in [1, 2, 3, 4, 8, 16, 32, 64, None]:
+        for n in [0, 1, 3, 11]:
+            for order in ["C", "F", None]:
+                for dtype in [np.uint8, np.float64]:
+                    for shape in [n, (1, 2, 3, n)]:
+                        for j in range(niter):
+                            check(shape, dtype, order, align)
+
+
+def test_check_random_state():
+    # If seed is None, return the RandomState singleton used by np.random.
+    # If seed is an int, return a new RandomState instance seeded with seed.
+    # If seed is already a RandomState instance, return it.
+    # Otherwise raise ValueError.
+    rsi = check_random_state(1)
+    assert_equal(type(rsi), np.random.RandomState)
+    rsi = check_random_state(rsi)
+    assert_equal(type(rsi), np.random.RandomState)
+    rsi = check_random_state(None)
+    assert_equal(type(rsi), np.random.RandomState)
+    assert_raises(ValueError, check_random_state, 'a')
+    rg = np.random.Generator(np.random.PCG64())
+    rsi = check_random_state(rg)
+    assert_equal(type(rsi), np.random.Generator)
+
+
+def test_getfullargspec_no_self():
+    p = MapWrapper(1)
+    argspec = getfullargspec_no_self(p.__init__)
+    assert_equal(argspec, FullArgSpec(['pool'], None, None, (1,), [],
+                                      None, {}))
+    argspec = getfullargspec_no_self(p.__call__)
+    assert_equal(argspec, FullArgSpec(['func', 'iterable'], None, None, None,
+                                      [], None, {}))
+
+    class _rv_generic:
+        def _rvs(self, a, b=2, c=3, *args, size=None, **kwargs):
+            return None
+
+    rv_obj = _rv_generic()
+    argspec = getfullargspec_no_self(rv_obj._rvs)
+    assert_equal(argspec, FullArgSpec(['a', 'b', 'c'], 'args', 'kwargs',
+                                      (2, 3), ['size'], {'size': None}, {}))
+
+
+def test_mapwrapper_serial():
+    in_arg = np.arange(10.)
+    out_arg = np.sin(in_arg)
+
+    p = MapWrapper(1)
+    assert_(p._mapfunc is map)
+    assert_(p.pool is None)
+    assert_(p._own_pool is False)
+    out = list(p(np.sin, in_arg))
+    assert_equal(out, out_arg)
+
+    with assert_raises(RuntimeError):
+        p = MapWrapper(0)
+
+
+def test_pool():
+    with Pool(2) as p:
+        p.map(math.sin, [1, 2, 3, 4])
+
+
+def test_mapwrapper_parallel():
+    in_arg = np.arange(10.)
+    out_arg = np.sin(in_arg)
+
+    with MapWrapper(2) as p:
+        out = p(np.sin, in_arg)
+        assert_equal(list(out), out_arg)
+
+        assert_(p._own_pool is True)
+        assert_(isinstance(p.pool, PWL))
+        assert_(p._mapfunc is not None)
+
+    # the context manager should've closed the internal pool
+    # check that it has by asking it to calculate again.
+    with assert_raises(Exception) as excinfo:
+        p(np.sin, in_arg)
+
+    assert_(excinfo.type is ValueError)
+
+    # can also set a MapWrapper up with a map-like callable instance
+    with Pool(2) as p:
+        q = MapWrapper(p.map)
+
+        assert_(q._own_pool is False)
+        q.close()
+
+        # closing the MapWrapper shouldn't close the internal pool
+        # because it didn't create it
+        out = p.map(np.sin, in_arg)
+        assert_equal(list(out), out_arg)
+
+
+def test_rng_integers():
+    rng = np.random.RandomState()
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # now try with np.random.Generator
+    try:
+        rng = np.random.default_rng()
+    except AttributeError:
+        return
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+
+class TestValidateInt:
+
+    @pytest.mark.parametrize('n', [4, np.uint8(4), np.int16(4), np.array(4)])
+    def test_validate_int(self, n):
+        n = _validate_int(n, 'n')
+        assert n == 4
+
+    @pytest.mark.parametrize('n', [4.0, np.array([4]), Fraction(4, 1)])
+    def test_validate_int_bad(self, n):
+        with pytest.raises(TypeError, match='n must be an integer'):
+            _validate_int(n, 'n')
+
+    def test_validate_int_below_min(self):
+        with pytest.raises(ValueError, match='n must be an integer not '
+                                             'less than 0'):
+            _validate_int(-1, 'n', 0)
+
+
+class TestRenameParameter:
+    # check that wrapper `_rename_parameter` for backward-compatible
+    # keyword renaming works correctly
+
+    # Example method/function that still accepts keyword `old`
+    @_rename_parameter("old", "new")
+    def old_keyword_still_accepted(self, new):
+        return new
+
+    # Example method/function for which keyword `old` is deprecated
+    @_rename_parameter("old", "new", dep_version="1.9.0")
+    def old_keyword_deprecated(self, new):
+        return new
+
+    def test_old_keyword_still_accepted(self):
+        # positional argument and both keyword work identically
+        res1 = self.old_keyword_still_accepted(10)
+        res2 = self.old_keyword_still_accepted(new=10)
+        res3 = self.old_keyword_still_accepted(old=10)
+        assert res1 == res2 == res3 == 10
+
+        # unexpected keyword raises an error
+        message = re.escape("old_keyword_still_accepted() got an unexpected")
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_still_accepted(unexpected=10)
+
+        # multiple values for the same parameter raises an error
+        message = re.escape("old_keyword_still_accepted() got multiple")
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_still_accepted(10, new=10)
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_still_accepted(10, old=10)
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_still_accepted(new=10, old=10)
+
+    def test_old_keyword_deprecated(self):
+        # positional argument and both keyword work identically,
+        # but use of old keyword results in DeprecationWarning
+        dep_msg = "Use of keyword argument `old` is deprecated"
+        res1 = self.old_keyword_deprecated(10)
+        res2 = self.old_keyword_deprecated(new=10)
+        with pytest.warns(DeprecationWarning, match=dep_msg):
+            res3 = self.old_keyword_deprecated(old=10)
+        assert res1 == res2 == res3 == 10
+
+        # unexpected keyword raises an error
+        message = re.escape("old_keyword_deprecated() got an unexpected")
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_deprecated(unexpected=10)
+
+        # multiple values for the same parameter raises an error and,
+        # if old keyword is used, results in DeprecationWarning
+        message = re.escape("old_keyword_deprecated() got multiple")
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_deprecated(10, new=10)
+        with pytest.raises(TypeError, match=message), \
+                pytest.warns(DeprecationWarning, match=dep_msg):
+            self.old_keyword_deprecated(10, old=10)
+        with pytest.raises(TypeError, match=message), \
+                pytest.warns(DeprecationWarning, match=dep_msg):
+            self.old_keyword_deprecated(new=10, old=10)
+
+
+class TestContainsNaNTest:
+
+    def test_policy(self):
+        data = np.array([1, 2, 3, np.nan])
+
+        contains_nan, nan_policy = _contains_nan(data, nan_policy="propagate")
+        assert contains_nan
+        assert nan_policy == "propagate"
+
+        contains_nan, nan_policy = _contains_nan(data, nan_policy="omit")
+        assert contains_nan
+        assert nan_policy == "omit"
+
+        msg = "The input contains nan values"
+        with pytest.raises(ValueError, match=msg):
+            _contains_nan(data, nan_policy="raise")
+
+        msg = "nan_policy must be one of"
+        with pytest.raises(ValueError, match=msg):
+            _contains_nan(data, nan_policy="nan")
+
+    def test_contains_nan_1d(self):
+        data1 = np.array([1, 2, 3])
+        assert not _contains_nan(data1)[0]
+
+        data2 = np.array([1, 2, 3, np.nan])
+        assert _contains_nan(data2)[0]
+
+        data3 = np.array([np.nan, 2, 3, np.nan])
+        assert _contains_nan(data3)[0]
+
+        data4 = np.array([1, 2, "3", np.nan])  # converted to string "nan"
+        assert not _contains_nan(data4)[0]
+
+        data5 = np.array([1, 2, "3", np.nan], dtype='object')
+        assert _contains_nan(data5)[0]
+
+    def test_contains_nan_2d(self):
+        data1 = np.array([[1, 2], [3, 4]])
+        assert not _contains_nan(data1)[0]
+
+        data2 = np.array([[1, 2], [3, np.nan]])
+        assert _contains_nan(data2)[0]
+
+        data3 = np.array([["1", 2], [3, np.nan]])  # converted to string "nan"
+        assert not _contains_nan(data3)[0]
+
+        data4 = np.array([["1", 2], [3, np.nan]], dtype='object')
+        assert _contains_nan(data4)[0]
+
+
+def test__rng_html_rewrite():
+    def mock_str():
+        lines = [
+            'np.random.default_rng(8989843)',
+            'np.random.default_rng(seed)',
+            'np.random.default_rng(0x9a71b21474694f919882289dc1559ca)',
+            ' bob ',
+        ]
+        return lines
+
+    res = _rng_html_rewrite(mock_str)()
+    ref = [
+        'np.random.default_rng()',
+        'np.random.default_rng(seed)',
+        'np.random.default_rng()',
+        ' bob ',
+    ]
+
+    assert res == ref
+
+
+class TestLazywhere:
+    n_arrays = strategies.integers(min_value=1, max_value=3)
+    rng_seed = strategies.integers(min_value=1000000000, max_value=9999999999)
+    dtype = strategies.sampled_from((np.float32, np.float64))
+    p = strategies.floats(min_value=0, max_value=1)
+    data = strategies.data()
+
+    @pytest.mark.filterwarnings('ignore::RuntimeWarning')  # overflows, etc.
+    @array_api_compatible
+    @given(n_arrays=n_arrays, rng_seed=rng_seed, dtype=dtype, p=p, data=data)
+    def test_basic(self, n_arrays, rng_seed, dtype, p, data, xp):
+        mbs = npst.mutually_broadcastable_shapes(num_shapes=n_arrays+1,
+                                                 min_side=0)
+        input_shapes, result_shape = data.draw(mbs)
+        cond_shape, *shapes = input_shapes
+        fillvalue = xp.asarray(data.draw(npst.arrays(dtype=dtype, shape=tuple())))
+        arrays = [xp.asarray(data.draw(npst.arrays(dtype=dtype, shape=shape)))
+                  for shape in shapes]
+
+        def f(*args):
+            return sum(arg for arg in args)
+
+        def f2(*args):
+            return sum(arg for arg in args) / 2
+
+        rng = np.random.default_rng(rng_seed)
+        cond = xp.asarray(rng.random(size=cond_shape) > p)
+
+        res1 = _lazywhere(cond, arrays, f, fillvalue)
+        res2 = _lazywhere(cond, arrays, f, f2=f2)
+
+        # Ensure arrays are at least 1d to follow sane type promotion rules.
+        if xp == np:
+            cond, fillvalue, *arrays = np.atleast_1d(cond, fillvalue, *arrays)
+
+        ref1 = xp.where(cond, f(*arrays), fillvalue)
+        ref2 = xp.where(cond, f(*arrays), f2(*arrays))
+
+        if xp == np:
+            ref1 = ref1.reshape(result_shape)
+            ref2 = ref2.reshape(result_shape)
+            res1 = xp.asarray(res1)[()]
+            res2 = xp.asarray(res2)[()]
+
+        assert isinstance(res1, type(xp.asarray([])))
+        xp_assert_equal(res1, ref1)
+        assert_equal(res1.shape, ref1.shape)
+        assert_equal(res1.dtype, ref1.dtype)
+
+        assert isinstance(res2, type(xp.asarray([])))
+        xp_assert_equal(res2, ref2)
+        assert_equal(res2.shape, ref2.shape)
+        assert_equal(res2.dtype, ref2.dtype)
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_array_api.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_array_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..defe238d58479bee263ee29199319f39a8193765
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_array_api.py
@@ -0,0 +1,109 @@
+import numpy as np
+import pytest
+
+from scipy.conftest import array_api_compatible
+from scipy._lib._array_api import (
+    _GLOBAL_CONFIG, array_namespace, _asarray, copy, xp_assert_equal, is_numpy
+)
+import scipy._lib.array_api_compat.numpy as np_compat
+
+
+@pytest.mark.skipif(not _GLOBAL_CONFIG["SCIPY_ARRAY_API"],
+        reason="Array API test; set environment variable SCIPY_ARRAY_API=1 to run it")
+class TestArrayAPI:
+
+    def test_array_namespace(self):
+        x, y = np.array([0, 1, 2]), np.array([0, 1, 2])
+        xp = array_namespace(x, y)
+        assert 'array_api_compat.numpy' in xp.__name__
+
+        _GLOBAL_CONFIG["SCIPY_ARRAY_API"] = False
+        xp = array_namespace(x, y)
+        assert 'array_api_compat.numpy' in xp.__name__
+        _GLOBAL_CONFIG["SCIPY_ARRAY_API"] = True
+
+    @array_api_compatible
+    def test_asarray(self, xp):
+        x, y = _asarray([0, 1, 2], xp=xp), _asarray(np.arange(3), xp=xp)
+        ref = xp.asarray([0, 1, 2])
+        xp_assert_equal(x, ref)
+        xp_assert_equal(y, ref)
+
+    @pytest.mark.filterwarnings("ignore: the matrix subclass")
+    def test_raises(self):
+        msg = "of type `numpy.ma.MaskedArray` are not supported"
+        with pytest.raises(TypeError, match=msg):
+            array_namespace(np.ma.array(1), np.array(1))
+
+        msg = "of type `numpy.matrix` are not supported"
+        with pytest.raises(TypeError, match=msg):
+            array_namespace(np.array(1), np.matrix(1))
+
+        msg = "only boolean and numerical dtypes are supported"
+        with pytest.raises(TypeError, match=msg):
+            array_namespace([object()])
+        with pytest.raises(TypeError, match=msg):
+            array_namespace('abc')
+
+    def test_array_likes(self):
+        # should be no exceptions
+        array_namespace([0, 1, 2])
+        array_namespace(1, 2, 3)
+        array_namespace(1)
+
+    @array_api_compatible
+    def test_copy(self, xp):
+        for _xp in [xp, None]:
+            x = xp.asarray([1, 2, 3])
+            y = copy(x, xp=_xp)
+            # with numpy we'd want to use np.shares_memory, but that's not
+            # specified in the array-api
+            x[0] = 10
+            x[1] = 11
+            x[2] = 12
+
+            assert x[0] != y[0]
+            assert x[1] != y[1]
+            assert x[2] != y[2]
+            assert id(x) != id(y)
+
+    @array_api_compatible
+    @pytest.mark.parametrize('dtype', ['int32', 'int64', 'float32', 'float64'])
+    @pytest.mark.parametrize('shape', [(), (3,)])
+    def test_strict_checks(self, xp, dtype, shape):
+        # Check that `_strict_check` behaves as expected
+        dtype = getattr(xp, dtype)
+        x = xp.broadcast_to(xp.asarray(1, dtype=dtype), shape)
+        x = x if shape else x[()]
+        y = np_compat.asarray(1)[()]
+
+        options = dict(check_namespace=True, check_dtype=False, check_shape=False)
+        if xp == np:
+            xp_assert_equal(x, y, **options)
+        else:
+            with pytest.raises(AssertionError, match="Namespaces do not match."):
+                xp_assert_equal(x, y, **options)
+
+        options = dict(check_namespace=False, check_dtype=True, check_shape=False)
+        if y.dtype.name in str(x.dtype):
+            xp_assert_equal(x, y, **options)
+        else:
+            with pytest.raises(AssertionError, match="dtypes do not match."):
+                xp_assert_equal(x, y, **options)
+
+        options = dict(check_namespace=False, check_dtype=False, check_shape=True)
+        if x.shape == y.shape:
+            xp_assert_equal(x, y, **options)
+        else:
+            with pytest.raises(AssertionError, match="Shapes do not match."):
+                xp_assert_equal(x, y, **options)
+
+    @array_api_compatible
+    def test_check_scalar(self, xp):
+        if not is_numpy(xp):
+            pytest.skip("Scalars only exist in NumPy")
+
+        with pytest.raises(AssertionError, match="Types do not match."):
+            xp_assert_equal(xp.asarray(0.), xp.float64(0))
+        xp_assert_equal(xp.float64(0), xp.asarray(0.))
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py
new file mode 100644
index 0000000000000000000000000000000000000000..f19ca377129b925cad732dd25bf3089c646f923f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py
@@ -0,0 +1,162 @@
+import pytest
+import pickle
+from numpy.testing import assert_equal
+from scipy._lib._bunch import _make_tuple_bunch
+
+
+# `Result` is defined at the top level of the module so it can be
+# used to test pickling.
+Result = _make_tuple_bunch('Result', ['x', 'y', 'z'], ['w', 'beta'])
+
+
+class TestMakeTupleBunch:
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Tests with Result
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    def setup_method(self):
+        # Set up an instance of Result.
+        self.result = Result(x=1, y=2, z=3, w=99, beta=0.5)
+
+    def test_attribute_access(self):
+        assert_equal(self.result.x, 1)
+        assert_equal(self.result.y, 2)
+        assert_equal(self.result.z, 3)
+        assert_equal(self.result.w, 99)
+        assert_equal(self.result.beta, 0.5)
+
+    def test_indexing(self):
+        assert_equal(self.result[0], 1)
+        assert_equal(self.result[1], 2)
+        assert_equal(self.result[2], 3)
+        assert_equal(self.result[-1], 3)
+        with pytest.raises(IndexError, match='index out of range'):
+            self.result[3]
+
+    def test_unpacking(self):
+        x0, y0, z0 = self.result
+        assert_equal((x0, y0, z0), (1, 2, 3))
+        assert_equal(self.result, (1, 2, 3))
+
+    def test_slice(self):
+        assert_equal(self.result[1:], (2, 3))
+        assert_equal(self.result[::2], (1, 3))
+        assert_equal(self.result[::-1], (3, 2, 1))
+
+    def test_len(self):
+        assert_equal(len(self.result), 3)
+
+    def test_repr(self):
+        s = repr(self.result)
+        assert_equal(s, 'Result(x=1, y=2, z=3, w=99, beta=0.5)')
+
+    def test_hash(self):
+        assert_equal(hash(self.result), hash((1, 2, 3)))
+
+    def test_pickle(self):
+        s = pickle.dumps(self.result)
+        obj = pickle.loads(s)
+        assert isinstance(obj, Result)
+        assert_equal(obj.x, self.result.x)
+        assert_equal(obj.y, self.result.y)
+        assert_equal(obj.z, self.result.z)
+        assert_equal(obj.w, self.result.w)
+        assert_equal(obj.beta, self.result.beta)
+
+    def test_read_only_existing(self):
+        with pytest.raises(AttributeError, match="can't set attribute"):
+            self.result.x = -1
+
+    def test_read_only_new(self):
+        self.result.plate_of_shrimp = "lattice of coincidence"
+        assert self.result.plate_of_shrimp == "lattice of coincidence"
+
+    def test_constructor_missing_parameter(self):
+        with pytest.raises(TypeError, match='missing'):
+            # `w` is missing.
+            Result(x=1, y=2, z=3, beta=0.75)
+
+    def test_constructor_incorrect_parameter(self):
+        with pytest.raises(TypeError, match='unexpected'):
+            # `foo` is not an existing field.
+            Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999)
+
+    def test_module(self):
+        m = 'scipy._lib.tests.test_bunch'
+        assert_equal(Result.__module__, m)
+        assert_equal(self.result.__module__, m)
+
+    def test_extra_fields_per_instance(self):
+        # This test exists to ensure that instances of the same class
+        # store their own values for the extra fields. That is, the values
+        # are stored per instance and not in the class.
+        result1 = Result(x=1, y=2, z=3, w=-1, beta=0.0)
+        result2 = Result(x=4, y=5, z=6, w=99, beta=1.0)
+        assert_equal(result1.w, -1)
+        assert_equal(result1.beta, 0.0)
+        # The rest of these checks aren't essential, but let's check
+        # them anyway.
+        assert_equal(result1[:], (1, 2, 3))
+        assert_equal(result2.w, 99)
+        assert_equal(result2.beta, 1.0)
+        assert_equal(result2[:], (4, 5, 6))
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Other tests
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    def test_extra_field_names_is_optional(self):
+        Square = _make_tuple_bunch('Square', ['width', 'height'])
+        sq = Square(width=1, height=2)
+        assert_equal(sq.width, 1)
+        assert_equal(sq.height, 2)
+        s = repr(sq)
+        assert_equal(s, 'Square(width=1, height=2)')
+
+    def test_tuple_like(self):
+        Tup = _make_tuple_bunch('Tup', ['a', 'b'])
+        tu = Tup(a=1, b=2)
+        assert isinstance(tu, tuple)
+        assert isinstance(tu + (1,), tuple)
+
+    def test_explicit_module(self):
+        m = 'some.module.name'
+        Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'], module=m)
+        foo = Foo(x=1, a=355, b=113)
+        assert_equal(Foo.__module__, m)
+        assert_equal(foo.__module__, m)
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Argument validation
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    @pytest.mark.parametrize('args', [('123', ['a'], ['b']),
+                                      ('Foo', ['-3'], ['x']),
+                                      ('Foo', ['a'], ['+-*/'])])
+    def test_identifiers_not_allowed(self, args):
+        with pytest.raises(ValueError, match='identifiers'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']),
+                                      ('Foo', ['a', 'b'], ['b', 'x'])])
+    def test_repeated_field_names(self, args):
+        with pytest.raises(ValueError, match='Duplicate'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']),
+                                      ('Foo', ['a'], ['_x'])])
+    def test_leading_underscore_not_allowed(self, args):
+        with pytest.raises(ValueError, match='underscore'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['def'], ['x']),
+                                      ('Foo', ['a'], ['or']),
+                                      ('and', ['a'], ['x'])])
+    def test_keyword_not_allowed_in_fields(self, args):
+        with pytest.raises(ValueError, match='keyword'):
+            _make_tuple_bunch(*args)
+
+    def test_at_least_one_field_name_required(self):
+        with pytest.raises(ValueError, match='at least one name'):
+            _make_tuple_bunch('Qwerty', [], ['a', 'b'])
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py
new file mode 100644
index 0000000000000000000000000000000000000000..82021775c294c7b881b9458b57d16deaac483cc7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py
@@ -0,0 +1,204 @@
+from numpy.testing import assert_equal, assert_
+from pytest import raises as assert_raises
+
+import time
+import pytest
+import ctypes
+import threading
+from scipy._lib import _ccallback_c as _test_ccallback_cython
+from scipy._lib import _test_ccallback
+from scipy._lib._ccallback import LowLevelCallable
+
+try:
+    import cffi
+    HAVE_CFFI = True
+except ImportError:
+    HAVE_CFFI = False
+
+
+ERROR_VALUE = 2.0
+
+
+def callback_python(a, user_data=None):
+    if a == ERROR_VALUE:
+        raise ValueError("bad value")
+
+    if user_data is None:
+        return a + 1
+    else:
+        return a + user_data
+
+def _get_cffi_func(base, signature):
+    if not HAVE_CFFI:
+        pytest.skip("cffi not installed")
+
+    # Get function address
+    voidp = ctypes.cast(base, ctypes.c_void_p)
+    address = voidp.value
+
+    # Create corresponding cffi handle
+    ffi = cffi.FFI()
+    func = ffi.cast(signature, address)
+    return func
+
+
+def _get_ctypes_data():
+    value = ctypes.c_double(2.0)
+    return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)
+
+
+def _get_cffi_data():
+    if not HAVE_CFFI:
+        pytest.skip("cffi not installed")
+    ffi = cffi.FFI()
+    return ffi.new('double *', 2.0)
+
+
+CALLERS = {
+    'simple': _test_ccallback.test_call_simple,
+    'nodata': _test_ccallback.test_call_nodata,
+    'nonlocal': _test_ccallback.test_call_nonlocal,
+    'cython': _test_ccallback_cython.test_call_cython,
+}
+
+# These functions have signatures known to the callers
+FUNCS = {
+    'python': lambda: callback_python,
+    'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
+    'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
+                                                   "plus1_cython"),
+    'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
+    'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
+                                   'double (*)(double, int *, void *)'),
+    'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
+    'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
+                                                     "plus1b_cython"),
+    'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
+    'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
+                                     'double (*)(double, double, int *, void *)'),
+}
+
+# These functions have signatures the callers don't know
+BAD_FUNCS = {
+    'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
+    'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
+                                                      "plus1bc_cython"),
+    'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
+    'cffi_bc': lambda: _get_cffi_func(
+        _test_ccallback_cython.plus1bc_ctypes,
+        'double (*)(double, double, double, int *, void *)'
+    ),
+}
+
+USER_DATAS = {
+    'ctypes': _get_ctypes_data,
+    'cffi': _get_cffi_data,
+    'capsule': _test_ccallback.test_get_data_capsule,
+}
+
+
+def test_callbacks():
+    def check(caller, func, user_data):
+        caller = CALLERS[caller]
+        func = FUNCS[func]()
+        user_data = USER_DATAS[user_data]()
+
+        if func is callback_python:
+            def func2(x):
+                return func(x, 2.0)
+        else:
+            func2 = LowLevelCallable(func, user_data)
+            func = LowLevelCallable(func)
+
+        # Test basic call
+        assert_equal(caller(func, 1.0), 2.0)
+
+        # Test 'bad' value resulting in an error
+        assert_raises(ValueError, caller, func, ERROR_VALUE)
+
+        # Test passing in user_data
+        assert_equal(caller(func2, 1.0), 3.0)
+
+    for caller in sorted(CALLERS.keys()):
+        for func in sorted(FUNCS.keys()):
+            for user_data in sorted(USER_DATAS.keys()):
+                check(caller, func, user_data)
+
+
+def test_bad_callbacks():
+    def check(caller, func, user_data):
+        caller = CALLERS[caller]
+        user_data = USER_DATAS[user_data]()
+        func = BAD_FUNCS[func]()
+
+        if func is callback_python:
+            def func2(x):
+                return func(x, 2.0)
+        else:
+            func2 = LowLevelCallable(func, user_data)
+            func = LowLevelCallable(func)
+
+        # Test that basic call fails
+        assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)
+
+        # Test that passing in user_data also fails
+        assert_raises(ValueError, caller, func2, 1.0)
+
+        # Test error message
+        llfunc = LowLevelCallable(func)
+        try:
+            caller(llfunc, 1.0)
+        except ValueError as err:
+            msg = str(err)
+            assert_(llfunc.signature in msg, msg)
+            assert_('double (double, double, int *, void *)' in msg, msg)
+
+    for caller in sorted(CALLERS.keys()):
+        for func in sorted(BAD_FUNCS.keys()):
+            for user_data in sorted(USER_DATAS.keys()):
+                check(caller, func, user_data)
+
+
+def test_signature_override():
+    caller = _test_ccallback.test_call_simple
+    func = _test_ccallback.test_get_plus1_capsule()
+
+    llcallable = LowLevelCallable(func, signature="bad signature")
+    assert_equal(llcallable.signature, "bad signature")
+    assert_raises(ValueError, caller, llcallable, 3)
+
+    llcallable = LowLevelCallable(func, signature="double (double, int *, void *)")
+    assert_equal(llcallable.signature, "double (double, int *, void *)")
+    assert_equal(caller(llcallable, 3), 4)
+
+
+def test_threadsafety():
+    def callback(a, caller):
+        if a <= 0:
+            return 1
+        else:
+            res = caller(lambda x: callback(x, caller), a - 1)
+            return 2*res
+
+    def check(caller):
+        caller = CALLERS[caller]
+
+        results = []
+
+        count = 10
+
+        def run():
+            time.sleep(0.01)
+            r = caller(lambda x: callback(x, caller), count)
+            results.append(r)
+
+        threads = [threading.Thread(target=run) for j in range(20)]
+        for thread in threads:
+            thread.start()
+        for thread in threads:
+            thread.join()
+
+        assert_equal(results, [2.0**count]*len(threads))
+
+    for caller in CALLERS.keys():
+        check(caller)
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..7910bd56f6b0c37276c9dff5a15cd3ddf755840e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py
@@ -0,0 +1,10 @@
+import pytest
+
+
+def test_cython_api_deprecation():
+    match = ("`scipy._lib._test_deprecation_def.foo_deprecated` "
+             "is deprecated, use `foo` instead!\n"
+             "Deprecated in Scipy 42.0.0")
+    with pytest.warns(DeprecationWarning, match=match):
+        from .. import _test_deprecation_call
+    assert _test_deprecation_call.call() == (1, 1)
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_import_cycles.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_import_cycles.py
new file mode 100644
index 0000000000000000000000000000000000000000..e61c57093f6206eb78c4b9e5f1561ff77e83dfd7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_import_cycles.py
@@ -0,0 +1,14 @@
+import sys
+import subprocess
+
+from .test_public_api import PUBLIC_MODULES
+
+# Regression tests for gh-6793.
+# Check that all modules are importable in a new Python process.
+# This is not necessarily true if there are import cycles present.
+
+def test_public_modules_importable():
+    pids = [subprocess.Popen([sys.executable, '-c', f'import {module}'])
+            for module in PUBLIC_MODULES]
+    for i, pid in enumerate(pids):
+        assert pid.wait() == 0, f'Failed to import {PUBLIC_MODULES[i]}'
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..be360daaa047b112a5f926b896267b8179443eee
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py
@@ -0,0 +1,491 @@
+"""
+This test script is adapted from:
+    https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
+"""
+
+import pkgutil
+import types
+import importlib
+import warnings
+from importlib import import_module
+
+import pytest
+
+import scipy
+
+from scipy.conftest import xp_available_backends
+
+
+def test_dir_testing():
+    """Assert that output of dir has only one "testing/tester"
+    attribute without duplicate"""
+    assert len(dir(scipy)) == len(set(dir(scipy)))
+
+
+# Historically SciPy has not used leading underscores for private submodules
+# much.  This has resulted in lots of things that look like public modules
+# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
+# but were never intended to be public.  The PUBLIC_MODULES list contains
+# modules that are either public because they were meant to be, or because they
+# contain public functions/objects that aren't present in any other namespace
+# for whatever reason and therefore should be treated as public.
+PUBLIC_MODULES = ["scipy." + s for s in [
+    "cluster",
+    "cluster.vq",
+    "cluster.hierarchy",
+    "constants",
+    "datasets",
+    "fft",
+    "fftpack",
+    "integrate",
+    "interpolate",
+    "io",
+    "io.arff",
+    "io.matlab",
+    "io.wavfile",
+    "linalg",
+    "linalg.blas",
+    "linalg.cython_blas",
+    "linalg.lapack",
+    "linalg.cython_lapack",
+    "linalg.interpolative",
+    "misc",
+    "ndimage",
+    "odr",
+    "optimize",
+    "signal",
+    "signal.windows",
+    "sparse",
+    "sparse.linalg",
+    "sparse.csgraph",
+    "spatial",
+    "spatial.distance",
+    "spatial.transform",
+    "special",
+    "stats",
+    "stats.contingency",
+    "stats.distributions",
+    "stats.mstats",
+    "stats.qmc",
+    "stats.sampling"
+]]
+
+# The PRIVATE_BUT_PRESENT_MODULES list contains modules that lacked underscores
+# in their names and hence looked public, but weren't meant to be. All these
+# namespaces were deprecated in the 1.8.0 release - see "clear split between
+# public and private API" in the 1.8.0 release notes.
+# Support for these private modules will be removed in SciPy v2.0.0, as the
+# deprecation messages emitted by each of these modules say.
+PRIVATE_BUT_PRESENT_MODULES = [
+    'scipy.constants.codata',
+    'scipy.constants.constants',
+    'scipy.fftpack.basic',
+    'scipy.fftpack.convolve',
+    'scipy.fftpack.helper',
+    'scipy.fftpack.pseudo_diffs',
+    'scipy.fftpack.realtransforms',
+    'scipy.integrate.dop',
+    'scipy.integrate.lsoda',
+    'scipy.integrate.odepack',
+    'scipy.integrate.quadpack',
+    'scipy.integrate.vode',
+    'scipy.interpolate.dfitpack',
+    'scipy.interpolate.fitpack',
+    'scipy.interpolate.fitpack2',
+    'scipy.interpolate.interpnd',
+    'scipy.interpolate.interpolate',
+    'scipy.interpolate.ndgriddata',
+    'scipy.interpolate.polyint',
+    'scipy.interpolate.rbf',
+    'scipy.io.arff.arffread',
+    'scipy.io.harwell_boeing',
+    'scipy.io.idl',
+    'scipy.io.matlab.byteordercodes',
+    'scipy.io.matlab.mio',
+    'scipy.io.matlab.mio4',
+    'scipy.io.matlab.mio5',
+    'scipy.io.matlab.mio5_params',
+    'scipy.io.matlab.mio5_utils',
+    'scipy.io.matlab.mio_utils',
+    'scipy.io.matlab.miobase',
+    'scipy.io.matlab.streams',
+    'scipy.io.mmio',
+    'scipy.io.netcdf',
+    'scipy.linalg.basic',
+    'scipy.linalg.decomp',
+    'scipy.linalg.decomp_cholesky',
+    'scipy.linalg.decomp_lu',
+    'scipy.linalg.decomp_qr',
+    'scipy.linalg.decomp_schur',
+    'scipy.linalg.decomp_svd',
+    'scipy.linalg.matfuncs',
+    'scipy.linalg.misc',
+    'scipy.linalg.special_matrices',
+    'scipy.misc.common',
+    'scipy.misc.doccer',
+    'scipy.ndimage.filters',
+    'scipy.ndimage.fourier',
+    'scipy.ndimage.interpolation',
+    'scipy.ndimage.measurements',
+    'scipy.ndimage.morphology',
+    'scipy.odr.models',
+    'scipy.odr.odrpack',
+    'scipy.optimize.cobyla',
+    'scipy.optimize.cython_optimize',
+    'scipy.optimize.lbfgsb',
+    'scipy.optimize.linesearch',
+    'scipy.optimize.minpack',
+    'scipy.optimize.minpack2',
+    'scipy.optimize.moduleTNC',
+    'scipy.optimize.nonlin',
+    'scipy.optimize.optimize',
+    'scipy.optimize.slsqp',
+    'scipy.optimize.tnc',
+    'scipy.optimize.zeros',
+    'scipy.signal.bsplines',
+    'scipy.signal.filter_design',
+    'scipy.signal.fir_filter_design',
+    'scipy.signal.lti_conversion',
+    'scipy.signal.ltisys',
+    'scipy.signal.signaltools',
+    'scipy.signal.spectral',
+    'scipy.signal.spline',
+    'scipy.signal.waveforms',
+    'scipy.signal.wavelets',
+    'scipy.signal.windows.windows',
+    'scipy.sparse.base',
+    'scipy.sparse.bsr',
+    'scipy.sparse.compressed',
+    'scipy.sparse.construct',
+    'scipy.sparse.coo',
+    'scipy.sparse.csc',
+    'scipy.sparse.csr',
+    'scipy.sparse.data',
+    'scipy.sparse.dia',
+    'scipy.sparse.dok',
+    'scipy.sparse.extract',
+    'scipy.sparse.lil',
+    'scipy.sparse.linalg.dsolve',
+    'scipy.sparse.linalg.eigen',
+    'scipy.sparse.linalg.interface',
+    'scipy.sparse.linalg.isolve',
+    'scipy.sparse.linalg.matfuncs',
+    'scipy.sparse.sparsetools',
+    'scipy.sparse.spfuncs',
+    'scipy.sparse.sputils',
+    'scipy.spatial.ckdtree',
+    'scipy.spatial.kdtree',
+    'scipy.spatial.qhull',
+    'scipy.spatial.transform.rotation',
+    'scipy.special.add_newdocs',
+    'scipy.special.basic',
+    'scipy.special.cython_special',
+    'scipy.special.orthogonal',
+    'scipy.special.sf_error',
+    'scipy.special.specfun',
+    'scipy.special.spfun_stats',
+    'scipy.stats.biasedurn',
+    'scipy.stats.kde',
+    'scipy.stats.morestats',
+    'scipy.stats.mstats_basic',
+    'scipy.stats.mstats_extras',
+    'scipy.stats.mvn',
+    'scipy.stats.stats',
+]
+
+
+def is_unexpected(name):
+    """Check if this needs to be considered."""
+    if '._' in name or '.tests' in name or '.setup' in name:
+        return False
+
+    if name in PUBLIC_MODULES:
+        return False
+
+    if name in PRIVATE_BUT_PRESENT_MODULES:
+        return False
+
+    return True
+
+
+SKIP_LIST = [
+    'scipy.conftest',
+    'scipy.version',
+]
+
+
+# XXX: this test does more than it says on the tin - in using `pkgutil.walk_packages`,
+# it will raise if it encounters any exceptions which are not handled by `ignore_errors`
+# while attempting to import each discovered package.
+# For now, `ignore_errors` only ignores what is necessary, but this could be expanded -
+# for example, to all errors from private modules or git subpackages - if desired.
+def test_all_modules_are_expected():
+    """
+    Test that we don't add anything that looks like a new public module by
+    accident.  The check is based on filenames.
+    """
+
+    def ignore_errors(name):
+        # if versions of other array libraries are installed which are incompatible
+        # with the installed NumPy version, there can be errors on importing
+        # `array_api_compat`. This should only raise if SciPy is configured with
+        # that library as an available backend.
+        for backend, dir_name in {'cupy': 'cupy', 'pytorch': 'torch'}.items():
+            path = f'array_api_compat.{dir_name}'
+            if path in name and backend not in xp_available_backends:
+                return
+        raise
+
+    modnames = []
+
+    for _, modname, _ in pkgutil.walk_packages(path=scipy.__path__,
+                                               prefix=scipy.__name__ + '.',
+                                               onerror=ignore_errors):
+        if is_unexpected(modname) and modname not in SKIP_LIST:
+            # We have a name that is new.  If that's on purpose, add it to
+            # PUBLIC_MODULES.  We don't expect to have to add anything to
+            # PRIVATE_BUT_PRESENT_MODULES.  Use an underscore in the name!
+            modnames.append(modname)
+
+    if modnames:
+        raise AssertionError(f'Found unexpected modules: {modnames}')
+
+
+# Stuff that clearly shouldn't be in the API and is detected by the next test
+# below
+SKIP_LIST_2 = [
+    'scipy.char',
+    'scipy.rec',
+    'scipy.emath',
+    'scipy.math',
+    'scipy.random',
+    'scipy.ctypeslib',
+    'scipy.ma'
+]
+
+
+def test_all_modules_are_expected_2():
+    """
+    Check all objects in the namespace. The pkgutil-based check in
+    `test_all_modules_are_expected` catches only filenames, not imports
+    into a namespace.
+    """
+
+    def find_unexpected_members(mod_name):
+        members = []
+        module = importlib.import_module(mod_name)
+        if hasattr(module, '__all__'):
+            objnames = module.__all__
+        else:
+            objnames = dir(module)
+
+        for objname in objnames:
+            if not objname.startswith('_'):
+                fullobjname = mod_name + '.' + objname
+                if isinstance(getattr(module, objname), types.ModuleType):
+                    if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2:
+                        members.append(fullobjname)
+
+        return members
+
+    unexpected_members = find_unexpected_members("scipy")
+    for modname in PUBLIC_MODULES:
+        unexpected_members.extend(find_unexpected_members(modname))
+
+    if unexpected_members:
+        raise AssertionError("Found unexpected object(s) that look like "
+                             f"modules: {unexpected_members}")
+
+
+def test_api_importable():
+    """
+    Check that all submodules listed higher up in this file can be imported.
+    Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
+    simply need to be removed from the list (deprecation may or may not be
+    needed - apply common sense).
+    """
+    def check_importable(module_name):
+        try:
+            importlib.import_module(module_name)
+        except (ImportError, AttributeError):
+            return False
+
+        return True
+
+    module_names = []
+    for module_name in PUBLIC_MODULES:
+        if not check_importable(module_name):
+            module_names.append(module_name)
+
+    if module_names:
+        raise AssertionError("Modules in the public API that cannot be "
+                             f"imported: {module_names}")
+
+    with warnings.catch_warnings(record=True):
+        warnings.filterwarnings('always', category=DeprecationWarning)
+        warnings.filterwarnings('always', category=ImportWarning)
+        for module_name in PRIVATE_BUT_PRESENT_MODULES:
+            if not check_importable(module_name):
+                module_names.append(module_name)
+
+    if module_names:
+        raise AssertionError("Modules that are not really public but looked "
+                             "public and can not be imported: "
+                             f"{module_names}")
+
+
+@pytest.mark.parametrize(("module_name", "correct_module"),
+                         [('scipy.constants.codata', None),
+                          ('scipy.constants.constants', None),
+                          ('scipy.fftpack.basic', None),
+                          ('scipy.fftpack.helper', None),
+                          ('scipy.fftpack.pseudo_diffs', None),
+                          ('scipy.fftpack.realtransforms', None),
+                          ('scipy.integrate.dop', None),
+                          ('scipy.integrate.lsoda', None),
+                          ('scipy.integrate.odepack', None),
+                          ('scipy.integrate.quadpack', None),
+                          ('scipy.integrate.vode', None),
+                          ('scipy.interpolate.fitpack', None),
+                          ('scipy.interpolate.fitpack2', None),
+                          ('scipy.interpolate.interpolate', None),
+                          ('scipy.interpolate.ndgriddata', None),
+                          ('scipy.interpolate.polyint', None),
+                          ('scipy.interpolate.rbf', None),
+                          ('scipy.io.harwell_boeing', None),
+                          ('scipy.io.idl', None),
+                          ('scipy.io.mmio', None),
+                          ('scipy.io.netcdf', None),
+                          ('scipy.io.arff.arffread', 'arff'),
+                          ('scipy.io.matlab.byteordercodes', 'matlab'),
+                          ('scipy.io.matlab.mio_utils', 'matlab'),
+                          ('scipy.io.matlab.mio', 'matlab'),
+                          ('scipy.io.matlab.mio4', 'matlab'),
+                          ('scipy.io.matlab.mio5_params', 'matlab'),
+                          ('scipy.io.matlab.mio5_utils', 'matlab'),
+                          ('scipy.io.matlab.mio5', 'matlab'),
+                          ('scipy.io.matlab.miobase', 'matlab'),
+                          ('scipy.io.matlab.streams', 'matlab'),
+                          ('scipy.linalg.basic', None),
+                          ('scipy.linalg.decomp', None),
+                          ('scipy.linalg.decomp_cholesky', None),
+                          ('scipy.linalg.decomp_lu', None),
+                          ('scipy.linalg.decomp_qr', None),
+                          ('scipy.linalg.decomp_schur', None),
+                          ('scipy.linalg.decomp_svd', None),
+                          ('scipy.linalg.matfuncs', None),
+                          ('scipy.linalg.misc', None),
+                          ('scipy.linalg.special_matrices', None),
+                          ('scipy.misc.common', None),
+                          ('scipy.ndimage.filters', None),
+                          ('scipy.ndimage.fourier', None),
+                          ('scipy.ndimage.interpolation', None),
+                          ('scipy.ndimage.measurements', None),
+                          ('scipy.ndimage.morphology', None),
+                          ('scipy.odr.models', None),
+                          ('scipy.odr.odrpack', None),
+                          ('scipy.optimize.cobyla', None),
+                          ('scipy.optimize.lbfgsb', None),
+                          ('scipy.optimize.linesearch', None),
+                          ('scipy.optimize.minpack', None),
+                          ('scipy.optimize.minpack2', None),
+                          ('scipy.optimize.moduleTNC', None),
+                          ('scipy.optimize.nonlin', None),
+                          ('scipy.optimize.optimize', None),
+                          ('scipy.optimize.slsqp', None),
+                          ('scipy.optimize.tnc', None),
+                          ('scipy.optimize.zeros', None),
+                          ('scipy.signal.bsplines', None),
+                          ('scipy.signal.filter_design', None),
+                          ('scipy.signal.fir_filter_design', None),
+                          ('scipy.signal.lti_conversion', None),
+                          ('scipy.signal.ltisys', None),
+                          ('scipy.signal.signaltools', None),
+                          ('scipy.signal.spectral', None),
+                          ('scipy.signal.waveforms', None),
+                          ('scipy.signal.wavelets', None),
+                          ('scipy.signal.windows.windows', 'windows'),
+                          ('scipy.sparse.lil', None),
+                          ('scipy.sparse.linalg.dsolve', 'linalg'),
+                          ('scipy.sparse.linalg.eigen', 'linalg'),
+                          ('scipy.sparse.linalg.interface', 'linalg'),
+                          ('scipy.sparse.linalg.isolve', 'linalg'),
+                          ('scipy.sparse.linalg.matfuncs', 'linalg'),
+                          ('scipy.sparse.sparsetools', None),
+                          ('scipy.sparse.spfuncs', None),
+                          ('scipy.sparse.sputils', None),
+                          ('scipy.spatial.ckdtree', None),
+                          ('scipy.spatial.kdtree', None),
+                          ('scipy.spatial.qhull', None),
+                          ('scipy.spatial.transform.rotation', 'transform'),
+                          ('scipy.special.add_newdocs', None),
+                          ('scipy.special.basic', None),
+                          ('scipy.special.orthogonal', None),
+                          ('scipy.special.sf_error', None),
+                          ('scipy.special.specfun', None),
+                          ('scipy.special.spfun_stats', None),
+                          ('scipy.stats.biasedurn', None),
+                          ('scipy.stats.kde', None),
+                          ('scipy.stats.morestats', None),
+                          ('scipy.stats.mstats_basic', 'mstats'),
+                          ('scipy.stats.mstats_extras', 'mstats'),
+                          ('scipy.stats.mvn', None),
+                          ('scipy.stats.stats', None)])
+def test_private_but_present_deprecation(module_name, correct_module):
+    # gh-18279, gh-17572, gh-17771 noted that deprecation warnings
+    # for imports from private modules
+    # were misleading. Check that this is resolved.
+    module = import_module(module_name)
+    if correct_module is None:
+        import_name = f'scipy.{module_name.split(".")[1]}'
+    else:
+        import_name = f'scipy.{module_name.split(".")[1]}.{correct_module}'
+
+    correct_import = import_module(import_name)
+
+    # Attributes that were formerly in `module_name` can still be imported from
+    # `module_name`, albeit with a deprecation warning. The specific message
+    # depends on whether the attribute is public in `scipy.xxx` or not.
+    for attr_name in module.__all__:
+        attr = getattr(correct_import, attr_name, None)
+        if attr is None:
+            message = f"`{module_name}.{attr_name}` is deprecated..."
+        else:
+            message = f"Please import `{attr_name}` from the `{import_name}`..."
+        with pytest.deprecated_call(match=message):
+            getattr(module, attr_name)
+
+    # Attributes that were not in `module_name` get an error notifying the user
+    # that the attribute is not in `module_name` and that `module_name` is deprecated.
+    message = f"`{module_name}` is deprecated..."
+    with pytest.raises(AttributeError, match=message):
+        getattr(module, "ekki")
+
+
+def test_misc_doccer_deprecation():
+    # gh-18279, gh-17572, gh-17771 noted that deprecation warnings
+    # for imports from private modules were misleading.
+    # Check that this is resolved.
+    # `test_private_but_present_deprecation` cannot be used since `correct_import`
+    # is a different subpackage (`_lib` instead of `misc`).
+    module = import_module('scipy.misc.doccer')
+    correct_import = import_module('scipy._lib.doccer')
+
+    # Attributes that were formerly in `scipy.misc.doccer` can still be imported from
+    # `scipy.misc.doccer`, albeit with a deprecation warning. The specific message
+    # depends on whether the attribute is in `scipy._lib.doccer` or not.
+    for attr_name in module.__all__:
+        attr = getattr(correct_import, attr_name, None)
+        if attr is None:
+            message = f"`scipy.misc.{attr_name}` is deprecated..."
+        else:
+            message = f"Please import `{attr_name}` from the `scipy._lib.doccer`..."
+        with pytest.deprecated_call(match=message):
+            getattr(module, attr_name)
+
+    # Attributes that were not in `scipy.misc.doccer` get an error
+    # notifying the user that the attribute is not in `scipy.misc.doccer` 
+    # and that `scipy.misc.doccer` is deprecated.
+    message = "`scipy.misc.doccer` is deprecated..."
+    with pytest.raises(AttributeError, match=message):
+        getattr(module, "ekki")
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_scipy_version.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_scipy_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..21f0e8e26aad7500fee2fec9e845d5cf6caea4c6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_scipy_version.py
@@ -0,0 +1,20 @@
+import re
+
+import scipy
+from numpy.testing import assert_
+
+
+def test_valid_scipy_version():
+    # Verify that the SciPy version is a valid one (no .post suffix or other
+    # nonsense). See NumPy issue gh-6431 for an issue caused by an invalid
+    # version.
+    version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
+    dev_suffix = r"(\.dev0\+.+([0-9a-f]{7}|Unknown))"
+    if scipy.version.release:
+        res = re.match(version_pattern, scipy.__version__)
+    else:
+        res = re.match(version_pattern + dev_suffix, scipy.__version__)
+
+    assert_(res is not None, scipy.__version__)
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_tmpdirs.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_tmpdirs.py
new file mode 100644
index 0000000000000000000000000000000000000000..734f42b32f8124924a7243b188f903eca40401e9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_tmpdirs.py
@@ -0,0 +1,42 @@
+""" Test tmpdirs module """
+from os import getcwd
+from os.path import realpath, abspath, dirname, isfile, join as pjoin, exists
+
+from scipy._lib._tmpdirs import tempdir, in_tempdir, in_dir
+
+from numpy.testing import assert_, assert_equal
+
+MY_PATH = abspath(__file__)
+MY_DIR = dirname(MY_PATH)
+
+
+def test_tempdir():
+    with tempdir() as tmpdir:
+        fname = pjoin(tmpdir, 'example_file.txt')
+        with open(fname, "w") as fobj:
+            fobj.write('a string\n')
+    assert_(not exists(tmpdir))
+
+
+def test_in_tempdir():
+    my_cwd = getcwd()
+    with in_tempdir() as tmpdir:
+        with open('test.txt', "w") as f:
+            f.write('some text')
+        assert_(isfile('test.txt'))
+        assert_(isfile(pjoin(tmpdir, 'test.txt')))
+    assert_(not exists(tmpdir))
+    assert_equal(getcwd(), my_cwd)
+
+
+def test_given_directory():
+    # Test InGivenDirectory
+    cwd = getcwd()
+    with in_dir() as tmpdir:
+        assert_equal(tmpdir, abspath(cwd))
+        assert_equal(tmpdir, abspath(getcwd()))
+    with in_dir(MY_DIR) as tmpdir:
+        assert_equal(tmpdir, MY_DIR)
+        assert_equal(realpath(MY_DIR), realpath(abspath(getcwd())))
+    # We were deleting the given directory! Check not so now.
+    assert_(isfile(MY_PATH))
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_warnings.py b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_warnings.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7a70ee2a78fac338fd56dc00de8e76777250452
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/tests/test_warnings.py
@@ -0,0 +1,140 @@
+"""
+Tests that scan for certain occurrences in the code. They may not find
+all such occurrences, but should catch almost all of them. This file was
+adapted from NumPy.
+"""
+
+
+import os
+from pathlib import Path
+import ast
+import tokenize
+
+import scipy
+
+import pytest
+
+
+class ParseCall(ast.NodeVisitor):
+    def __init__(self):
+        self.ls = []
+
+    def visit_Attribute(self, node):
+        ast.NodeVisitor.generic_visit(self, node)
+        self.ls.append(node.attr)
+
+    def visit_Name(self, node):
+        self.ls.append(node.id)
+
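+# Note (added for clarity): for a call like `warnings.filterwarnings("ignore")`,
+# ParseCall applied to `node.func` yields ls == ['warnings', 'filterwarnings'],
+# so ls[-1] is the called name and ls[-2] (if present) is its qualifier.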
+
+class FindFuncs(ast.NodeVisitor):
+    def __init__(self, filename):
+        super().__init__()
+        self.__filename = filename
+        self.bad_filters = []
+        self.bad_stacklevels = []
+
+    def visit_Call(self, node):
+        p = ParseCall()
+        p.visit(node.func)
+        ast.NodeVisitor.generic_visit(self, node)
+
+        if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
+            if node.args[0].value == "ignore":
+                self.bad_filters.append(
+                    f"{self.__filename}:{node.lineno}")
+
+        if p.ls[-1] == 'warn' and (
+                len(p.ls) == 1 or p.ls[-2] == 'warnings'):
+
+            if self.__filename == "_lib/tests/test_warnings.py":
+                # This file
+                return
+
+            # See if stacklevel exists:
+            if len(node.args) == 3:
+                return
+            args = {kw.arg for kw in node.keywords}
+            if "stacklevel" not in args:
+                self.bad_stacklevels.append(
+                    f"{self.__filename}:{node.lineno}")
+
+
+@pytest.fixture(scope="session")
+def warning_calls():
+    # combined "ignore" and stacklevel error
+    base = Path(scipy.__file__).parent
+
+    bad_filters = []
+    bad_stacklevels = []
+
+    for path in base.rglob("*.py"):
+        # use tokenize to auto-detect encoding on systems where no
+        # default encoding is defined (e.g., LANG='C')
+        with tokenize.open(str(path)) as file:
+            tree = ast.parse(file.read(), filename=str(path))
+            finder = FindFuncs(path.relative_to(base))
+            finder.visit(tree)
+            bad_filters.extend(finder.bad_filters)
+            bad_stacklevels.extend(finder.bad_stacklevels)
+
+    return bad_filters, bad_stacklevels
+
+
+@pytest.mark.slow
+def test_warning_calls_filters(warning_calls):
+    bad_filters, bad_stacklevels = warning_calls
+
+    # We try not to add filters in the code base, because those filters aren't
+    # thread-safe. We aim to only filter in tests with
+    # np.testing.suppress_warnings. However, in some cases it may prove
+    # necessary to filter out warnings, because we can't (easily) fix the root
+    # cause for them and we don't want users to see some warnings when they use
+    # SciPy correctly. So we list exceptions here.  Add new entries only if
+    # there's a good reason.
+    allowed_filters = (
+        os.path.join('datasets', '_fetchers.py'),
+        os.path.join('datasets', '__init__.py'),
+        os.path.join('optimize', '_optimize.py'),
+        os.path.join('optimize', '_constraints.py'),
+        os.path.join('optimize', '_nnls.py'),
+        os.path.join('signal', '_ltisys.py'),
+        os.path.join('sparse', '__init__.py'),  # np.matrix pending-deprecation
+        os.path.join('stats', '_discrete_distns.py'),  # gh-14901
+        os.path.join('stats', '_continuous_distns.py'),
+        os.path.join('stats', '_binned_statistic.py'),  # gh-19345
+        os.path.join('_lib', '_util.py'),  # gh-19341
+    )
+    bad_filters = [item for item in bad_filters if item.split(':')[0] not in
+                   allowed_filters]
+
+    if bad_filters:
+        raise AssertionError(
+            "warning ignore filter should not be used, instead, use\n"
+            "numpy.testing.suppress_warnings (in tests only);\n"
+            "found in:\n    {}".format(
+                "\n    ".join(bad_filters)))
+
+
+@pytest.mark.slow
+@pytest.mark.xfail(reason="stacklevels currently missing")
+def test_warning_calls_stacklevels(warning_calls):
+    bad_filters, bad_stacklevels = warning_calls
+
+    msg = ""
+
+    if bad_filters:
+        msg += ("warning ignore filter should not be used, instead, use\n"
+                "numpy.testing.suppress_warnings (in tests only);\n"
+                "found in:\n    {}".format("\n    ".join(bad_filters)))
+        msg += "\n\n"
+
+    if bad_stacklevels:
+        msg += "warnings should have an appropriate stacklevel:\n    {}".format(
+                "\n    ".join(bad_stacklevels))
+
+    if msg:
+        raise AssertionError(msg)
diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/uarray.py b/venv/lib/python3.10/site-packages/scipy/_lib/uarray.py
new file mode 100644
index 0000000000000000000000000000000000000000..b29fc713efb3e836cc179ac87ce41f87b51870ef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/_lib/uarray.py
@@ -0,0 +1,38 @@
+"""`uarray` provides functions for generating multimethods that dispatch to
+multiple different backends
+
+This should be imported, rather than `_uarray` so that an installed version could
+be used instead, if available. This means that users can call
+`uarray.set_backend` directly instead of going through SciPy.
+
+"""
+
+
+# Prefer an installed version of uarray, if available
+try:
+    import uarray as _uarray
+except ImportError:
+    _has_uarray = False
+else:
+    from scipy._lib._pep440 import Version as _Version
+
+    _has_uarray = _Version(_uarray.__version__) >= _Version("0.8")
+    del _uarray
+    del _Version
+
+
+if _has_uarray:
+    from uarray import *  # noqa: F403
+    from uarray import _Function
+else:
+    from ._uarray import *  # noqa: F403
+    from ._uarray import _Function  # noqa: F401
+
+del _has_uarray
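+
+# Usage sketch (illustrative; `some_backend` is a hypothetical backend object):
+#
+#     from scipy._lib import uarray
+#
+#     with uarray.set_backend(some_backend):
+#         ...  # multimethods called here dispatch to `some_backend`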
diff --git a/venv/lib/python3.10/site-packages/scipy/special/special/cephes/const.h b/venv/lib/python3.10/site-packages/scipy/special/special/cephes/const.h
new file mode 100644
index 0000000000000000000000000000000000000000..06299581f01a81de3ff79be5f876511c99ef306a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/special/special/cephes/const.h
@@ -0,0 +1,86 @@
+/* Translated into C++ by SciPy developers in 2024.
+ * Original header with Copyright information appears below.
+ *
+ * Since we support only IEEE-754 floating point numbers, conditional logic
+ * supporting other arithmetic types has been removed.
+ */
+
+/*
+ *
+ *
+ *                                                   const.c
+ *
+ *     Globally declared constants
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * extern double nameofconstant;
+ *
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * This file contains a number of mathematical constants and
+ * also some needed size parameters of the computer arithmetic.
+ * The values are supplied as arrays of hexadecimal integers
+ * for IEEE arithmetic, and in a normal decimal scientific notation for
+ * other machines.  The particular notation used is determined
+ * by a symbol (IBMPC, or UNK) defined in the include file
+ * mconf.h.
+ *
+ * The default size parameters are as follows.
+ *
+ * For UNK mode:
+ * MACHEP =  1.38777878078144567553E-17       2**-56
+ * MAXLOG =  8.8029691931113054295988E1       log(2**127)
+ * MINLOG = -8.872283911167299960540E1        log(2**-128)
+ *
+ * For IEEE arithmetic (IBMPC):
+ * MACHEP =  1.11022302462515654042E-16       2**-53
+ * MAXLOG =  7.09782712893383996843E2         log(2**1024)
+ * MINLOG = -7.08396418532264106224E2         log(2**-1022)
+ *
+ * The global symbols for mathematical constants are
+ * SQ2OPI =  7.9788456080286535587989E-1      sqrt( 2/pi )
+ * LOGSQ2 =  3.46573590279972654709E-1        log(2)/2
+ * THPIO4 =  2.35619449019234492885           3*pi/4
+ *
+ * These lists are subject to change.
+ */
+/*                                                     const.c */
+
+/*
+ * Cephes Math Library Release 2.3:  March, 1995
+ * Copyright 1984, 1995 by Stephen L. Moshier
+ */
+#pragma once
+
+namespace special {
+namespace cephes {
+    namespace detail {
+        constexpr double MACHEP = 1.11022302462515654042E-16;  // 2**-53
+        constexpr double MAXLOG = 7.09782712893383996732E2;    // log(DBL_MAX)
+        constexpr double MINLOG = -7.451332191019412076235E2;  // log(2**-1075)
+        constexpr double SQ2OPI = 7.9788456080286535587989E-1; // sqrt( 2/pi )
+        constexpr double LOGSQ2 = 3.46573590279972654709E-1;   // log(2)/2
+        constexpr double THPIO4 = 2.35619449019234492885;      // 3*pi/4
+        // Following two added by SciPy developers.
+        // Euler's constant
+        constexpr double SCIPY_EULER = 0.577215664901532860606512090082402431;
+        // e as long double
+        constexpr long double SCIPY_El = 2.718281828459045235360287471352662498L;
+    } // namespace detail
+} // namespace cephes
+} // namespace special
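+
+// Usage sketch (illustrative, not part of the library): MAXLOG is the largest
+// argument for which std::exp does not overflow, so it can guard calls:
+//
+//     double safe_exp(double x) {
+//         return x > special::cephes::detail::MAXLOG
+//                    ? std::numeric_limits<double>::infinity()  // needs <limits>
+//                    : std::exp(x);
+//     }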
diff --git a/venv/lib/python3.10/site-packages/scipy/special/special/cephes/polevl.h b/venv/lib/python3.10/site-packages/scipy/special/special/cephes/polevl.h
new file mode 100644
index 0000000000000000000000000000000000000000..07a591cad5fd552045598184ea1eae6e2b0dced7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/special/special/cephes/polevl.h
@@ -0,0 +1,171 @@
+/* Translated into C++ by SciPy developers in 2024.
+ * Original header with Copyright information appears below.
+ */
+
+/*                                                     polevl.c
+ *                                                     p1evl.c
+ *
+ *     Evaluate polynomial
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * int N;
+ * double x, y, coef[N+1], polevl[];
+ *
+ * y = polevl( x, coef, N );
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Evaluates polynomial of degree N:
+ *
+ *                     2          N
+ * y  =  C  + C x + C x  +...+ C x
+ *        0    1     2          N
+ *
+ * Coefficients are stored in reverse order:
+ *
+ * coef[0] = C  , ..., coef[N] = C  .
+ *            N                   0
+ *
+ * The function p1evl() assumes that c_N = 1.0 so that coefficient
+ * is omitted from the array.  Its calling arguments are
+ * otherwise the same as polevl().
+ *
+ *
+ * SPEED:
+ *
+ * In the interest of speed, there are no checks for out
+ * of bounds arithmetic.  This routine is used by most of
+ * the functions in the library.  Depending on available
+ * equipment features, the user may wish to rewrite the
+ * program in microcode or assembly language.
+ *
+ */
+
+/*
+ * Cephes Math Library Release 2.1:  December, 1988
+ * Copyright 1984, 1987, 1988 by Stephen L. Moshier
+ * Direct inquiries to 30 Frost Street, Cambridge, MA 02140
+ */
+
+/* Sources:
+ * [1] Holin et al., "Polynomial and Rational Function Evaluation",
+ *     https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/roots/rational.html
+ */
+
+/* Scipy changes:
+ * - 06-23-2016: add code for evaluating rational functions
+ */
+#pragma once
+
+#include "../config.h"
+
+namespace special {
+namespace cephes {
+    SPECFUN_HOST_DEVICE inline double polevl(double x, const double coef[], int N) {
+        double ans;
+        int i;
+        const double *p;
+
+        p = coef;
+        ans = *p++;
+        i = N;
+
+        do {
+            ans = ans * x + *p++;
+        } while (--i);
+
+        return (ans);
+    }
+
+    /*                                                     p1evl() */
+    /*                                          N
+     * Evaluate polynomial when coefficient of x  is 1.0.
+     * That is, C_{N} is assumed to be 1, and that coefficient
+     * is not included in the input array coef.
+     * coef must have length N and contain the polynomial coefficients
+     * stored as
+     *     coef[0] = C_{N-1}
+     *     coef[1] = C_{N-2}
+     *          ...
+     *     coef[N-2] = C_1
+     *     coef[N-1] = C_0
+     * Otherwise same as polevl.
+     */
+
+    SPECFUN_HOST_DEVICE inline double p1evl(double x, const double coef[], int N) {
+        double ans;
+        const double *p;
+        int i;
+
+        p = coef;
+        ans = x + *p++;
+        i = N - 1;
+
+        do {
+            ans = ans * x + *p++;
+        } while (--i);
+
+        return (ans);
+    }
+
+    /* Evaluate a rational function. See [1]. */
+
+    SPECFUN_HOST_DEVICE inline double ratevl(double x, const double num[], int M, const double denom[], int N) {
+        int i, dir;
+        double y, num_ans, denom_ans;
+        double absx = std::abs(x);
+        const double *p;
+
+        if (absx > 1) {
+            /* Evaluate as a polynomial in 1/x. */
+            dir = -1;
+            p = num + M;
+            y = 1 / x;
+        } else {
+            dir = 1;
+            p = num;
+            y = x;
+        }
+
+        /* Evaluate the numerator */
+        num_ans = *p;
+        p += dir;
+        for (i = 1; i <= M; i++) {
+            num_ans = num_ans * y + *p;
+            p += dir;
+        }
+
+        /* Evaluate the denominator */
+        if (absx > 1) {
+            p = denom + N;
+        } else {
+            p = denom;
+        }
+
+        denom_ans = *p;
+        p += dir;
+        for (i = 1; i <= N; i++) {
+            denom_ans = denom_ans * y + *p;
+            p += dir;
+        }
+
+        if (absx > 1) {
+            i = N - M;
+            return std::pow(x, i) * num_ans / denom_ans;
+        } else {
+            return num_ans / denom_ans;
+        }
+    }
+} // namespace cephes
+} // namespace special
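+
+// Usage sketch (illustrative): coefficients are stored highest degree first,
+// so y = 2x^2 + 3x + 4 at x = 1.5 is evaluated as
+//
+//     const double coef[] = {2.0, 3.0, 4.0};
+//     double y = special::cephes::polevl(1.5, coef, 2);  // y == 13.0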
diff --git a/venv/lib/python3.10/site-packages/scipy/special/special/cephes/trig.h b/venv/lib/python3.10/site-packages/scipy/special/special/cephes/trig.h
new file mode 100644
index 0000000000000000000000000000000000000000..26a3cf8ad7e5a29ee433cf95c2747b5054854cdd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/special/special/cephes/trig.h
@@ -0,0 +1,61 @@
+/* Translated into C++ by SciPy developers in 2024.
+ *
+ * Original author: Josh Wilson, 2020.
+ */
+
+/*
+ * Implement sin(pi * x) and cos(pi * x) for real x. Since the periods
+ * of these functions are integral (and thus representable in double
+ * precision), it's possible to compute them with greater accuracy
+ * than sin(x) and cos(x).
+ */
+#pragma once
+
+#include "../config.h"
+
+namespace special {
+namespace cephes {
+
+    /* Compute sin(pi * x). */
+    SPECFUN_HOST_DEVICE inline double sinpi(double x) {
+        double s = 1.0;
+
+        if (x < 0.0) {
+            x = -x;
+            s = -1.0;
+        }
+
+        double r = std::fmod(x, 2.0);
+        if (r < 0.5) {
+            return s * std::sin(M_PI * r);
+        } else if (r > 1.5) {
+            return s * std::sin(M_PI * (r - 2.0));
+        } else {
+            return -s * std::sin(M_PI * (r - 1.0));
+        }
+    }
+
+    /* Compute cos(pi * x) */
+    SPECFUN_HOST_DEVICE inline double cospi(double x) {
+        if (x < 0.0) {
+            x = -x;
+        }
+
+        double r = std::fmod(x, 2.0);
+        if (r == 0.5) {
+            // We don't want to return -0.0
+            return 0.0;
+        }
+        if (r < 1.0) {
+            return -std::sin(M_PI * (r - 0.5));
+        } else {
+            return std::sin(M_PI * (r - 1.5));
+        }
+    }
+} // namespace cephes
+} // namespace special
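+
+// Usage sketch (illustrative): the argument reduction is exact, e.g.
+//
+//     double a = special::cephes::cospi(0.5);  // exactly 0.0
+//     double b = std::cos(M_PI * 0.5);         // ~6.12e-17: M_PI rounds pi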